git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Thu, 18 Oct 2007 21:54:03 +0000 (14:54 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Thu, 18 Oct 2007 21:54:03 +0000 (14:54 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: reduce schedstat variable overhead a bit
  sched: add KERN_CONT annotation
  sched: cleanup, make struct rq comments more consistent
  sched: cleanup, fix spacing
  sched: fix return value of wait_for_completion_interruptible()

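(Note on the last shortlog entry: the fix changes wait_for_completion_interruptible() to return 0 once the completion is signalled and -ERESTARTSYS when the wait is interrupted, instead of leaking the remaining-timeout value from wait_for_common(). A minimal, hypothetical caller sketch — my_driver_wait() is not part of this merge — showing the intended usage after the fix:)

#include <linux/completion.h>

/*
 * Hypothetical caller, not part of this merge: with the fix in the
 * diff below, wait_for_completion_interruptible() returns 0 on
 * completion and -ERESTARTSYS if it was interrupted by a signal,
 * so a non-zero return value can simply be propagated.
 */
static int my_driver_wait(struct completion *done)
{
	int ret;

	ret = wait_for_completion_interruptible(done);
	if (ret)	/* -ERESTARTSYS: interrupted by a signal */
		return ret;

	/* completion was signalled, continue normally */
	return 0;
}
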
include/linux/sched.h
kernel/sched.c

diff --combined include/linux/sched.h
index 7accc04e23ab0310166d72ebf32623427a6c5a71,2f9c1261f2029b9333fc34dd79c1107fd2741c26..10a83d8d5775d370c6eac6ff6b8488e07e9b7579
@@@ -569,7 -569,7 +569,7 @@@ struct sched_info 
                           last_queued; /* when we were last queued to run */
  #ifdef CONFIG_SCHEDSTATS
        /* BKL stats */
-       unsigned long bkl_count;
+       unsigned int bkl_count;
  #endif
  };
  #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@@ -705,34 -705,34 +705,34 @@@ struct sched_domain 
  
  #ifdef CONFIG_SCHEDSTATS
        /* load_balance() stats */
-       unsigned long lb_count[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
-       unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_count[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
+       unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
  
        /* Active load balancing */
-       unsigned long alb_count;
-       unsigned long alb_failed;
-       unsigned long alb_pushed;
+       unsigned int alb_count;
+       unsigned int alb_failed;
+       unsigned int alb_pushed;
  
        /* SD_BALANCE_EXEC stats */
-       unsigned long sbe_count;
-       unsigned long sbe_balanced;
-       unsigned long sbe_pushed;
+       unsigned int sbe_count;
+       unsigned int sbe_balanced;
+       unsigned int sbe_pushed;
  
        /* SD_BALANCE_FORK stats */
-       unsigned long sbf_count;
-       unsigned long sbf_balanced;
-       unsigned long sbf_pushed;
+       unsigned int sbf_count;
+       unsigned int sbf_balanced;
+       unsigned int sbf_pushed;
  
        /* try_to_wake_up() stats */
-       unsigned long ttwu_wake_remote;
-       unsigned long ttwu_move_affine;
-       unsigned long ttwu_move_balance;
+       unsigned int ttwu_wake_remote;
+       unsigned int ttwu_move_affine;
+       unsigned int ttwu_move_balance;
  #endif
  };
  
@@@ -991,7 -991,7 +991,7 @@@ struct task_struct 
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
  
        unsigned int rt_priority;
 -      cputime_t utime, stime;
 +      cputime_t utime, stime, utimescaled, stimescaled;
        cputime_t gtime;
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;             /* monotonic time */
  
        unsigned long ptrace_message;
        siginfo_t *last_siginfo; /* For ptrace use.  */
 -/*
 - * current io wait handle: wait queue entry to use for io waits
 - * If this thread is processing aio, this points at the waitqueue
 - * inside the currently handled kiocb. It may be NULL (i.e. default
 - * to a stack based synchronous wait) if its doing sync IO.
 - */
 -      wait_queue_t *io_wait;
  #ifdef CONFIG_TASK_XACCT
  /* i/o counters(bytes read/written, #syscalls */
        u64 rchar, wchar, syscr, syscw;
diff --combined kernel/sched.c
index 12534421d7b5f4c46559a3a3c6aede140d2f5f3c,b60f8a5ae2be3a9ffb38aaaf7cb10e14f63be0c5..ed90be46fb31a3a3a98fb0ef92e4fff8a1dedcfc
@@@ -266,7 -266,8 +266,8 @@@ struct rt_rq 
   * acquire operations must be ordered by ascending &runqueue.
   */
  struct rq {
-       spinlock_t lock;        /* runqueue lock */
+       /* runqueue lock: */
+       spinlock_t lock;
  
        /*
         * nr_running and cpu_load should be in the same cacheline because
  #ifdef CONFIG_NO_HZ
        unsigned char in_nohz_recently;
  #endif
-       struct load_weight load;        /* capture load from *all* tasks on this cpu */
+       /* capture load from *all* tasks on this cpu: */
+       struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;
  
        struct cfs_rq cfs;
  #ifdef CONFIG_FAIR_GROUP_SCHED
-       struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
+       /* list of leaf cfs_rq on this cpu: */
+       struct list_head leaf_cfs_rq_list;
  #endif
        struct rt_rq  rt;
  
        /* For active balancing */
        int active_balance;
        int push_cpu;
-       int cpu;                /* cpu of this runqueue */
+       /* cpu of this runqueue: */
+       int cpu;
  
        struct task_struct *migration_thread;
        struct list_head migration_queue;
        struct sched_info rq_sched_info;
  
        /* sys_sched_yield() stats */
-       unsigned long yld_exp_empty;
-       unsigned long yld_act_empty;
-       unsigned long yld_both_empty;
-       unsigned long yld_count;
+       unsigned int yld_exp_empty;
+       unsigned int yld_act_empty;
+       unsigned int yld_both_empty;
+       unsigned int yld_count;
  
        /* schedule() stats */
-       unsigned long sched_switch;
-       unsigned long sched_count;
-       unsigned long sched_goidle;
+       unsigned int sched_switch;
+       unsigned int sched_count;
+       unsigned int sched_goidle;
  
        /* try_to_wake_up() stats */
-       unsigned long ttwu_count;
-       unsigned long ttwu_local;
+       unsigned int ttwu_count;
+       unsigned int ttwu_local;
  
        /* BKL stats */
-       unsigned long bkl_count;
+       unsigned int bkl_count;
  #endif
        struct lock_class_key rq_lock_key;
  };
@@@ -449,12 -453,12 +453,12 @@@ enum 
  };
  
  const_debug unsigned int sysctl_sched_features =
-               SCHED_FEAT_NEW_FAIR_SLEEPERS    *1 |
-               SCHED_FEAT_START_DEBIT          *1 |
-               SCHED_FEAT_TREE_AVG             *0 |
-               SCHED_FEAT_APPROX_AVG           *0 |
-               SCHED_FEAT_WAKEUP_PREEMPT       *1 |
-               SCHED_FEAT_PREEMPT_RESTRICT     *1;
+               SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
+               SCHED_FEAT_START_DEBIT          * 1 |
+               SCHED_FEAT_TREE_AVG             * 0 |
+               SCHED_FEAT_APPROX_AVG           * 0 |
+               SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
+               SCHED_FEAT_PREEMPT_RESTRICT     * 1;
  
  #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
  
@@@ -3333,16 -3337,6 +3337,16 @@@ void account_guest_time(struct task_str
        cpustat->guest = cputime64_add(cpustat->guest, tmp);
  }
  
 +/*
 + * Account scaled user cpu time to a process.
 + * @p: the process that the cpu time gets accounted to
 + * @cputime: the cpu time spent in user space since the last update
 + */
 +void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
 +{
 +      p->utimescaled = cputime_add(p->utimescaled, cputime);
 +}
 +
  /*
   * Account system cpu time to a process.
   * @p: the process that the cpu time gets accounted to
@@@ -3380,17 -3374,6 +3384,17 @@@ void account_system_time(struct task_st
        acct_update_integrals(p);
  }
  
 +/*
 + * Account scaled system cpu time to a process.
 + * @p: the process that the cpu time gets accounted to
 + * @hardirq_offset: the offset to subtract from hardirq_count()
 + * @cputime: the cpu time spent in kernel space since the last update
 + */
 +void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
 +{
 +      p->stimescaled = cputime_add(p->stimescaled, cputime);
 +}
 +
  /*
   * Account for involuntary wait time.
   * @p: the process from which the cpu time has been stolen
@@@ -3880,7 -3863,10 +3884,10 @@@ EXPORT_SYMBOL(wait_for_completion_timeo
  
  int __sched wait_for_completion_interruptible(struct completion *x)
  {
-       return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+       if (t == -ERESTARTSYS)
+               return t;
+       return 0;
  }
  EXPORT_SYMBOL(wait_for_completion_interruptible);
  
@@@ -4815,18 -4801,18 +4822,18 @@@ static void show_task(struct task_struc
        unsigned state;
  
        state = p->state ? __ffs(p->state) + 1 : 0;
-       printk("%-13.13s %c", p->comm,
+       printk(KERN_INFO "%-13.13s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  #if BITS_PER_LONG == 32
        if (state == TASK_RUNNING)
-               printk(" running  ");
+               printk(KERN_CONT " running  ");
        else
-               printk(" %08lx ", thread_saved_pc(p));
+               printk(KERN_CONT " %08lx ", thread_saved_pc(p));
  #else
        if (state == TASK_RUNNING)
-               printk("  running task    ");
+               printk(KERN_CONT "  running task    ");
        else
-               printk(" %016lx ", thread_saved_pc(p));
+               printk(KERN_CONT " %016lx ", thread_saved_pc(p));
  #endif
  #ifdef CONFIG_DEBUG_STACK_USAGE
        {
                free = (unsigned long)n - (unsigned long)end_of_stack(p);
        }
  #endif
-       printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+       printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
  
        if (state != TASK_RUNNING)
                show_stack(p, NULL);
@@@ -5385,7 -5371,7 +5392,7 @@@ sd_alloc_ctl_domain_table(struct sched_
        return table;
  }
  
- static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+ static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
  {
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
@@@ -5619,20 -5605,20 +5626,20 @@@ static void sched_domain_debug(struct s
                        }
  
                        if (!group->__cpu_power) {
-                               printk("\n");
+                               printk(KERN_CONT "\n");
                                printk(KERN_ERR "ERROR: domain->cpu_power not "
                                                "set\n");
                                break;
                        }
  
                        if (!cpus_weight(group->cpumask)) {
-                               printk("\n");
+                               printk(KERN_CONT "\n");
                                printk(KERN_ERR "ERROR: empty group\n");
                                break;
                        }
  
                        if (cpus_intersects(groupmask, group->cpumask)) {
-                               printk("\n");
+                               printk(KERN_CONT "\n");
                                printk(KERN_ERR "ERROR: repeated CPUs\n");
                                break;
                        }
                        cpus_or(groupmask, groupmask, group->cpumask);
  
                        cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-                       printk(" %s", str);
+                       printk(KERN_CONT " %s", str);
  
                        group = group->next;
                } while (group != sd->groups);
-               printk("\n");
+               printk(KERN_CONT "\n");
  
                if (!cpus_equal(sd->span, groupmask))
                        printk(KERN_ERR "ERROR: groups don't span "