git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 13 Jun 2014 02:42:15 +0000 (19:42 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 13 Jun 2014 02:42:15 +0000 (19:42 -0700)
Pull more scheduler updates from Ingo Molnar:
 "Second round of scheduler changes:
   - try-to-wakeup and IPI reduction speedups, from Andy Lutomirski
   - continued power scheduling cleanups and refactorings, from Nicolas
     Pitre
   - misc fixes and enhancements"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Delete extraneous extern for to_ratio()
  sched/idle: Optimize try-to-wake-up IPI
  sched/idle: Simplify wake_up_idle_cpu()
  sched/idle: Clear polling before descheduling the idle thread
  sched, trace: Add a tracepoint for IPI-less remote wakeups
  cpuidle: Set polling in poll_idle
  sched: Remove redundant assignment to "rt_rq" in update_curr_rt(...)
  sched: Rename capacity related flags
  sched: Final power vs. capacity cleanups
  sched: Remove remaining dubious usage of "power"
  sched: Let 'struct sched_group_power' care about CPU capacity
  sched/fair: Disambiguate existing/remaining "capacity" usage
  sched/fair: Change "has_capacity" to "has_free_capacity"
  sched/fair: Remove "power" from 'struct numa_stats'
  sched: Fix signedness bug in yield_to()
  sched/fair: Use time_after() in record_wakee()
  sched/balancing: Reduce the rate of needless idle load balancing
  sched/fair: Fix unlocked reads of some cfs_b->quota/period
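
The "try-to-wakeup and IPI reduction" commits above rest on one idea: when the target CPU's idle task is polling on TIF_POLLING_NRFLAG, atomically setting TIF_NEED_RESCHED is enough and the reschedule IPI can be skipped (see set_nr_if_polling() and ttwu_queue_remote() in the kernel/sched/core.c hunks below). The following standalone C11 sketch mimics that handshake with C atomics; the flag bit values and the single-flag setup are illustrative, not the kernel's actual thread_info layout.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TIF_POLLING_NRFLAG	(1u << 0)	/* illustrative bit positions */
#define TIF_NEED_RESCHED	(1u << 1)

/* Set TIF_NEED_RESCHED only if TIF_POLLING_NRFLAG is set; returns true
 * when the polling idle loop will notice the flag on its own, i.e. the
 * caller may skip the reschedule IPI. */
static bool set_nr_if_polling(_Atomic unsigned int *flags)
{
	unsigned int val = atomic_load(flags);

	for (;;) {
		if (!(val & TIF_POLLING_NRFLAG))
			return false;		/* not polling: caller must send an IPI */
		if (val & TIF_NEED_RESCHED)
			return true;		/* already flagged */
		/* try to set NEED_RESCHED; on failure val is reloaded */
		if (atomic_compare_exchange_weak(flags, &val,
						 val | TIF_NEED_RESCHED))
			return true;
	}
}

int main(void)
{
	_Atomic unsigned int idle_flags = TIF_POLLING_NRFLAG;

	if (set_nr_if_polling(&idle_flags))
		printf("idle CPU is polling: skip the IPI\n");
	else
		printf("send reschedule IPI\n");
	return 0;
}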

arch/arm/kernel/topology.c
arch/powerpc/kernel/smp.c
include/linux/kvm_host.h
include/linux/sched.h
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
virt/kvm/kvm_main.c

diff --combined arch/arm/kernel/topology.c
index 3997c411c1403659123d188c1c0750c58f59af3f,d42a7db22236da087aeb1a4ef41c5391b30dc765..9d853189028bb0c79ad72557b018c12d5416aa67
  #include <asm/topology.h>
  
  /*
-  * cpu power scale management
+  * cpu capacity scale management
   */
  
  /*
-  * cpu power table
+  * cpu capacity table
   * This per cpu data structure describes the relative capacity of each core.
   * On a heterogeneous system, cores don't have the same computation capacity
-  * and we reflect that difference in the cpu_power field so the scheduler can
-  * take this difference into account during load balance. A per cpu structure
-  * is preferred because each CPU updates its own cpu_power field during the
-  * load balance except for idle cores. One idle core is selected to run the
-  * rebalance_domains for all idle cores and the cpu_power can be updated
-  * during this sequence.
+  * and we reflect that difference in the cpu_capacity field so the scheduler
+  * can take this difference into account during load balance. A per cpu
+  * structure is preferred because each CPU updates its own cpu_capacity field
+  * during the load balance except for idle cores. One idle core is selected
+  * to run the rebalance_domains for all idle cores and the cpu_capacity can be
+  * updated during this sequence.
   */
  static DEFINE_PER_CPU(unsigned long, cpu_scale);
  
- unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
  {
        return per_cpu(cpu_scale, cpu);
  }
  
- static void set_power_scale(unsigned int cpu, unsigned long power)
+ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
  {
-       per_cpu(cpu_scale, cpu) = power;
+       per_cpu(cpu_scale, cpu) = capacity;
  }
  
  #ifdef CONFIG_OF
@@@ -62,11 -62,11 +62,11 @@@ struct cpu_efficiency 
   * Table of relative efficiency of each processor
   * The efficiency value must fit in 20 bits and the final
   * cpu_scale value must be in the range
-  *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+  *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
   * in order to return at most 1 when DIV_ROUND_CLOSEST
   * is used to compute the capacity of a CPU.
   * Processors that are not defined in the table,
-  * use the default SCHED_POWER_SCALE value for cpu_scale.
+  * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
   */
  static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
@@@ -83,21 -83,21 +83,21 @@@ static unsigned long middle_capacity = 
   * Iterate all CPUs' descriptor in DT and compute the efficiency
   * (as per table_efficiency). Also calculate a middle efficiency
   * as close as possible to  (max{eff_i} - min{eff_i}) / 2
-  * This is later used to scale the cpu_power field such that an
-  * 'average' CPU is of middle power. Also see the comments near
-  * table_efficiency[] and update_cpu_power().
+  * This is later used to scale the cpu_capacity field such that an
+  * 'average' CPU is of middle capacity. Also see the comments near
+  * table_efficiency[] and update_cpu_capacity().
   */
  static void __init parse_dt_topology(void)
  {
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
 -      unsigned long min_capacity = (unsigned long)(-1);
 +      unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
 -      int alloc_size, cpu = 0;
 +      int cpu = 0;
  
 -      alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
 -      __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
 +      __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
 +                               GFP_NOWAIT);
  
        for_each_possible_cpu(cpu) {
                const u32 *rate;
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
-        * SCHED_POWER_SCALE, which is the default value, but with the
+        * SCHED_CAPACITY_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
-                               >> (SCHED_POWER_SHIFT+1);
+                               >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
-                               >> (SCHED_POWER_SHIFT-1)) + 1;
+                               >> (SCHED_CAPACITY_SHIFT-1)) + 1;
  
  }
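
The parse_dt_topology() code above picks a middle_capacity divisor so that cpu_capacity(cpu) / middle_capacity lands an "average" core near SCHED_CAPACITY_SCALE while honouring the 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 constraint documented near table_efficiency[]. A minimal userspace sketch of that computation, with made-up raw capacities standing in for the DT efficiency * clock-rate products:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* the scale unit is 1 << 10 == 1024 */

int main(void)
{
	/* illustrative raw capacities (efficiency * clock rate), e.g. A7 vs A15 */
	unsigned long cap[] = { 150000, 150000, 400000, 400000 };
	unsigned long min = 150000, max = 400000, middle;
	unsigned int i;

	/* same shape as parse_dt_topology(): pick the divisor so every
	 * resulting cpu_scale stays below 3*1024/2 */
	if (4 * max < 3 * (max + min))
		middle = (min + max) >> (SCHED_CAPACITY_SHIFT + 1);
	else
		middle = ((max / 3) >> (SCHED_CAPACITY_SHIFT - 1)) + 1;

	for (i = 0; i < sizeof(cap) / sizeof(cap[0]); i++)
		printf("cpu%u: cpu_scale = %lu\n", i, cap[i] / middle);
	return 0;
}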
  
   * boot. The update of all CPUs is in O(n^2) for heterogeneous system but the
   * function returns directly for SMP system.
   */
- static void update_cpu_power(unsigned int cpu)
+ static void update_cpu_capacity(unsigned int cpu)
  {
        if (!cpu_capacity(cpu))
                return;
  
-       set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+       set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
  
-       printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
-               cpu, arch_scale_freq_power(NULL, cpu));
+       printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n",
+               cpu, arch_scale_freq_capacity(NULL, cpu));
  }
  
  #else
  static inline void parse_dt_topology(void) {}
- static inline void update_cpu_power(unsigned int cpuid) {}
+ static inline void update_cpu_capacity(unsigned int cpuid) {}
  #endif
  
   /*
@@@ -267,7 -267,7 +267,7 @@@ void store_cpu_topology(unsigned int cp
  
        update_siblings_masks(cpuid);
  
-       update_cpu_power(cpuid);
+       update_cpu_capacity(cpuid);
  
        printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
@@@ -297,7 -297,7 +297,7 @@@ void __init init_cpu_topology(void
  {
        unsigned int cpu;
  
-       /* init core mask and power*/
+       /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
  
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
  
-               set_power_scale(cpu, SCHED_POWER_SCALE);
+               set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
        }
        smp_wmb();
  
diff --combined arch/powerpc/kernel/smp.c
index 7753af2d261381bcc21bf3e21f8ee4f4880847e9,c51d16379cba7af750c501455e040eedf7ad1530..51a3ff78838aaf1eb6726e92cb871c128221eb2e
@@@ -36,7 -36,6 +36,7 @@@
  #include <linux/atomic.h>
  #include <asm/irq.h>
  #include <asm/hw_irq.h>
 +#include <asm/kvm_ppc.h>
  #include <asm/page.h>
  #include <asm/pgtable.h>
  #include <asm/prom.h>
@@@ -391,7 -390,6 +391,7 @@@ void smp_prepare_boot_cpu(void
  #ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
  #endif
 +      set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
        current_set[boot_cpuid] = task_thread_info(current);
  }
  
@@@ -459,9 -457,38 +459,9 @@@ int generic_check_cpu_restart(unsigned 
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
  }
  
 -static atomic_t secondary_inhibit_count;
 -
 -/*
 - * Don't allow secondary CPU threads to come online
 - */
 -void inhibit_secondary_onlining(void)
 +static bool secondaries_inhibited(void)
  {
 -      /*
 -       * This makes secondary_inhibit_count stable during cpu
 -       * online/offline operations.
 -       */
 -      get_online_cpus();
 -
 -      atomic_inc(&secondary_inhibit_count);
 -      put_online_cpus();
 -}
 -EXPORT_SYMBOL_GPL(inhibit_secondary_onlining);
 -
 -/*
 - * Allow secondary CPU threads to come online again
 - */
 -void uninhibit_secondary_onlining(void)
 -{
 -      get_online_cpus();
 -      atomic_dec(&secondary_inhibit_count);
 -      put_online_cpus();
 -}
 -EXPORT_SYMBOL_GPL(uninhibit_secondary_onlining);
 -
 -static int secondaries_inhibited(void)
 -{
 -      return atomic_read(&secondary_inhibit_count);
 +      return kvm_hv_mode_active();
  }
  
  #else /* HOTPLUG_CPU */
@@@ -490,7 -517,7 +490,7 @@@ int __cpu_up(unsigned int cpu, struct t
         * Don't allow secondary threads to come online if inhibited
         */
        if (threads_per_core > 1 && secondaries_inhibited() &&
 -          cpu % threads_per_core != 0)
 +          cpu_thread_in_subcore(cpu))
                return -EBUSY;
  
        if (smp_ops == NULL ||
@@@ -723,12 -750,6 +723,12 @@@ void start_secondary(void *unused
        }
        traverse_core_siblings(cpu, true);
  
 +      /*
 +       * numa_node_id() works after this.
 +       */
 +      set_numa_node(numa_cpu_lookup_table[cpu]);
 +      set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
 +
        smp_wmb();
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
@@@ -749,7 -770,7 +749,7 @@@ int setup_profiling_timer(unsigned int 
  /* cpumask of CPUs with asymmetric SMT dependency */
  static const int powerpc_smt_flags(void)
  {
-       int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+       int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
  
        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
diff --combined include/linux/kvm_host.h
index 970c68197c698898df483198995d9a0c558c0770,3c4bcf146159f25939a29b8fe75b4c1e3889fe96..ec4e3bd83d474e581bb3e4607c3c9c5f4e5319c8
@@@ -134,8 -134,6 +134,8 @@@ static inline bool is_error_page(struc
  #define KVM_REQ_EPR_EXIT          20
  #define KVM_REQ_SCAN_IOAPIC       21
  #define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
 +#define KVM_REQ_ENABLE_IBS        23
 +#define KVM_REQ_DISABLE_IBS       24
  
  #define KVM_USERSPACE_IRQ_SOURCE_ID           0
  #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID      1
@@@ -165,7 -163,6 +165,7 @@@ enum kvm_bus 
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
 +      KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
  };
  
@@@ -370,7 -367,6 +370,7 @@@ struct kvm 
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
 +      struct srcu_struct irq_srcu;
  #ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
  #endif
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
  #endif
 -      /* Protected by mmu_lock */
 -      bool tlbs_dirty;
 -
 +      long tlbs_dirty;
        struct list_head devices;
  };
  
@@@ -586,7 -584,7 +586,7 @@@ void mark_page_dirty(struct kvm *kvm, g
  
  void kvm_vcpu_block(struct kvm_vcpu *vcpu);
  void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
- bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
+ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
  void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
  void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
  void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
@@@ -881,13 -879,6 +881,13 @@@ static inline hpa_t pfn_to_hpa(pfn_t pf
        return (hpa_t)pfn << PAGE_SHIFT;
  }
  
 +static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
 +{
 +      unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
 +
 +      return kvm_is_error_hva(hva);
 +}
 +
  static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
  {
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
diff --combined include/linux/sched.h
index b8a98427f964cd05102621821e2f9ef926686004,b4f6bf98292188bb7eaad0995bee2527c62c64b6..306f4f0c987a006f43f520413f7de3a780f98a23
@@@ -136,6 -136,12 +136,6 @@@ struct filename
  #define VMACACHE_SIZE (1U << VMACACHE_BITS)
  #define VMACACHE_MASK (VMACACHE_SIZE - 1)
  
 -/*
 - * List of flags we want to share for kernel threads,
 - * if only because they are not used by them anyway.
 - */
 -#define CLONE_KERNEL  (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
 -
  /*
   * These are the constant used to fake the fixed-point load-average
   * counting. Some notes:
@@@ -214,7 -220,7 +214,7 @@@ print_cfs_rq(struct seq_file *m, int cp
  #define TASK_PARKED           512
  #define TASK_STATE_MAX                1024
  
 -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
  
  extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@@ -739,6 -745,7 +739,6 @@@ static inline int signal_group_exit(con
  struct user_struct {
        atomic_t __count;       /* reference count */
        atomic_t processes;     /* How many processes does this user have? */
 -      atomic_t files;         /* How many open files does this user have? */
        atomic_t sigpending;    /* How many pending signals does this user have? */
  #ifdef CONFIG_INOTIFY_USER
        atomic_t inotify_watches; /* How many inotify watches does this user have? */
@@@ -847,10 -854,10 +847,10 @@@ enum cpu_idle_type 
  };
  
  /*
-  * Increase resolution of cpu_power calculations
+  * Increase resolution of cpu_capacity calculations
   */
- #define SCHED_POWER_SHIFT     10
- #define SCHED_POWER_SCALE     (1L << SCHED_POWER_SHIFT)
+ #define SCHED_CAPACITY_SHIFT  10
+ #define SCHED_CAPACITY_SCALE  (1L << SCHED_CAPACITY_SHIFT)
  
  /*
   * sched-domains (multiprocessor balancing) declarations:
  #define SD_BALANCE_FORK               0x0008  /* Balance on fork, clone */
  #define SD_BALANCE_WAKE               0x0010  /* Balance on wakeup */
  #define SD_WAKE_AFFINE                0x0020  /* Wake task to waking CPU */
- #define SD_SHARE_CPUPOWER     0x0080  /* Domain members share cpu power */
+ #define SD_SHARE_CPUCAPACITY  0x0080  /* Domain members share cpu power */
  #define SD_SHARE_POWERDOMAIN  0x0100  /* Domain members share power domain */
  #define SD_SHARE_PKG_RESOURCES        0x0200  /* Domain members share cpu pkg resources */
  #define SD_SERIALIZE          0x0400  /* Only a single load balancing instance */
  #ifdef CONFIG_SCHED_SMT
  static inline const int cpu_smt_flags(void)
  {
-       return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+       return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
  }
  #endif
  
@@@ -1006,7 -1013,7 +1006,7 @@@ typedef const int (*sched_domain_flags_
  struct sd_data {
        struct sched_domain **__percpu sd;
        struct sched_group **__percpu sg;
-       struct sched_group_power **__percpu sgp;
+       struct sched_group_capacity **__percpu sgc;
  };
  
  struct sched_domain_topology_level {
@@@ -2173,7 -2180,7 +2173,7 @@@ static inline void sched_autogroup_fork
  static inline void sched_autogroup_exit(struct signal_struct *sig) { }
  #endif
  
- extern bool yield_to(struct task_struct *p, bool preempt);
+ extern int yield_to(struct task_struct *p, bool preempt);
  extern void set_user_nice(struct task_struct *p, long nice);
  extern int task_prio(const struct task_struct *p);
  /**
@@@ -2414,6 -2421,9 +2414,6 @@@ extern void flush_itimer_signals(void)
  
  extern void do_group_exit(int);
  
 -extern int allow_signal(int);
 -extern int disallow_signal(int);
 -
  extern int do_execve(struct filename *,
                     const char __user * const __user *,
                     const char __user * const __user *);
@@@ -2421,11 -2431,7 +2421,11 @@@ extern long do_fork(unsigned long, unsi
  struct task_struct *fork_idle(int);
  extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
  
 -extern void set_task_comm(struct task_struct *tsk, const char *from);
 +extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
 +static inline void set_task_comm(struct task_struct *tsk, const char *from)
 +{
 +      __set_task_comm(tsk, from, false);
 +}
  extern char *get_task_comm(char *to, struct task_struct *tsk);
  
  #ifdef CONFIG_SMP
@@@ -2961,7 -2967,7 +2961,7 @@@ static inline void inc_syscw(struct tas
  #define TASK_SIZE_OF(tsk)     TASK_SIZE
  #endif
  
 -#ifdef CONFIG_MM_OWNER
 +#ifdef CONFIG_MEMCG
  extern void mm_update_next_owner(struct mm_struct *mm);
  extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
  #else
@@@ -2972,7 -2978,7 +2972,7 @@@ static inline void mm_update_next_owner
  static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
  {
  }
 -#endif /* CONFIG_MM_OWNER */
 +#endif /* CONFIG_MEMCG */
  
  static inline unsigned long task_rlimit(const struct task_struct *tsk,
                unsigned int limit)
diff --combined kernel/sched/core.c
index 4f611561ba4cc59204522a67643ded9275b0bc37,54f5722aba792391ff8a6d31dc2153529a68c50a..3bdf01b494fe29c267a0abe73828b02a799a737d
@@@ -535,7 -535,7 +535,7 @@@ static inline void init_hrtick(void
        __old;                                                          \
  })
  
- #ifdef TIF_POLLING_NRFLAG
+ #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
  /*
   * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
   * this avoids any races wrt polling state changes and thereby avoids
@@@ -546,12 -546,44 +546,44 @@@ static bool set_nr_and_not_polling(stru
        struct thread_info *ti = task_thread_info(p);
        return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
  }
+ /*
+  * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
+  *
+  * If this returns true, then the idle task promises to call
+  * sched_ttwu_pending() and reschedule soon.
+  */
+ static bool set_nr_if_polling(struct task_struct *p)
+ {
+       struct thread_info *ti = task_thread_info(p);
+       typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+       for (;;) {
+               if (!(val & _TIF_POLLING_NRFLAG))
+                       return false;
+               if (val & _TIF_NEED_RESCHED)
+                       return true;
+               old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
+               if (old == val)
+                       break;
+               val = old;
+       }
+       return true;
+ }
  #else
  static bool set_nr_and_not_polling(struct task_struct *p)
  {
        set_tsk_need_resched(p);
        return true;
  }
+ #ifdef CONFIG_SMP
+ static bool set_nr_if_polling(struct task_struct *p)
+ {
+       return false;
+ }
+ #endif
  #endif
  
  /*
@@@ -580,6 -612,8 +612,8 @@@ void resched_task(struct task_struct *p
  
        if (set_nr_and_not_polling(p))
                smp_send_reschedule(cpu);
+       else
+               trace_sched_wake_idle_without_ipi(cpu);
  }
  
  void resched_cpu(int cpu)
@@@ -642,27 -676,10 +676,10 @@@ static void wake_up_idle_cpu(int cpu
        if (cpu == smp_processor_id())
                return;
  
-       /*
-        * This is safe, as this function is called with the timer
-        * wheel base lock of (cpu) held. When the CPU is on the way
-        * to idle and has not yet set rq->curr to idle then it will
-        * be serialized on the timer wheel base lock and take the new
-        * timer into account automatically.
-        */
-       if (rq->curr != rq->idle)
-               return;
-       /*
-        * We can set TIF_RESCHED on the idle task of the other CPU
-        * lockless. The worst case is that the other CPU runs the
-        * idle task through an additional NOOP schedule()
-        */
-       set_tsk_need_resched(rq->idle);
-       /* NEED_RESCHED must be visible before we test polling */
-       smp_mb();
-       if (!tsk_is_polling(rq->idle))
+       if (set_nr_and_not_polling(rq->idle))
                smp_send_reschedule(cpu);
+       else
+               trace_sched_wake_idle_without_ipi(cpu);
  }
  
  static bool wake_up_full_nohz_cpu(int cpu)
@@@ -888,7 -905,7 +905,7 @@@ static void update_rq_clock_task(struc
        rq->clock_task += delta;
  
  #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+       if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                sched_rt_avg_update(rq, irq_delta + steal);
  #endif
  }
@@@ -1367,7 -1384,7 +1384,7 @@@ out
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
 -                      printk_sched("process %d (%s) no longer affine to cpu%d\n",
 +                      printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }
@@@ -1521,13 -1538,17 +1538,17 @@@ static int ttwu_remote(struct task_stru
  }
  
  #ifdef CONFIG_SMP
- static void sched_ttwu_pending(void)
+ void sched_ttwu_pending(void)
  {
        struct rq *rq = this_rq();
        struct llist_node *llist = llist_del_all(&rq->wake_list);
        struct task_struct *p;
+       unsigned long flags;
  
-       raw_spin_lock(&rq->lock);
+       if (!llist)
+               return;
+       raw_spin_lock_irqsave(&rq->lock, flags);
  
        while (llist) {
                p = llist_entry(llist, struct task_struct, wake_entry);
                ttwu_do_activate(rq, p, 0);
        }
  
-       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
  }
  
  void scheduler_ipi(void)
  
  static void ttwu_queue_remote(struct task_struct *p, int cpu)
  {
-       if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
-               smp_send_reschedule(cpu);
+       struct rq *rq = cpu_rq(cpu);
+       if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
+               if (!set_nr_if_polling(rq->idle))
+                       smp_send_reschedule(cpu);
+               else
+                       trace_sched_wake_idle_without_ipi(cpu);
+       }
  }
  
  bool cpus_share_cache(int this_cpu, int that_cpu)
@@@ -2527,7 -2554,7 +2554,7 @@@ notrace unsigned long get_parent_ip(uns
  #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
                                defined(CONFIG_PREEMPT_TRACER))
  
 -void __kprobes preempt_count_add(int val)
 +void preempt_count_add(int val)
  {
  #ifdef CONFIG_DEBUG_PREEMPT
        /*
        }
  }
  EXPORT_SYMBOL(preempt_count_add);
 +NOKPROBE_SYMBOL(preempt_count_add);
  
 -void __kprobes preempt_count_sub(int val)
 +void preempt_count_sub(int val)
  {
  #ifdef CONFIG_DEBUG_PREEMPT
        /*
        __preempt_count_sub(val);
  }
  EXPORT_SYMBOL(preempt_count_sub);
 +NOKPROBE_SYMBOL(preempt_count_sub);
  
  #endif
  
@@@ -2859,7 -2884,6 +2886,7 @@@ asmlinkage __visible void __sched notra
                barrier();
        } while (need_resched());
  }
 +NOKPROBE_SYMBOL(preempt_schedule);
  EXPORT_SYMBOL(preempt_schedule);
  #endif /* CONFIG_PREEMPT */
  
@@@ -3726,7 -3750,7 +3753,7 @@@ SYSCALL_DEFINE3(sched_setattr, pid_t, p
        if (retval)
                return retval;
  
 -      if (attr.sched_policy < 0)
 +      if ((int)attr.sched_policy < 0)
                return -EINVAL;
  
        rcu_read_lock();
@@@ -4120,7 -4144,6 +4147,7 @@@ static void __cond_resched(void
  
  int __sched _cond_resched(void)
  {
 +      rcu_cond_resched();
        if (should_resched()) {
                __cond_resched();
                return 1;
@@@ -4139,18 -4162,15 +4166,18 @@@ EXPORT_SYMBOL(_cond_resched)
   */
  int __cond_resched_lock(spinlock_t *lock)
  {
 +      bool need_rcu_resched = rcu_should_resched();
        int resched = should_resched();
        int ret = 0;
  
        lockdep_assert_held(lock);
  
 -      if (spin_needbreak(lock) || resched) {
 +      if (spin_needbreak(lock) || resched || need_rcu_resched) {
                spin_unlock(lock);
                if (resched)
                        __cond_resched();
 +              else if (unlikely(need_rcu_resched))
 +                      rcu_resched();
                else
                        cpu_relax();
                ret = 1;
@@@ -4164,7 -4184,6 +4191,7 @@@ int __sched __cond_resched_softirq(void
  {
        BUG_ON(!in_softirq());
  
 +      rcu_cond_resched();  /* BH disabled OK, just recording QSes. */
        if (should_resched()) {
                local_bh_enable();
                __cond_resched();
@@@ -4219,7 -4238,7 +4246,7 @@@ EXPORT_SYMBOL(yield)
   *    false (0) if we failed to boost the target.
   *    -ESRCH if there's no task to yield to.
   */
- bool __sched yield_to(struct task_struct *p, bool preempt)
+ int __sched yield_to(struct task_struct *p, bool preempt)
  {
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
@@@ -5245,14 -5264,13 +5272,13 @@@ static int sched_domain_debug_one(struc
                }
  
                /*
-                * Even though we initialize ->power to something semi-sane,
-                * we leave power_orig unset. This allows us to detect if
+                * Even though we initialize ->capacity to something semi-sane,
+                * we leave capacity_orig unset. This allows us to detect if
                 * domain iteration is still funny without causing /0 traps.
                 */
-               if (!group->sgp->power_orig) {
+               if (!group->sgc->capacity_orig) {
                        printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: domain->cpu_power not "
-                                       "set\n");
+                       printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
                        break;
                }
  
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
  
                printk(KERN_CONT " %s", str);
-               if (group->sgp->power != SCHED_POWER_SCALE) {
-                       printk(KERN_CONT " (cpu_power = %d)",
-                               group->sgp->power);
+               if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
+                       printk(KERN_CONT " (cpu_capacity = %d)",
+                               group->sgc->capacity);
                }
  
                group = group->next;
@@@ -5334,7 -5352,7 +5360,7 @@@ static int sd_degenerate(struct sched_d
                         SD_BALANCE_NEWIDLE |
                         SD_BALANCE_FORK |
                         SD_BALANCE_EXEC |
-                        SD_SHARE_CPUPOWER |
+                        SD_SHARE_CPUCAPACITY |
                         SD_SHARE_PKG_RESOURCES |
                         SD_SHARE_POWERDOMAIN)) {
                if (sd->groups != sd->groups->next)
@@@ -5365,7 -5383,7 +5391,7 @@@ sd_parent_degenerate(struct sched_domai
                                SD_BALANCE_NEWIDLE |
                                SD_BALANCE_FORK |
                                SD_BALANCE_EXEC |
-                               SD_SHARE_CPUPOWER |
+                               SD_SHARE_CPUCAPACITY |
                                SD_SHARE_PKG_RESOURCES |
                                SD_PREFER_SIBLING |
                                SD_SHARE_POWERDOMAIN);
@@@ -5490,7 -5508,7 +5516,7 @@@ static struct root_domain *alloc_rootdo
        return rd;
  }
  
- static void free_sched_groups(struct sched_group *sg, int free_sgp)
+ static void free_sched_groups(struct sched_group *sg, int free_sgc)
  {
        struct sched_group *tmp, *first;
  
        do {
                tmp = sg->next;
  
-               if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
-                       kfree(sg->sgp);
+               if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
+                       kfree(sg->sgc);
  
                kfree(sg);
                sg = tmp;
@@@ -5520,7 -5538,7 +5546,7 @@@ static void free_sched_domain(struct rc
        if (sd->flags & SD_OVERLAP) {
                free_sched_groups(sd->groups, 1);
        } else if (atomic_dec_and_test(&sd->groups->ref)) {
-               kfree(sd->groups->sgp);
+               kfree(sd->groups->sgc);
                kfree(sd->groups);
        }
        kfree(sd);
@@@ -5731,17 -5749,17 +5757,17 @@@ build_overlap_sched_groups(struct sched
  
                cpumask_or(covered, covered, sg_span);
  
-               sg->sgp = *per_cpu_ptr(sdd->sgp, i);
-               if (atomic_inc_return(&sg->sgp->ref) == 1)
+               sg->sgc = *per_cpu_ptr(sdd->sgc, i);
+               if (atomic_inc_return(&sg->sgc->ref) == 1)
                        build_group_mask(sd, sg);
  
                /*
-                * Initialize sgp->power such that even if we mess up the
+                * Initialize sgc->capacity such that even if we mess up the
                 * domains and no possible iteration will get us here, we won't
                 * die on a /0 trap.
                 */
-               sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
-               sg->sgp->power_orig = sg->sgp->power;
+               sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+               sg->sgc->capacity_orig = sg->sgc->capacity;
  
                /*
                 * Make sure the first group of this domain contains the
@@@ -5779,8 -5797,8 +5805,8 @@@ static int get_group(int cpu, struct sd
  
        if (sg) {
                *sg = *per_cpu_ptr(sdd->sg, cpu);
-               (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
-               atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+               (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+               atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
        }
  
        return cpu;
  /*
   * build_sched_groups will build a circular linked list of the groups
   * covered by the given span, and will set each group's ->cpumask correctly,
-  * and ->cpu_power to 0.
+  * and ->cpu_capacity to 0.
   *
   * Assumes the sched_domain tree is fully constructed
   */
@@@ -5843,16 -5861,16 +5869,16 @@@ build_sched_groups(struct sched_domain 
  }
  
  /*
-  * Initialize sched groups cpu_power.
+  * Initialize sched groups cpu_capacity.
   *
-  * cpu_power indicates the capacity of sched group, which is used while
+  * cpu_capacity indicates the capacity of sched group, which is used while
   * distributing the load between different sched groups in a sched domain.
-  * Typically cpu_power for all the groups in a sched domain will be same unless
-  * there are asymmetries in the topology. If there are asymmetries, group
-  * having more cpu_power will pickup more load compared to the group having
-  * less cpu_power.
+  * Typically cpu_capacity for all the groups in a sched domain will be same
+  * unless there are asymmetries in the topology. If there are asymmetries,
+  * group having more cpu_capacity will pickup more load compared to the
+  * group having less cpu_capacity.
   */
- static void init_sched_groups_power(int cpu, struct sched_domain *sd)
+ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
  {
        struct sched_group *sg = sd->groups;
  
        if (cpu != group_balance_cpu(sg))
                return;
  
-       update_group_power(sd, cpu);
-       atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
+       update_group_capacity(sd, cpu);
+       atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
  }
  
  /*
@@@ -5958,8 -5976,8 +5984,8 @@@ static void claim_allocations(int cpu, 
        if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;
  
-       if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
-               *per_cpu_ptr(sdd->sgp, cpu) = NULL;
+       if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
+               *per_cpu_ptr(sdd->sgc, cpu) = NULL;
  }
  
  #ifdef CONFIG_NUMA
@@@ -5972,7 -5990,7 +5998,7 @@@ static int sched_domains_curr_level
  /*
   * SD_flags allowed in topology descriptions.
   *
-  * SD_SHARE_CPUPOWER      - describes SMT topologies
+  * SD_SHARE_CPUCAPACITY      - describes SMT topologies
   * SD_SHARE_PKG_RESOURCES - describes shared caches
   * SD_NUMA                - describes NUMA topologies
   * SD_SHARE_POWERDOMAIN   - describes shared power domain
   * SD_ASYM_PACKING        - describes SMT quirks
   */
  #define TOPOLOGY_SD_FLAGS             \
-       (SD_SHARE_CPUPOWER |            \
+       (SD_SHARE_CPUCAPACITY |         \
         SD_SHARE_PKG_RESOURCES |       \
         SD_NUMA |                      \
         SD_ASYM_PACKING |              \
@@@ -6027,7 -6045,7 +6053,7 @@@ sd_init(struct sched_domain_topology_le
                                        | 1*SD_BALANCE_FORK
                                        | 0*SD_BALANCE_WAKE
                                        | 1*SD_WAKE_AFFINE
-                                       | 0*SD_SHARE_CPUPOWER
+                                       | 0*SD_SHARE_CPUCAPACITY
                                        | 0*SD_SHARE_PKG_RESOURCES
                                        | 0*SD_SERIALIZE
                                        | 0*SD_PREFER_SIBLING
         * Convert topological properties into behaviour.
         */
  
-       if (sd->flags & SD_SHARE_CPUPOWER) {
+       if (sd->flags & SD_SHARE_CPUCAPACITY) {
                sd->imbalance_pct = 110;
                sd->smt_gain = 1178; /* ~15% */
  
@@@ -6361,14 -6379,14 +6387,14 @@@ static int __sdt_alloc(const struct cpu
                if (!sdd->sg)
                        return -ENOMEM;
  
-               sdd->sgp = alloc_percpu(struct sched_group_power *);
-               if (!sdd->sgp)
+               sdd->sgc = alloc_percpu(struct sched_group_capacity *);
+               if (!sdd->sgc)
                        return -ENOMEM;
  
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
-                       struct sched_group_power *sgp;
+                       struct sched_group_capacity *sgc;
  
                        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
  
                        *per_cpu_ptr(sdd->sg, j) = sg;
  
-                       sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
+                       sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
-                       if (!sgp)
+                       if (!sgc)
                                return -ENOMEM;
  
-                       *per_cpu_ptr(sdd->sgp, j) = sgp;
+                       *per_cpu_ptr(sdd->sgc, j) = sgc;
                }
        }
  
@@@ -6418,15 -6436,15 +6444,15 @@@ static void __sdt_free(const struct cpu
  
                        if (sdd->sg)
                                kfree(*per_cpu_ptr(sdd->sg, j));
-                       if (sdd->sgp)
-                               kfree(*per_cpu_ptr(sdd->sgp, j));
+                       if (sdd->sgc)
+                               kfree(*per_cpu_ptr(sdd->sgc, j));
                }
                free_percpu(sdd->sd);
                sdd->sd = NULL;
                free_percpu(sdd->sg);
                sdd->sg = NULL;
-               free_percpu(sdd->sgp);
-               sdd->sgp = NULL;
+               free_percpu(sdd->sgc);
+               sdd->sgc = NULL;
        }
  }
  
@@@ -6496,14 -6514,14 +6522,14 @@@ static int build_sched_domains(const st
                }
        }
  
-       /* Calculate CPU power for physical packages and nodes */
+       /* Calculate CPU capacity for physical packages and nodes */
        for (i = nr_cpumask_bits-1; i >= 0; i--) {
                if (!cpumask_test_cpu(i, cpu_map))
                        continue;
  
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        claim_allocations(i, sd);
-                       init_sched_groups_power(i, sd);
+                       init_sched_groups_capacity(i, sd);
                }
        }
  
@@@ -6946,7 -6964,7 +6972,7 @@@ void __init sched_init(void
  #ifdef CONFIG_SMP
                rq->sd = NULL;
                rq->rd = NULL;
-               rq->cpu_power = SCHED_POWER_SCALE;
+               rq->cpu_capacity = SCHED_CAPACITY_SCALE;
                rq->post_schedule = 0;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
@@@ -7672,7 -7690,7 +7698,7 @@@ cpu_cgroup_css_alloc(struct cgroup_subs
  static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
  {
        struct task_group *tg = css_tg(css);
 -      struct task_group *parent = css_tg(css_parent(css));
 +      struct task_group *parent = css_tg(css->parent);
  
        if (parent)
                sched_online_group(tg, parent);
@@@ -7803,7 -7821,8 +7829,7 @@@ static int tg_set_cfs_bandwidth(struct 
        /* restart the period timer (if active) to handle new period expiry */
        if (runtime_enabled && cfs_b->timer_active) {
                /* force a reprogram */
 -              cfs_b->timer_active = 0;
 -              __start_cfs_bandwidth(cfs_b);
 +              __start_cfs_bandwidth(cfs_b, true);
        }
        raw_spin_unlock_irq(&cfs_b->lock);
  
diff --combined kernel/sched/deadline.c
index 2b8cbf09d1a4add6fe837be0080a71a35f145164,0d6b17057188581665027d9113cfcd668032cf4c..fc4f98b1258f66cbbf3cf1fc1082cb909c0f1144
@@@ -57,8 -57,6 +57,6 @@@ void init_dl_bandwidth(struct dl_bandwi
        dl_b->dl_runtime = runtime;
  }
  
- extern unsigned long to_ratio(u64 period, u64 runtime);
  void init_dl_bw(struct dl_bw *dl_b)
  {
        raw_spin_lock_init(&dl_b->lock);
@@@ -348,7 -346,12 +346,7 @@@ static void replenish_dl_entity(struct 
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 -              static bool lag_once = false;
 -
 -              if (!lag_once) {
 -                      lag_once = true;
 -                      printk_sched("sched: DL replenish lagged to much\n");
 -              }
 +              printk_deferred_once("sched: DL replenish lagged to much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
@@@ -508,17 -511,9 +506,17 @@@ static enum hrtimer_restart dl_task_tim
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
 -      struct rq *rq = task_rq(p);
 +      struct rq *rq;
 +again:
 +      rq = task_rq(p);
        raw_spin_lock(&rq->lock);
  
 +      if (rq != task_rq(p)) {
 +              /* Task was moved, retrying. */
 +              raw_spin_unlock(&rq->lock);
 +              goto again;
 +      }
 +
        /*
         * We need to take care of a possible races here. In fact, the
         * task might have changed its scheduling policy to something
diff --combined kernel/sched/fair.c
index 9855e87d671a54982238d325f014162327a974e8,d3c731222199931e789f13d538e4715dba2ba67a..fea7d3335e1fdf3502fc72f5d64b9181bc7e4243
@@@ -1017,7 -1017,7 +1017,7 @@@ bool should_numa_migrate_memory(struct 
  static unsigned long weighted_cpuload(const int cpu);
  static unsigned long source_load(int cpu, int type);
  static unsigned long target_load(int cpu, int type);
- static unsigned long power_of(int cpu);
+ static unsigned long capacity_of(int cpu);
  static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
  
  /* Cached statistics for all CPUs within a node */
@@@ -1026,11 -1026,11 +1026,11 @@@ struct numa_stats 
        unsigned long load;
  
        /* Total compute capacity of CPUs on a node */
-       unsigned long power;
+       unsigned long compute_capacity;
  
        /* Approximate capacity in terms of runnable tasks on a node */
-       unsigned long capacity;
-       int has_capacity;
+       unsigned long task_capacity;
+       int has_free_capacity;
  };
  
  /*
@@@ -1046,7 -1046,7 +1046,7 @@@ static void update_numa_stats(struct nu
  
                ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(cpu);
-               ns->power += power_of(cpu);
+               ns->compute_capacity += capacity_of(cpu);
  
                cpus++;
        }
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
-        * We'll either bail at !has_capacity, or we'll detect a huge imbalance
-        * and bail there.
+        * We'll either bail at !has_free_capacity, or we'll detect a huge
+        * imbalance and bail there.
         */
        if (!cpus)
                return;
  
-       ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
-       ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
-       ns->has_capacity = (ns->nr_running < ns->capacity);
+       ns->load = (ns->load * SCHED_CAPACITY_SCALE) / ns->compute_capacity;
+       ns->task_capacity =
+               DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE);
+       ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
  }
  
  struct task_numa_env {
@@@ -1120,7 -1121,7 +1121,7 @@@ static bool load_too_imbalanced(long or
        old_imb = orig_dst_load * 100 - orig_src_load * env->imbalance_pct;
  
        /* Would this change make things worse? */
 -      return (old_imb > imb);
 +      return (imb > old_imb);
  }
  
  /*
@@@ -1195,8 -1196,8 +1196,8 @@@ static void task_numa_compare(struct ta
  
        if (!cur) {
                /* Is there capacity at our destination? */
-               if (env->src_stats.has_capacity &&
-                   !env->dst_stats.has_capacity)
+               if (env->src_stats.has_free_capacity &&
+                   !env->dst_stats.has_free_capacity)
                        goto unlock;
  
                goto balance;
@@@ -1213,7 -1214,7 +1214,7 @@@ balance
        orig_dst_load = env->dst_stats.load;
        orig_src_load = env->src_stats.load;
  
-       /* XXX missing power terms */
+       /* XXX missing capacity terms */
        load = task_h_load(env->p);
        dst_load = orig_dst_load + load;
        src_load = orig_src_load - load;
@@@ -1301,8 -1302,8 +1302,8 @@@ static int task_numa_migrate(struct tas
        groupimp = group_weight(p, env.dst_nid) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
  
-       /* If the preferred nid has capacity, try to use it. */
-       if (env.dst_stats.has_capacity)
+       /* If the preferred nid has free capacity, try to use it. */
+       if (env.dst_stats.has_free_capacity)
                task_numa_find_cpu(&env, taskimp, groupimp);
  
        /* No space available on the preferred nid. Look elsewhere. */
@@@ -1745,19 -1746,18 +1746,19 @@@ no_join
  void task_numa_free(struct task_struct *p)
  {
        struct numa_group *grp = p->numa_group;
 -      int i;
        void *numa_faults = p->numa_faults_memory;
 +      unsigned long flags;
 +      int i;
  
        if (grp) {
 -              spin_lock_irq(&grp->lock);
 +              spin_lock_irqsave(&grp->lock, flags);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
                        grp->faults[i] -= p->numa_faults_memory[i];
                grp->total_faults -= p->total_numa_faults;
  
                list_del(&p->numa_entry);
                grp->nr_tasks--;
 -              spin_unlock_irq(&grp->lock);
 +              spin_unlock_irqrestore(&grp->lock, flags);
                rcu_assign_pointer(p->numa_group, NULL);
                put_numa_group(grp);
        }
@@@ -3180,7 -3180,7 +3181,7 @@@ static int assign_cfs_rq_runtime(struc
                 */
                if (!cfs_b->timer_active) {
                        __refill_cfs_bandwidth_runtime(cfs_b);
 -                      __start_cfs_bandwidth(cfs_b);
 +                      __start_cfs_bandwidth(cfs_b, false);
                }
  
                if (cfs_b->runtime > 0) {
@@@ -3225,10 -3225,12 +3226,12 @@@ static void expire_cfs_rq_runtime(struc
         * has not truly expired.
         *
         * Fortunately we can determine whether this is the case by checking
-        * whether the global deadline has advanced.
+        * whether the global deadline has advanced. It is valid to compare
+        * cfs_b->runtime_expires without any locks since we only care about
+        * exact equality, so a partial write will still work.
         */
  
-       if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
+       if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
                /* extend local deadline, drift is bounded above by 2 ticks */
                cfs_rq->runtime_expires += TICK_NSEC;
        } else {
@@@ -3359,7 -3361,7 +3362,7 @@@ static void throttle_cfs_rq(struct cfs_
        raw_spin_lock(&cfs_b->lock);
        list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
        if (!cfs_b->timer_active)
 -              __start_cfs_bandwidth(cfs_b);
 +              __start_cfs_bandwidth(cfs_b, false);
        raw_spin_unlock(&cfs_b->lock);
  }
  
@@@ -3457,21 -3459,21 +3460,21 @@@ next
  static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
  {
        u64 runtime, runtime_expires;
-       int idle = 1, throttled;
+       int throttled;
  
-       raw_spin_lock(&cfs_b->lock);
        /* no need to continue the timer with no bandwidth constraint */
        if (cfs_b->quota == RUNTIME_INF)
-               goto out_unlock;
+               goto out_deactivate;
  
        throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-       /* idle depends on !throttled (for the case of a large deficit) */
-       idle = cfs_b->idle && !throttled;
        cfs_b->nr_periods += overrun;
  
-       /* if we're going inactive then everything else can be deferred */
-       if (idle)
-               goto out_unlock;
+       /*
+        * idle depends on !throttled (for the case of a large deficit), and if
+        * we're going inactive then everything else can be deferred
+        */
+       if (cfs_b->idle && !throttled)
+               goto out_deactivate;
  
        /*
         * if we have relooped after returning idle once, we need to update our
        if (!throttled) {
                /* mark as potentially idle for the upcoming period */
                cfs_b->idle = 1;
-               goto out_unlock;
+               return 0;
        }
  
        /* account preceding periods in which throttling occurred */
         * timer to remain active while there are any throttled entities.)
         */
        cfs_b->idle = 0;
- out_unlock:
-       if (idle)
-               cfs_b->timer_active = 0;
-       raw_spin_unlock(&cfs_b->lock);
  
-       return idle;
+       return 0;
+ out_deactivate:
+       cfs_b->timer_active = 0;
+       return 1;
  }
  
  /* a cfs_rq won't donate quota below this amount */
@@@ -3707,6 -3709,7 +3710,7 @@@ static enum hrtimer_restart sched_cfs_p
        int overrun;
        int idle = 0;
  
+       raw_spin_lock(&cfs_b->lock);
        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, cfs_b->period);
  
                idle = do_sched_cfs_period_timer(cfs_b, overrun);
        }
+       raw_spin_unlock(&cfs_b->lock);
  
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
  }
@@@ -3741,7 -3745,7 +3746,7 @@@ static void init_cfs_rq_runtime(struct 
  }
  
  /* requires cfs_b->lock, may release to reprogram timer */
 -void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 +void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
  {
        /*
         * The timer may be active because we're trying to set a new bandwidth
                cpu_relax();
                raw_spin_lock(&cfs_b->lock);
                /* if someone else restarted the timer then we're done */
 -              if (cfs_b->timer_active)
 +              if (!force && cfs_b->timer_active)
                        return;
        }
  
@@@ -3775,8 -3779,6 +3780,6 @@@ static void __maybe_unused unthrottle_o
        struct cfs_rq *cfs_rq;
  
        for_each_leaf_cfs_rq(rq, cfs_rq) {
-               struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
                if (!cfs_rq->runtime_enabled)
                        continue;
  
                 * clock_task is not advancing so we just need to make sure
                 * there's some valid quota amount
                 */
-               cfs_rq->runtime_remaining = cfs_b->quota;
+               cfs_rq->runtime_remaining = 1;
                if (cfs_rq_throttled(cfs_rq))
                        unthrottle_cfs_rq(cfs_rq);
        }
@@@ -4041,9 -4043,9 +4044,9 @@@ static unsigned long target_load(int cp
        return max(rq->cpu_load[type-1], total);
  }
  
- static unsigned long power_of(int cpu)
+ static unsigned long capacity_of(int cpu)
  {
-       return cpu_rq(cpu)->cpu_power;
+       return cpu_rq(cpu)->cpu_capacity;
  }
  
  static unsigned long cpu_avg_load_per_task(int cpu)
@@@ -4065,7 -4067,7 +4068,7 @@@ static void record_wakee(struct task_st
         * about the boundary, really active task won't care
         * about the loss.
         */
-       if (jiffies > current->wakee_flip_decay_ts + HZ) {
+       if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
                current->wakee_flips >>= 1;
                current->wakee_flip_decay_ts = jiffies;
        }
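
The record_wakee() hunk above swaps a raw "jiffies > wakee_flip_decay_ts + HZ" comparison for time_after(), which stays correct when the jiffies counter wraps. A small standalone sketch of why that matters, using a simplified form of the kernel macro (the real time_after() also type-checks its arguments):

#include <stdio.h>

/* simplified: true if a is after b, via wrap-safe signed subtraction */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long before_wrap = (unsigned long)-5;	/* just before the counter wraps */
	unsigned long after_wrap = 10;			/* shortly after the wrap         */

	/* plain '>' gets the order wrong across the wrap; time_after() does not */
	printf("plain '>'   : %d\n", after_wrap > before_wrap);			/* 0 */
	printf("time_after(): %d\n", time_after(after_wrap, before_wrap));	/* 1 */
	return 0;
}
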
@@@ -4286,12 -4288,12 +4289,12 @@@ static int wake_affine(struct sched_dom
                s64 this_eff_load, prev_eff_load;
  
                this_eff_load = 100;
-               this_eff_load *= power_of(prev_cpu);
+               this_eff_load *= capacity_of(prev_cpu);
                this_eff_load *= this_load +
                        effective_load(tg, this_cpu, weight, weight);
  
                prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-               prev_eff_load *= power_of(this_cpu);
+               prev_eff_load *= capacity_of(this_cpu);
                prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
  
                balanced = this_eff_load <= prev_eff_load;
@@@ -4367,8 -4369,8 +4370,8 @@@ find_idlest_group(struct sched_domain *
                        avg_load += load;
                }
  
-               /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
+               /* Adjust by relative CPU capacity of the group */
+               avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
  
                if (local_group) {
                        this_load = avg_load;
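
The find_idlest_group() hunk above normalizes each group's summed load by its capacity (avg_load * SCHED_CAPACITY_SCALE / capacity), so a group of weaker CPUs reports a proportionally higher effective load. A tiny sketch of that fixed-point adjustment with illustrative numbers:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	unsigned long load = 800;				/* illustrative summed group load */
	unsigned long full = SCHED_CAPACITY_SCALE;		/* one full-speed CPU             */
	unsigned long half = SCHED_CAPACITY_SCALE / 2;		/* one half-capacity CPU          */

	/* same shape as the avg_load adjustment in find_idlest_group() */
	printf("full-capacity group: %lu\n", load * SCHED_CAPACITY_SCALE / full);	/* 800  */
	printf("half-capacity group: %lu\n", load * SCHED_CAPACITY_SCALE / half);	/* 1600 */
	return 0;
}
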
@@@ -4948,14 -4950,14 +4951,14 @@@ static bool yield_to_task_fair(struct r
   *
   *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
   *
-  * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
+  * C_i is the compute capacity of cpu i, typically it is the
   * fraction of 'recent' time available for SCHED_OTHER task execution. But it
   * can also include other factors [XXX].
   *
   * To achieve this balance we define a measure of imbalance which follows
   * directly from (1):
   *
-  *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
+  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
   *
   * We then move tasks around to minimize the imbalance. In the continuous
   * function space it is obvious this converges, in the discrete case we get
@@@ -5530,13 -5532,13 +5533,13 @@@ struct sg_lb_stats 
        unsigned long group_load; /* Total load over the CPUs of the group */
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long load_per_task;
-       unsigned long group_power;
+       unsigned long group_capacity;
        unsigned int sum_nr_running; /* Nr tasks running in the group */
-       unsigned int group_capacity;
+       unsigned int group_capacity_factor;
        unsigned int idle_cpus;
        unsigned int group_weight;
        int group_imb; /* Is there an imbalance in the group ? */
-       int group_has_capacity; /* Is there extra capacity in the group? */
+       int group_has_free_capacity;
  #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@@ -5551,7 -5553,7 +5554,7 @@@ struct sd_lb_stats 
        struct sched_group *busiest;    /* Busiest group in this sd */
        struct sched_group *local;      /* Local group in this sd */
        unsigned long total_load;       /* Total load of all groups in sd */
-       unsigned long total_pwr;        /* Total power of all groups in sd */
+       unsigned long total_capacity;   /* Total capacity of all groups in sd */
        unsigned long avg_load; /* Average load across all groups in sd */
  
        struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
@@@ -5570,7 -5572,7 +5573,7 @@@ static inline void init_sd_lb_stats(str
                .busiest = NULL,
                .local = NULL,
                .total_load = 0UL,
-               .total_pwr = 0UL,
+               .total_capacity = 0UL,
                .busiest_stat = {
                        .avg_load = 0UL,
                },
@@@ -5605,17 -5607,17 +5608,17 @@@ static inline int get_sd_load_idx(struc
        return load_idx;
  }
  
- static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+ static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
  {
-       return SCHED_POWER_SCALE;
+       return SCHED_CAPACITY_SCALE;
  }
  
- unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+ unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
  {
-       return default_scale_freq_power(sd, cpu);
+       return default_scale_capacity(sd, cpu);
  }
  
- static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
+ static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu)
  {
        unsigned long weight = sd->span_weight;
        unsigned long smt_gain = sd->smt_gain;
        return smt_gain;
  }
  
- unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+ unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu)
  {
-       return default_scale_smt_power(sd, cpu);
+       return default_scale_smt_capacity(sd, cpu);
  }
  
- static unsigned long scale_rt_power(int cpu)
+ static unsigned long scale_rt_capacity(int cpu)
  {
        struct rq *rq = cpu_rq(cpu);
        u64 total, available, age_stamp, avg;
        total = sched_avg_period() + delta;
  
        if (unlikely(total < avg)) {
-               /* Ensures that power won't end up being negative */
+               /* Ensures that capacity won't end up being negative */
                available = 0;
        } else {
                available = total - avg;
        }
  
-       if (unlikely((s64)total < SCHED_POWER_SCALE))
-               total = SCHED_POWER_SCALE;
+       if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
+               total = SCHED_CAPACITY_SCALE;
  
-       total >>= SCHED_POWER_SHIFT;
+       total >>= SCHED_CAPACITY_SHIFT;
  
        return div_u64(available, total);
  }
  
- static void update_cpu_power(struct sched_domain *sd, int cpu)
+ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
  {
        unsigned long weight = sd->span_weight;
-       unsigned long power = SCHED_POWER_SCALE;
+       unsigned long capacity = SCHED_CAPACITY_SCALE;
        struct sched_group *sdg = sd->groups;
  
-       if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-               if (sched_feat(ARCH_POWER))
-                       power *= arch_scale_smt_power(sd, cpu);
+       if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
+               if (sched_feat(ARCH_CAPACITY))
+                       capacity *= arch_scale_smt_capacity(sd, cpu);
                else
-                       power *= default_scale_smt_power(sd, cpu);
+                       capacity *= default_scale_smt_capacity(sd, cpu);
  
-               power >>= SCHED_POWER_SHIFT;
+               capacity >>= SCHED_CAPACITY_SHIFT;
        }
  
-       sdg->sgp->power_orig = power;
+       sdg->sgc->capacity_orig = capacity;
  
-       if (sched_feat(ARCH_POWER))
-               power *= arch_scale_freq_power(sd, cpu);
+       if (sched_feat(ARCH_CAPACITY))
+               capacity *= arch_scale_freq_capacity(sd, cpu);
        else
-               power *= default_scale_freq_power(sd, cpu);
+               capacity *= default_scale_capacity(sd, cpu);
  
-       power >>= SCHED_POWER_SHIFT;
+       capacity >>= SCHED_CAPACITY_SHIFT;
  
-       power *= scale_rt_power(cpu);
-       power >>= SCHED_POWER_SHIFT;
+       capacity *= scale_rt_capacity(cpu);
+       capacity >>= SCHED_CAPACITY_SHIFT;
  
-       if (!power)
-               power = 1;
+       if (!capacity)
+               capacity = 1;
  
-       cpu_rq(cpu)->cpu_power = power;
-       sdg->sgp->power = power;
+       cpu_rq(cpu)->cpu_capacity = capacity;
+       sdg->sgc->capacity = capacity;
  }
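
A rough user-space sketch of how update_cpu_capacity() chains its scale factors: each factor is a value around SCHED_CAPACITY_SCALE, and every multiplication is followed by a shift so the result stays in the same units. The factor values below are invented.

    /*
     * Illustrative sketch of the capacity scaling chain; not kernel code.
     */
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        unsigned long capacity    = SCHED_CAPACITY_SCALE;
        unsigned long smt_factor  = 589;   /* e.g. one of two SMT siblings */
        unsigned long freq_factor = 1024;  /* arch reports full speed      */
        unsigned long rt_factor   = 768;   /* 25% eaten by rt/irq work     */

        capacity = (capacity * smt_factor)  >> SCHED_CAPACITY_SHIFT;
        capacity = (capacity * freq_factor) >> SCHED_CAPACITY_SHIFT;
        capacity = (capacity * rt_factor)   >> SCHED_CAPACITY_SHIFT;
        if (!capacity)
            capacity = 1;

        printf("cpu_capacity = %lu of %lu\n", capacity, SCHED_CAPACITY_SCALE);
        return 0;
    }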
  
- void update_group_power(struct sched_domain *sd, int cpu)
+ void update_group_capacity(struct sched_domain *sd, int cpu)
  {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long power, power_orig;
+       unsigned long capacity, capacity_orig;
        unsigned long interval;
  
        interval = msecs_to_jiffies(sd->balance_interval);
        interval = clamp(interval, 1UL, max_load_balance_interval);
-       sdg->sgp->next_update = jiffies + interval;
+       sdg->sgc->next_update = jiffies + interval;
  
        if (!child) {
-               update_cpu_power(sd, cpu);
+               update_cpu_capacity(sd, cpu);
                return;
        }
  
-       power_orig = power = 0;
+       capacity_orig = capacity = 0;
  
        if (child->flags & SD_OVERLAP) {
                /*
                 */
  
                for_each_cpu(cpu, sched_group_cpus(sdg)) {
-                       struct sched_group_power *sgp;
+                       struct sched_group_capacity *sgc;
                        struct rq *rq = cpu_rq(cpu);
  
                        /*
-                        * build_sched_domains() -> init_sched_groups_power()
+                        * build_sched_domains() -> init_sched_groups_capacity()
                         * gets here before we've attached the domains to the
                         * runqueues.
                         *
-                        * Use power_of(), which is set irrespective of domains
-                        * in update_cpu_power().
+                        * Use capacity_of(), which is set irrespective of domains
+                        * in update_cpu_capacity().
                         *
-                        * This avoids power/power_orig from being 0 and
+                        * This avoids capacity/capacity_orig from being 0 and
                         * causing divide-by-zero issues on boot.
                         *
-                        * Runtime updates will correct power_orig.
+                        * Runtime updates will correct capacity_orig.
                         */
                        if (unlikely(!rq->sd)) {
-                               power_orig += power_of(cpu);
-                               power += power_of(cpu);
+                               capacity_orig += capacity_of(cpu);
+                               capacity += capacity_of(cpu);
                                continue;
                        }
  
-                       sgp = rq->sd->groups->sgp;
-                       power_orig += sgp->power_orig;
-                       power += sgp->power;
+                       sgc = rq->sd->groups->sgc;
+                       capacity_orig += sgc->capacity_orig;
+                       capacity += sgc->capacity;
                }
        } else  {
                /*
  
                group = child->groups;
                do {
-                       power_orig += group->sgp->power_orig;
-                       power += group->sgp->power;
+                       capacity_orig += group->sgc->capacity_orig;
+                       capacity += group->sgc->capacity;
                        group = group->next;
                } while (group != child->groups);
        }
  
-       sdg->sgp->power_orig = power_orig;
-       sdg->sgp->power = power;
+       sdg->sgc->capacity_orig = capacity_orig;
+       sdg->sgc->capacity = capacity;
  }
  
  /*
@@@ -5778,15 -5780,15 +5781,15 @@@ static inline in
  fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  {
        /*
-        * Only siblings can have significantly less than SCHED_POWER_SCALE
+        * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
         */
-       if (!(sd->flags & SD_SHARE_CPUPOWER))
+       if (!(sd->flags & SD_SHARE_CPUCAPACITY))
                return 0;
  
        /*
-        * If ~90% of the cpu_power is still there, we're good.
+        * If ~90% of the cpu_capacity is still there, we're good.
         */
-       if (group->sgp->power * 32 > group->sgp->power_orig * 29)
+       if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
                return 1;
  
        return 0;
  
  static inline int sg_imbalanced(struct sched_group *group)
  {
-       return group->sgp->imbalance;
+       return group->sgc->imbalance;
  }
  
  /*
-  * Compute the group capacity.
+  * Compute the group capacity factor.
   *
-  * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
+  * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
   * first dividing out the smt factor and computing the actual number of cores
-  * and limit power unit capacity with that.
+  * and limit unit capacity with that.
   */
- static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
+ static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
  {
-       unsigned int capacity, smt, cpus;
-       unsigned int power, power_orig;
+       unsigned int capacity_factor, smt, cpus;
+       unsigned int capacity, capacity_orig;
  
-       power = group->sgp->power;
-       power_orig = group->sgp->power_orig;
+       capacity = group->sgc->capacity;
+       capacity_orig = group->sgc->capacity_orig;
        cpus = group->group_weight;
  
-       /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
-       smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
-       capacity = cpus / smt; /* cores */
+       /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
+       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
+       capacity_factor = cpus / smt; /* cores */
  
-       capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
-       if (!capacity)
-               capacity = fix_small_capacity(env->sd, group);
+       capacity_factor = min_t(unsigned,
+               capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
+       if (!capacity_factor)
+               capacity_factor = fix_small_capacity(env->sd, group);
  
-       return capacity;
+       return capacity_factor;
  }
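
The comment above describes the "phantom core" problem; here is a minimal sketch of that arithmetic with made-up numbers (two SMT siblings whose combined capacity_orig is 1178 must not be counted as two full cores). The macros are local simplifications valid for positive operands, not the kernel versions.

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE     1024UL
    #define DIV_ROUND_UP(n, d)       (((n) + (d) - 1) / (d))
    #define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

    int main(void)
    {
        unsigned long cpus = 2, capacity = 1178, capacity_orig = 1178;
        unsigned long smt, factor, limit;

        smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
        factor = cpus / smt;                            /* 2 / 2 = 1 core */

        limit = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
        if (factor > limit)
            factor = limit;

        printf("capacity_factor = %lu\n", factor);      /* 1, not 2 */
        return 0;
    }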
  
  /**
@@@ -5890,9 -5893,9 +5894,9 @@@ static inline void update_sg_lb_stats(s
                        sgs->idle_cpus++;
        }
  
-       /* Adjust by relative CPU power of the group */
-       sgs->group_power = group->sgp->power;
-       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
+       /* Adjust by relative CPU capacity of the group */
+       sgs->group_capacity = group->sgc->capacity;
+       sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
  
        if (sgs->sum_nr_running)
                sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
        sgs->group_weight = group->group_weight;
  
        sgs->group_imb = sg_imbalanced(group);
-       sgs->group_capacity = sg_capacity(env, group);
+       sgs->group_capacity_factor = sg_capacity_factor(env, group);
  
-       if (sgs->group_capacity > sgs->sum_nr_running)
-               sgs->group_has_capacity = 1;
+       if (sgs->group_capacity_factor > sgs->sum_nr_running)
+               sgs->group_has_free_capacity = 1;
  }
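
A small illustration of the normalisation done by the avg_load line above: load is expressed relative to the group's capacity, so for the same raw load a group with half the capacity looks twice as loaded. The values are invented.

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    int main(void)
    {
        unsigned long group_load = 2048;
        unsigned long big_capacity = 2048, little_capacity = 1024;

        printf("big:    avg_load = %lu\n",
               group_load * SCHED_CAPACITY_SCALE / big_capacity);    /* 1024 */
        printf("little: avg_load = %lu\n",
               group_load * SCHED_CAPACITY_SCALE / little_capacity); /* 2048 */
        return 0;
    }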
  
  /**
@@@ -5927,7 -5930,7 +5931,7 @@@ static bool update_sd_pick_busiest(stru
        if (sgs->avg_load <= sds->busiest_stat.avg_load)
                return false;
  
-       if (sgs->sum_nr_running > sgs->group_capacity)
+       if (sgs->sum_nr_running > sgs->group_capacity_factor)
                return true;
  
        if (sgs->group_imb)
@@@ -6007,8 -6010,8 +6011,8 @@@ static inline void update_sd_lb_stats(s
                        sgs = &sds->local_stat;
  
                        if (env->idle != CPU_NEWLY_IDLE ||
-                           time_after_eq(jiffies, sg->sgp->next_update))
-                               update_group_power(env->sd, env->dst_cpu);
+                           time_after_eq(jiffies, sg->sgc->next_update))
+                               update_group_capacity(env->sd, env->dst_cpu);
                }
  
                update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
  
                /*
                 * In case the child domain prefers tasks go to siblings
-                * first, lower the sg capacity to one so that we'll try
+                * first, lower the sg capacity factor to one so that we'll try
                 * and move all the excess tasks away. We lower the capacity
                 * of a group only if the local group has the capacity to fit
-                * these excess tasks, i.e. nr_running < group_capacity. The
+                * these excess tasks, i.e. nr_running < group_capacity_factor. The
                 * extra check prevents the case where you always pull from the
                 * heaviest group when it is already under-utilized (possible
                 * with a large weight task outweighs the tasks on the system).
                 */
                if (prefer_sibling && sds->local &&
-                   sds->local_stat.group_has_capacity)
-                       sgs->group_capacity = min(sgs->group_capacity, 1U);
+                   sds->local_stat.group_has_free_capacity)
+                       sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
  
                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
  next_group:
                /* Now, start updating sd_lb_stats */
                sds->total_load += sgs->group_load;
-               sds->total_pwr += sgs->group_power;
+               sds->total_capacity += sgs->group_capacity;
  
                sg = sg->next;
        } while (sg != env->sd->groups);
@@@ -6085,8 -6088,8 +6089,8 @@@ static int check_asym_packing(struct lb
                return 0;
  
        env->imbalance = DIV_ROUND_CLOSEST(
-               sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
-               SCHED_POWER_SCALE);
+               sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
+               SCHED_CAPACITY_SCALE);
  
        return 1;
  }
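
For the DIV_ROUND_CLOSEST() line above, a tiny sketch of the unit conversion: avg_load is kept in SCHED_CAPACITY_SCALE units, so multiplying by the group capacity and dividing the scale back out yields an absolute load figure to move. Numbers are invented; the macro is a local simplification for positive operands.

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE     1024UL
    #define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

    int main(void)
    {
        unsigned long avg_load = 1536;        /* 1.5 * SCHED_CAPACITY_SCALE */
        unsigned long group_capacity = 2048;  /* two full CPUs              */

        printf("imbalance = %lu\n",
               DIV_ROUND_CLOSEST(avg_load * group_capacity,
                                 SCHED_CAPACITY_SCALE));   /* 3072 */
        return 0;
    }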
  static inline
  void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
  {
-       unsigned long tmp, pwr_now = 0, pwr_move = 0;
+       unsigned long tmp, capa_now = 0, capa_move = 0;
        unsigned int imbn = 2;
        unsigned long scaled_busy_load_per_task;
        struct sg_lb_stats *local, *busiest;
                imbn = 1;
  
        scaled_busy_load_per_task =
-               (busiest->load_per_task * SCHED_POWER_SCALE) /
-               busiest->group_power;
+               (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
+               busiest->group_capacity;
  
        if (busiest->avg_load + scaled_busy_load_per_task >=
            local->avg_load + (scaled_busy_load_per_task * imbn)) {
  
        /*
         * OK, we don't have enough imbalance to justify moving tasks,
-        * however we may be able to increase total CPU power used by
+        * however we may be able to increase total CPU capacity used by
         * moving them.
         */
  
-       pwr_now += busiest->group_power *
+       capa_now += busiest->group_capacity *
                        min(busiest->load_per_task, busiest->avg_load);
-       pwr_now += local->group_power *
+       capa_now += local->group_capacity *
                        min(local->load_per_task, local->avg_load);
-       pwr_now /= SCHED_POWER_SCALE;
+       capa_now /= SCHED_CAPACITY_SCALE;
  
        /* Amount of load we'd subtract */
        if (busiest->avg_load > scaled_busy_load_per_task) {
-               pwr_move += busiest->group_power *
+               capa_move += busiest->group_capacity *
                            min(busiest->load_per_task,
                                busiest->avg_load - scaled_busy_load_per_task);
        }
  
        /* Amount of load we'd add */
-       if (busiest->avg_load * busiest->group_power <
-           busiest->load_per_task * SCHED_POWER_SCALE) {
-               tmp = (busiest->avg_load * busiest->group_power) /
-                     local->group_power;
+       if (busiest->avg_load * busiest->group_capacity <
+           busiest->load_per_task * SCHED_CAPACITY_SCALE) {
+               tmp = (busiest->avg_load * busiest->group_capacity) /
+                     local->group_capacity;
        } else {
-               tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-                     local->group_power;
+               tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
+                     local->group_capacity;
        }
-       pwr_move += local->group_power *
+       capa_move += local->group_capacity *
                    min(local->load_per_task, local->avg_load + tmp);
-       pwr_move /= SCHED_POWER_SCALE;
+       capa_move /= SCHED_CAPACITY_SCALE;
  
        /* Move if we gain throughput */
-       if (pwr_move > pwr_now)
+       if (capa_move > capa_now)
                env->imbalance = busiest->load_per_task;
  }
  
@@@ -6187,7 -6190,7 +6191,7 @@@ static inline void calculate_imbalance(
        /*
         * In the presence of smp nice balancing, certain scenarios can have
         * max load less than avg load(as we skip the groups at or below
-        * its cpu_power, while calculating max_load..)
+        * its cpu_capacity, while calculating max_load..)
         */
        if (busiest->avg_load <= sds->avg_load ||
            local->avg_load >= sds->avg_load) {
                 * have to drop below capacity to reach cpu-load equilibrium.
                 */
                load_above_capacity =
-                       (busiest->sum_nr_running - busiest->group_capacity);
+                       (busiest->sum_nr_running - busiest->group_capacity_factor);
  
-               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-               load_above_capacity /= busiest->group_power;
+               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
+               load_above_capacity /= busiest->group_capacity;
        }
  
        /*
  
        /* How much load to actually move to equalise the imbalance */
        env->imbalance = min(
-               max_pull * busiest->group_power,
-               (sds->avg_load - local->avg_load) * local->group_power
-       ) / SCHED_POWER_SCALE;
+               max_pull * busiest->group_capacity,
+               (sds->avg_load - local->avg_load) * local->group_capacity
+       ) / SCHED_CAPACITY_SCALE;
  
        /*
         * if *imbalance is less than the average load per runnable task
@@@ -6276,7 -6279,8 +6280,8 @@@ static struct sched_group *find_busiest
        if (!sds.busiest || busiest->sum_nr_running == 0)
                goto out_balanced;
  
-       sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
+       sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
+                                               / sds.total_capacity;
  
        /*
         * If the busiest group is imbalanced the below checks don't
                goto force_balance;
  
        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-       if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
-           !busiest->group_has_capacity)
+       if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
+           !busiest->group_has_free_capacity)
                goto force_balance;
  
        /*
@@@ -6342,11 -6346,11 +6347,11 @@@ static struct rq *find_busiest_queue(st
                                     struct sched_group *group)
  {
        struct rq *busiest = NULL, *rq;
-       unsigned long busiest_load = 0, busiest_power = 1;
+       unsigned long busiest_load = 0, busiest_capacity = 1;
        int i;
  
        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
-               unsigned long power, capacity, wl;
+               unsigned long capacity, capacity_factor, wl;
                enum fbq_type rt;
  
                rq = cpu_rq(i);
                if (rt > env->fbq_type)
                        continue;
  
-               power = power_of(i);
-               capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-               if (!capacity)
-                       capacity = fix_small_capacity(env->sd, group);
+               capacity = capacity_of(i);
+               capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
+               if (!capacity_factor)
+                       capacity_factor = fix_small_capacity(env->sd, group);
  
                wl = weighted_cpuload(i);
  
                /*
                 * When comparing with imbalance, use weighted_cpuload()
-                * which is not scaled with the cpu power.
+                * which is not scaled with the cpu capacity.
                 */
-               if (capacity && rq->nr_running == 1 && wl > env->imbalance)
+               if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
                        continue;
  
                /*
                 * For the load comparisons with the other cpu's, consider
-                * the weighted_cpuload() scaled with the cpu power, so that
-                * the load can be moved away from the cpu that is potentially
-                * running at a lower capacity.
+                * the weighted_cpuload() scaled with the cpu capacity, so
+                * that the load can be moved away from the cpu that is
+                * potentially running at a lower capacity.
                 *
-                * Thus we're looking for max(wl_i / power_i), crosswise
+                * Thus we're looking for max(wl_i / capacity_i), crosswise
                 * multiplication to rid ourselves of the division works out
-                * to: wl_i * power_j > wl_j * power_i;  where j is our
-                * previous maximum.
+                * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
+                * our previous maximum.
                 */
-               if (wl * busiest_power > busiest_load * power) {
+               if (wl * busiest_capacity > busiest_load * capacity) {
                        busiest_load = wl;
-                       busiest_power = power;
+                       busiest_capacity = capacity;
                        busiest = rq;
                }
        }
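
The comment above spells out the crosswise multiplication; a stand-alone sketch of the same comparison, with invented loads and capacities, looks like this:

    /*
     * Pick the rq with the largest wl/capacity ratio without dividing.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long wl[2]       = { 900, 700 };   /* weighted_cpuload() */
        unsigned long capacity[2] = { 1024, 512 };  /* capacity_of()      */
        unsigned long busiest_load = 0, busiest_capacity = 1;
        int i, busiest = -1;

        for (i = 0; i < 2; i++) {
            /* wl[i]/capacity[i] > busiest_load/busiest_capacity, rearranged */
            if (wl[i] * busiest_capacity > busiest_load * capacity[i]) {
                busiest_load = wl[i];
                busiest_capacity = capacity[i];
                busiest = i;
            }
        }

        printf("busiest cpu = %d\n", busiest);   /* 1: 700/512 > 900/1024 */
        return 0;
    }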
@@@ -6609,7 -6613,7 +6614,7 @@@ more_balance
                 * We failed to reach balance because of affinity.
                 */
                if (sd_parent) {
-                       int *group_imbalance = &sd_parent->groups->sgp->imbalance;
+                       int *group_imbalance = &sd_parent->groups->sgc->imbalance;
  
                        if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
                                *group_imbalance = 1;
@@@ -6996,7 -7000,7 +7001,7 @@@ static inline void set_cpu_sd_state_bus
                goto unlock;
        sd->nohz_idle = 0;
  
-       atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+       atomic_inc(&sd->groups->sgc->nr_busy_cpus);
  unlock:
        rcu_read_unlock();
  }
@@@ -7013,7 -7017,7 +7018,7 @@@ void set_cpu_sd_state_idle(void
                goto unlock;
        sd->nohz_idle = 1;
  
-       atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+       atomic_dec(&sd->groups->sgc->nr_busy_cpus);
  unlock:
        rcu_read_unlock();
  }
@@@ -7192,12 -7196,17 +7197,17 @@@ static void nohz_idle_balance(struct r
  
                rq = cpu_rq(balance_cpu);
  
-               raw_spin_lock_irq(&rq->lock);
-               update_rq_clock(rq);
-               update_idle_cpu_load(rq);
-               raw_spin_unlock_irq(&rq->lock);
-               rebalance_domains(rq, CPU_IDLE);
+               /*
+                * If time for next balance is due,
+                * do the balance.
+                */
+               if (time_after_eq(jiffies, rq->next_balance)) {
+                       raw_spin_lock_irq(&rq->lock);
+                       update_rq_clock(rq);
+                       update_idle_cpu_load(rq);
+                       raw_spin_unlock_irq(&rq->lock);
+                       rebalance_domains(rq, CPU_IDLE);
+               }
  
                if (time_after(this_rq->next_balance, rq->next_balance))
                        this_rq->next_balance = rq->next_balance;
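
The new time_after_eq() gate relies on the kernel's wrap-safe jiffies comparison. A stand-alone sketch of why the comparison is done on a signed difference rather than a plain >= (the values are contrived to sit across a wrap):

    #include <stdio.h>

    #define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

    int main(void)
    {
        unsigned long jiffies = 5;                        /* just wrapped    */
        unsigned long next_balance = (unsigned long)-10;  /* set before wrap */

        /* naive '>=' says "not due yet"; the wrap-safe check says it is due */
        printf("naive: %d  wrap-safe: %d\n",
               jiffies >= next_balance,
               time_after_eq(jiffies, next_balance));
        return 0;
    }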
@@@ -7212,7 -7221,7 +7222,7 @@@ end
   * of an idle cpu is the system.
   *   - This rq has more than one task.
   *   - At any scheduler domain level, this cpu's scheduler group has multiple
-  *     busy cpu's exceeding the group's power.
+  *     busy cpu's exceeding the group's capacity.
   *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
   *     domain span are idle.
   */
@@@ -7220,7 -7229,7 +7230,7 @@@ static inline int nohz_kick_needed(stru
  {
        unsigned long now = jiffies;
        struct sched_domain *sd;
-       struct sched_group_power *sgp;
+       struct sched_group_capacity *sgc;
        int nr_busy, cpu = rq->cpu;
  
        if (unlikely(rq->idle_balance))
        sd = rcu_dereference(per_cpu(sd_busy, cpu));
  
        if (sd) {
-               sgp = sd->groups->sgp;
-               nr_busy = atomic_read(&sgp->nr_busy_cpus);
+               sgc = sd->groups->sgc;
+               nr_busy = atomic_read(&sgc->nr_busy_cpus);
  
                if (nr_busy > 1)
                        goto need_kick_unlock;
diff --combined kernel/sched/rt.c
index b3512f1afce9361588a8ccb50f7ad75540122426,43406db306af95226bd8746074ff94940c0ba944..a49083192c64c306952c752ec6b3a05a0df7205d
@@@ -890,8 -890,14 +890,8 @@@ static int sched_rt_runtime_exceeded(st
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
 -                      static bool once = false;
 -
                        rt_rq->rt_throttled = 1;
 -
 -                      if (!once) {
 -                              once = true;
 -                              printk_sched("sched: RT throttling activated\n");
 -                      }
 +                      printk_deferred_once("sched: RT throttling activated\n");
                } else {
                        /*
                         * In case we did anyway, make it go away,
@@@ -918,7 -924,6 +918,6 @@@ static void update_curr_rt(struct rq *r
  {
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
-       struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;
  
        if (curr->sched_class != &rt_sched_class)
                return;
  
        for_each_sched_rt_entity(rt_se) {
-               rt_rq = rt_rq_of_se(rt_se);
+               struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
  
                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
diff --combined kernel/sched/sched.h
index e47679b04d167b8cdc2abf91326f7d52243b3996,2f8636199b83a14e6ba8831c455b4ec8d6496fd8..31cc02ebc54ed82f5bf3f62fae879a1c0343a97d
@@@ -278,7 -278,7 +278,7 @@@ extern void init_cfs_bandwidth(struct c
  extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
  
  extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 -extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 +extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
  extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
  
  extern void free_rt_sched_group(struct task_group *tg);
@@@ -567,7 -567,7 +567,7 @@@ struct rq 
        struct root_domain *rd;
        struct sched_domain *sd;
  
-       unsigned long cpu_power;
+       unsigned long cpu_capacity;
  
        unsigned char idle_balance;
        /* For active balancing */
@@@ -670,6 -670,8 +670,8 @@@ extern int migrate_swap(struct task_str
  
  #ifdef CONFIG_SMP
  
+ extern void sched_ttwu_pending(void);
  #define rcu_dereference_check_sched_domain(p) \
        rcu_dereference_check((p), \
                              lockdep_is_held(&sched_domains_mutex))
@@@ -728,15 -730,15 +730,15 @@@ DECLARE_PER_CPU(struct sched_domain *, 
  DECLARE_PER_CPU(struct sched_domain *, sd_busy);
  DECLARE_PER_CPU(struct sched_domain *, sd_asym);
  
- struct sched_group_power {
+ struct sched_group_capacity {
        atomic_t ref;
        /*
-        * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-        * single CPU.
+        * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
+        * for a single CPU.
         */
-       unsigned int power, power_orig;
+       unsigned int capacity, capacity_orig;
        unsigned long next_update;
-       int imbalance; /* XXX unrelated to power but shared group state */
+       int imbalance; /* XXX unrelated to capacity but shared group state */
        /*
         * Number of busy cpus in this group.
         */
@@@ -750,7 -752,7 +752,7 @@@ struct sched_group 
        atomic_t ref;
  
        unsigned int group_weight;
-       struct sched_group_power *sgp;
+       struct sched_group_capacity *sgc;
  
        /*
         * The CPUs this group covers.
@@@ -773,7 -775,7 +775,7 @@@ static inline struct cpumask *sched_gro
   */
  static inline struct cpumask *sched_group_mask(struct sched_group *sg)
  {
-       return to_cpumask(sg->sgp->cpumask);
+       return to_cpumask(sg->sgc->cpumask);
  }
  
  /**
@@@ -787,6 -789,10 +789,10 @@@ static inline unsigned int group_first_
  
  extern int group_balance_cpu(struct sched_group *sg);
  
+ #else
+ static inline void sched_ttwu_pending(void) { }
  #endif /* CONFIG_SMP */
  
  #include "stats.h"
@@@ -1167,7 -1173,7 +1173,7 @@@ extern const struct sched_class idle_sc
  
  #ifdef CONFIG_SMP
  
- extern void update_group_power(struct sched_domain *sd, int cpu);
+ extern void update_group_capacity(struct sched_domain *sd, int cpu);
  
  extern void trigger_load_balance(struct rq *rq);
  
diff --combined virt/kvm/kvm_main.c
index c86be0f983db706c81cfe60aedfb77f4a315e20f,86d1c457458d46ce5182cf3d549eec0800baf8d6..4b6c01b477f9cf86df4a69504de6940c5f6bebbf
@@@ -186,12 -186,9 +186,12 @@@ static bool make_all_cpus_request(struc
  
  void kvm_flush_remote_tlbs(struct kvm *kvm)
  {
 +      long dirty_count = kvm->tlbs_dirty;
 +
 +      smp_mb();
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
 -      kvm->tlbs_dirty = false;
 +      cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
  }
  EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
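
The tlbs_dirty hunk above samples the dirty count before the flush and clears it with cmpxchg() only if it has not moved, so an increment from a racing vCPU is never lost. A user-space sketch of that pattern, with a GCC builtin standing in for the kernel's cmpxchg()/smp_mb() and the flush itself omitted:

    #include <stdio.h>

    static long tlbs_dirty = 3;

    int main(void)
    {
        long dirty_count = tlbs_dirty;   /* sample before the flush */

        __sync_synchronize();            /* stands in for smp_mb()  */
        /* ... the remote TLB flush would happen here ... */

        tlbs_dirty++;                    /* simulated racing vCPU   */

        /* clear only if nothing was dirtied meanwhile: fails here */
        __sync_val_compare_and_swap(&tlbs_dirty, dirty_count, 0);

        printf("tlbs_dirty = %ld\n", tlbs_dirty);   /* 4: not lost */
        return 0;
    }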
  
@@@ -457,11 -454,11 +457,11 @@@ static struct kvm *kvm_create_vm(unsign
  
        r = kvm_arch_init_vm(kvm, type);
        if (r)
 -              goto out_err_nodisable;
 +              goto out_err_no_disable;
  
        r = hardware_enable_all();
        if (r)
 -              goto out_err_nodisable;
 +              goto out_err_no_disable;
  
  #ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
        r = -ENOMEM;
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
 -              goto out_err_nosrcu;
 +              goto out_err_no_srcu;
        kvm_init_memslots_id(kvm);
        if (init_srcu_struct(&kvm->srcu))
 -              goto out_err_nosrcu;
 +              goto out_err_no_srcu;
 +      if (init_srcu_struct(&kvm->irq_srcu))
 +              goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                        GFP_KERNEL);
        return kvm;
  
  out_err:
 +      cleanup_srcu_struct(&kvm->irq_srcu);
 +out_err_no_irq_srcu:
        cleanup_srcu_struct(&kvm->srcu);
 -out_err_nosrcu:
 +out_err_no_srcu:
        hardware_disable_all();
 -out_err_nodisable:
 +out_err_no_disable:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
        kfree(kvm->memslots);
@@@ -608,7 -601,6 +608,7 @@@ static void kvm_destroy_vm(struct kvm *
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
        kvm_free_physmem(kvm);
 +      cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
        hardware_disable_all();
@@@ -645,12 -637,14 +645,12 @@@ static int kvm_vm_release(struct inode 
   */
  static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
  {
 -#ifndef CONFIG_S390
        unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
  
        memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
        if (!memslot->dirty_bitmap)
                return -ENOMEM;
  
 -#endif /* !CONFIG_S390 */
        return 0;
  }
  
@@@ -1714,11 -1708,11 +1714,11 @@@ void kvm_vcpu_kick(struct kvm_vcpu *vcp
  EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
  #endif /* !CONFIG_S390 */
  
- bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
  {
        struct pid *pid;
        struct task_struct *task = NULL;
-       bool ret = false;
+       int ret = 0;
  
        rcu_read_lock();
        pid = rcu_dereference(target->pid);
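
This hunk matches the "sched: Fix signedness bug in yield_to()" change in the merge: yield_to() can return a negative errno such as -ESRCH, and a bool return type collapses that to plain "true". A minimal user-space illustration (fake_yield_to() is a stand-in, not the kernel function):

    #include <stdio.h>
    #include <stdbool.h>
    #include <errno.h>

    static int fake_yield_to(void)
    {
        return -ESRCH;                  /* target task already gone */
    }

    int main(void)
    {
        bool as_bool = fake_yield_to(); /* collapses to 1 (true)    */
        int  as_int  = fake_yield_to(); /* keeps -ESRCH (-3)        */

        printf("bool=%d int=%d\n", as_bool, as_int);
        return 0;
    }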
@@@ -2928,7 -2922,6 +2928,7 @@@ static int __kvm_io_bus_read(struct kvm
  
        return -EOPNOTSUPP;
  }
 +EXPORT_SYMBOL_GPL(kvm_io_bus_write);
  
  /* kvm_io_bus_read - called under kvm->slots_lock */
  int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,