Merge branch 'pm-cpufreq-sched' into pm-cpufreq
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Sat, 1 Oct 2016 23:42:33 +0000 (01:42 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Sat, 1 Oct 2016 23:42:33 +0000 (01:42 +0200)
drivers/cpufreq/Kconfig
drivers/cpufreq/intel_pstate.c

diff --combined drivers/cpufreq/Kconfig
index 9be9013c29140d2eeb8f7d87bd4761eaa51c68ec,4dc95250cf4eaf667d1d1de203807f8128011e8d..d8b164a7c4e517f8da42507ecaf979043dace3d8
@@@ -194,7 -194,7 +194,7 @@@ config CPU_FREQ_GOV_CONSERVATIVE
          If in doubt, say N.
  
  config CPU_FREQ_GOV_SCHEDUTIL
-       tristate "'schedutil' cpufreq policy governor"
+       bool "'schedutil' cpufreq policy governor"
        depends on CPU_FREQ && SMP
        select CPU_FREQ_GOV_ATTR_SET
        select IRQ_WORK
          frequency tipping point is at utilization/capacity equal to 80% in
          both cases.
  
-         To compile this driver as a module, choose M here: the module will
-         be called cpufreq_schedutil.
          If in doubt, say N.
  
  comment "CPU frequency scaling drivers"
@@@ -225,7 -222,7 +222,7 @@@ config CPUFREQ_DT
        help
          This adds a generic DT based cpufreq driver for frequency management.
          It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
 -        systems which share clock and voltage across all CPUs.
 +        systems.
  
          If in doubt, say N.
  
diff --combined drivers/cpufreq/intel_pstate.c
index 5095b890a06a11d70b90000965bf970b0b01540a,86c29af7eb77f1939b2d1e127bf5fb3ce87a09fa..806f2039571e56ec69bd2cad697f788821826c1a
@@@ -181,6 -181,8 +181,8 @@@ struct _pid 
   * @cpu:              CPU number for this instance data
   * @update_util:      CPUFreq utility callback information
   * @update_util_set:  CPUFreq utility callback is set
+  * @iowait_boost:     iowait-related boost fraction
+  * @last_update:      Time of the last update.
   * @pstate:           Stores P state limits for this CPU
   * @vid:              Stores VID limits for this CPU
   * @pid:              Stores PID parameters for this CPU
@@@ -206,6 -208,7 +208,7 @@@ struct cpudata 
        struct vid_data vid;
        struct _pid pid;
  
+       u64     last_update;
        u64     last_sample_time;
        u64     prev_aperf;
        u64     prev_mperf;
        struct acpi_processor_performance acpi_perf_data;
        bool valid_pss_table;
  #endif
+       unsigned int iowait_boost;
  };
  
  static struct cpudata **all_cpu_data;
   * @p_gain_pct:               PID proportional gain
   * @i_gain_pct:               PID integral gain
   * @d_gain_pct:               PID derivative gain
+  * @boost_iowait:     Whether or not to use iowait boosting.
   *
   * Stores per CPU model static PID configuration data.
   */
@@@ -240,6 -245,7 +245,7 @@@ struct pstate_adjust_policy 
        int p_gain_pct;
        int d_gain_pct;
        int i_gain_pct;
+       bool boost_iowait;
  };
  
  /**
@@@ -1029,7 -1035,7 +1035,7 @@@ static struct cpu_defaults core_params 
        },
  };
  
 -static struct cpu_defaults silvermont_params = {
 +static const struct cpu_defaults silvermont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
+               .boost_iowait = true,
        },
        .funcs = {
                .get_max = atom_get_max_pstate,
        },
  };
  
 -static struct cpu_defaults airmont_params = {
 +static const struct cpu_defaults airmont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
+               .boost_iowait = true,
        },
        .funcs = {
                .get_max = atom_get_max_pstate,
        },
  };
  
 -static struct cpu_defaults knl_params = {
 +static const struct cpu_defaults knl_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
        },
  };
  
 -static struct cpu_defaults bxt_params = {
 +static const struct cpu_defaults bxt_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
                .p_gain_pct = 14,
                .d_gain_pct = 0,
                .i_gain_pct = 4,
+               .boost_iowait = true,
        },
        .funcs = {
                .get_max = core_get_max_pstate,
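
Only the Atom-family parameter sets (silvermont_params, airmont_params,
bxt_params) set .boost_iowait = true; those are the models wired to the
cpu_load algorithm rewritten below, while the core and KNL defaults keep the
performance algorithm and never consult the boost. The per-model default
still has to reach the global pid_params that intel_pstate_update_util()
tests; a hedged sketch of that step, modeled on the file's copy_pid_params()
helper (the body below is an illustration, not part of this diff):

    /* Sketch only: propagate the per-CPU-model default into the global
     * PID parameter block consulted on the hot path. */
    static void copy_pid_params(struct pstate_adjust_policy *policy)
    {
            pid_params.sample_rate_ms = policy->sample_rate_ms;
            pid_params.p_gain_pct = policy->p_gain_pct;
            pid_params.i_gain_pct = policy->i_gain_pct;
            pid_params.d_gain_pct = policy->d_gain_pct;
            pid_params.deadband = policy->deadband;
            pid_params.setpoint = policy->setpoint;
            pid_params.boost_iowait = policy->boost_iowait;  /* new field */
    }
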
@@@ -1222,36 -1231,18 +1231,18 @@@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
  static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
  {
        struct sample *sample = &cpu->sample;
-       u64 cummulative_iowait, delta_iowait_us;
-       u64 delta_iowait_mperf;
-       u64 mperf, now;
-       int32_t cpu_load;
+       int32_t busy_frac, boost;
  
-       cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);
+       busy_frac = div_fp(sample->mperf, sample->tsc);
  
-       /*
-        * Convert iowait time into number of IO cycles spent at max_freq.
-        * IO is considered as busy only for the cpu_load algorithm. For
-        * performance this is not needed since we always try to reach the
-        * maximum P-State, so we are already boosting the IOs.
-        */
-       delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
-       delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
-               cpu->pstate.max_pstate, MSEC_PER_SEC);
+       boost = cpu->iowait_boost;
+       cpu->iowait_boost >>= 1;
  
-       mperf = cpu->sample.mperf + delta_iowait_mperf;
-       cpu->prev_cummulative_iowait = cummulative_iowait;
+       if (busy_frac < boost)
+               busy_frac = boost;
  
-       /*
-        * The load can be estimated as the ratio of the mperf counter
-        * running at a constant frequency during active periods
-        * (C0) and the time stamp counter running at the same frequency
-        * also during C-states.
-        */
-       cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
-       cpu->sample.busy_scaled = cpu_load;
-       return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
+       sample->busy_scaled = busy_frac * 100;
+       return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
  }
  
  static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
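
The rewrite above drops the get_cpu_iowait_time_us() bookkeeping entirely:
the load is now the fixed-point ratio mperf/tsc, and an iowait wakeup is
accounted for by a boost value that halves on every sample until the real
load overtakes it. A stand-alone restatement of that arithmetic (plain C;
the 8-bit fixed point and the sample values are illustrative stand-ins for
the kernel's FRAC_BITS and counters):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8
    #define int_tofp(x) ((int32_t)(x) << FRAC_BITS)

    /* Fixed-point divide in the spirit of the kernel's div_fp(). */
    static int32_t div_fp(int64_t x, int64_t y)
    {
            return (int32_t)((x << FRAC_BITS) / y);
    }

    int main(void)
    {
            int32_t iowait_boost = int_tofp(1); /* set after an iowait wakeup */
            int64_t mperf = 40, tsc = 100;      /* the CPU was ~40% busy */

            for (int sample = 0; sample < 5; sample++) {
                    int32_t busy_frac = div_fp(mperf, tsc);
                    int32_t boost = iowait_boost;

                    iowait_boost >>= 1;         /* decay: halve per sample */
                    if (busy_frac < boost)
                            busy_frac = boost;  /* boost wins until it decays */

                    printf("sample %d: busy_scaled = %d%%\n",
                           sample, (busy_frac * 100) >> FRAC_BITS);
            }
            return 0;                           /* prints 100, 50, 39, 39, 39 */
    }
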
@@@ -1325,15 -1316,29 +1316,29 @@@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
                sample->mperf,
                sample->aperf,
                sample->tsc,
-               get_avg_frequency(cpu));
+               get_avg_frequency(cpu),
+               fp_toint(cpu->iowait_boost * 100));
  }
  
  static void intel_pstate_update_util(struct update_util_data *data, u64 time,
-                                    unsigned long util, unsigned long max)
+                                    unsigned int flags)
  {
        struct cpudata *cpu = container_of(data, struct cpudata, update_util);
-       u64 delta_ns = time - cpu->sample.time;
+       u64 delta_ns;
+
+       if (pid_params.boost_iowait) {
+               if (flags & SCHED_CPUFREQ_IOWAIT) {
+                       cpu->iowait_boost = int_tofp(1);
+               } else if (cpu->iowait_boost) {
+                       /* Clear iowait_boost if the CPU may have been idle. */
+                       delta_ns = time - cpu->last_update;
+                       if (delta_ns > TICK_NSEC)
+                               cpu->iowait_boost = 0;
+               }
+               cpu->last_update = time;
+       }
  
+       delta_ns = time - cpu->sample.time;
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);
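
The hook's signature change (util/max dropped in favor of a flags word) is
what lets the scheduler tell the driver that a task just woke from I/O wait
via SCHED_CPUFREQ_IOWAIT. The boost lifecycle in the new hook, restated as a
self-contained sketch (the flag value, TICK_NSEC and the struct are
stand-ins for the kernel's definitions):

    #include <stdint.h>

    #define SCHED_CPUFREQ_IOWAIT (1U << 0)  /* stand-in for the kernel flag */
    #define TICK_NSEC 4000000ULL            /* stand-in: one 250 Hz tick */
    #define FRAC_BITS 8
    #define int_tofp(x) ((uint32_t)(x) << FRAC_BITS)

    struct cpu_state {
            uint64_t last_update;           /* mirrors cpudata.last_update */
            uint32_t iowait_boost;          /* mirrors cpudata.iowait_boost */
    };

    /* Set the boost to 1.0 on an iowait wakeup; clear it again once the
     * CPU may have sat idle for more than one scheduler tick. */
    static void update_iowait_boost(struct cpu_state *cpu, uint64_t time_ns,
                                    unsigned int flags)
    {
            if (flags & SCHED_CPUFREQ_IOWAIT) {
                    cpu->iowait_boost = int_tofp(1);
            } else if (cpu->iowait_boost) {
                    uint64_t delta_ns = time_ns - cpu->last_update;

                    if (delta_ns > TICK_NSEC)
                            cpu->iowait_boost = 0;
            }
            cpu->last_update = time_ns;
    }
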