Merge branches 'pm-domains', 'pm-sleep' and 'pm-cpufreq'
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 10 Jul 2017 20:45:16 +0000 (22:45 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 10 Jul 2017 20:45:16 +0000 (22:45 +0200)
* pm-domains:
  PM / Domains: provide pm_genpd_poweroff_noirq() stub
  Revert "PM / Domains: Handle safely genpd_syscore_switch() call on non-genpd device"

* pm-sleep:
  PM / sleep: constify attribute_group structures

* pm-cpufreq:
  cpufreq: intel_pstate: constify attribute_group structures
  cpufreq: cpufreq_stats: constify attribute_group structures
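
"Constify" here means marking sysfs attribute groups that are never
modified after initialization as const, so they can be placed in
read-only memory. A minimal sketch of the pattern, with a hypothetical
device attribute that is not taken from these patches:

    /* The group is only read by sysfs, never written, so once
     * declared const it can live in .rodata. */
    static struct attribute *example_attrs[] = {
            &dev_attr_example.attr,
            NULL,
    };

    static const struct attribute_group example_attr_group = {
            .attrs = example_attrs,
    };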

drivers/base/power/domain.c
drivers/cpufreq/intel_pstate.c

drivers/base/power/domain.c: Simple merge

drivers/cpufreq/intel_pstate.c:
index 48a98f11a84ee79b9ccabcff6c6865a0c51a6e26,283491f742d3d78659696bd58c48fc5a3bd7a370,eb1158532de31e7aee418162135a7495b10f9860,9aa4fa128bfa59aa5b15013fa1c742d1912bca0a..d6f323560da39f87d1219a158aabfaf35ceea07d
@@@@@ -231,8 -245,9 -231,10 -231,10 +231,8 @@@@@ struct global_params 
     * @prev_cummulative_iowait: IO Wait time difference from last and
     *                  current sample
     * @sample:         Storage for storing last Sample data
 -   * @perf_limits:    Pointer to perf_limit unique to this CPU
 -   *                  Not all field in the structure are applicable
 -   *                  when per cpu controls are enforced
  -- * @min_perf:               Minimum capacity limit as a fraction of the maximum
  -- *                  turbo P-state capacity.
  -- * @max_perf:               Maximum capacity limit as a fraction of the maximum
  -- *                  turbo P-state capacity.
 +++ * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
 +++ * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
     * @acpi_perf_data: Stores ACPI perf information read from _PSS
     * @valid_pss_table:        Set to true for valid ACPI _PSS entries found
     * @epp_powersave:  Last saved HWP energy performance preference
@@@@@ -264,8 -279,7 -266,8 -266,8 +264,8 @@@@@ struct cpudata 
        u64     prev_tsc;
        u64     prev_cummulative_iowait;
        struct sample sample;
 -      struct perf_limits *perf_limits;
  --    int32_t min_perf;
  --    int32_t max_perf;
 +++    int32_t min_perf_ratio;
 +++    int32_t max_perf_ratio;
    #ifdef CONFIG_ACPI
        struct acpi_processor_performance acpi_perf_data;
        bool valid_pss_table;
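
For context: the removed min_perf/max_perf fields held fixed-point
fractions of the turbo P-state, while the new *_perf_ratio fields hold
each limit directly as an integer ratio, removing fixed-point
conversions from the hot paths. A hedged before/after sketch with
hypothetical values:

    /* Before: 50% of a turbo ratio of 40, stored in Q-format. */
    max = fp_ext_toint(40 * cpu->max_perf);
    /* After: the bound is stored as the ratio itself. */
    max = cpu->max_perf_ratio;      /* e.g. 20 */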
@@@@@ -788,87 -838,96 -794,80 -794,80 +788,87 @@@@@ static struct freq_attr *hwp_cpufreq_attrs
        NULL,
    };
    
 -  static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
  --static void intel_pstate_hwp_set(unsigned int cpu)
 +++static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
 +++                                 int *current_max)
    {
 -      int min, hw_min, max, hw_max, cpu;
 -      struct perf_limits *perf_limits = &global;
  --    struct cpudata *cpu_data = all_cpu_data[cpu];
  --    int min, hw_min, max, hw_max;
 ---    u64 value, cap;
  --    s16 epp;
 +++    u64 cap;
    
 -      for_each_cpu(cpu, policy->cpus) {
 -              struct cpudata *cpu_data = all_cpu_data[cpu];
 -              s16 epp;
 +      rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
  --    hw_min = HWP_LOWEST_PERF(cap);
 +      if (global.no_turbo)
  --            hw_max = HWP_GUARANTEED_PERF(cap);
 +++            *current_max = HWP_GUARANTEED_PERF(cap);
 +      else
  --            hw_max = HWP_HIGHEST_PERF(cap);
 +++            *current_max = HWP_HIGHEST_PERF(cap);
  ++
 -              if (per_cpu_limits)
 -                      perf_limits = all_cpu_data[cpu]->perf_limits;
 +++    *phy_max = HWP_HIGHEST_PERF(cap);
 +++}
  ++
 -              rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
 -              hw_min = HWP_LOWEST_PERF(cap);
 -              if (global.no_turbo)
 -                      hw_max = HWP_GUARANTEED_PERF(cap);
 -              else
 -                      hw_max = HWP_HIGHEST_PERF(cap);
 +++static void intel_pstate_hwp_set(unsigned int cpu)
 +++{
 +++    struct cpudata *cpu_data = all_cpu_data[cpu];
 +++    int max, min;
 +++    u64 value;
 +++    s16 epp;
  ++
 -              max = fp_ext_toint(hw_max * perf_limits->max_perf);
 -              if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
 -                      min = max;
 -              else
 -                      min = fp_ext_toint(hw_max * perf_limits->min_perf);
 +++    max = cpu_data->max_perf_ratio;
 +++    min = cpu_data->min_perf_ratio;
    
 -              rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
  --    max = fp_ext_toint(hw_max * cpu_data->max_perf);
 +      if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
 +              min = max;
  --    else
  --            min = fp_ext_toint(hw_max * cpu_data->min_perf);
    
 -              value &= ~HWP_MIN_PERF(~0L);
 -              value |= HWP_MIN_PERF(min);
 +      rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
    
 -              value &= ~HWP_MAX_PERF(~0L);
 -              value |= HWP_MAX_PERF(max);
 +      value &= ~HWP_MIN_PERF(~0L);
 +      value |= HWP_MIN_PERF(min);
    
 -              if (cpu_data->epp_policy == cpu_data->policy)
 -                      goto skip_epp;
 +      value &= ~HWP_MAX_PERF(~0L);
 +      value |= HWP_MAX_PERF(max);
    
 -              cpu_data->epp_policy = cpu_data->policy;
 +      if (cpu_data->epp_policy == cpu_data->policy)
 +              goto skip_epp;
    
 -              if (cpu_data->epp_saved >= 0) {
 -                      epp = cpu_data->epp_saved;
 -                      cpu_data->epp_saved = -EINVAL;
 -                      goto update_epp;
 -              }
 +      cpu_data->epp_policy = cpu_data->policy;
    
 -              if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
 -                      epp = intel_pstate_get_epp(cpu_data, value);
 -                      cpu_data->epp_powersave = epp;
 -                      /* If EPP read was failed, then don't try to write */
 -                      if (epp < 0)
 -                              goto skip_epp;
 +      if (cpu_data->epp_saved >= 0) {
 +              epp = cpu_data->epp_saved;
 +              cpu_data->epp_saved = -EINVAL;
 +              goto update_epp;
 +      }
    
 +      if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
 +              epp = intel_pstate_get_epp(cpu_data, value);
 +              cpu_data->epp_powersave = epp;
 +              /* If EPP read was failed, then don't try to write */
 +              if (epp < 0)
 +                      goto skip_epp;
    
 -                      epp = 0;
 -              } else {
 -                      /* skip setting EPP, when saved value is invalid */
 -                      if (cpu_data->epp_powersave < 0)
 -                              goto skip_epp;
 +              epp = 0;
 +      } else {
 +              /* skip setting EPP, when saved value is invalid */
 +              if (cpu_data->epp_powersave < 0)
 +                      goto skip_epp;
    
 -                      /*
 -                       * No need to restore EPP when it is not zero. This
 -                       * means:
 -                       *  - Policy is not changed
 -                       *  - user has manually changed
 -                       *  - Error reading EPB
 -                       */
 -                      epp = intel_pstate_get_epp(cpu_data, value);
 -                      if (epp)
 -                              goto skip_epp;
 +              /*
 +               * No need to restore EPP when it is not zero. This
 +               * means:
 +               *  - Policy is not changed
 +               *  - user has manually changed
 +               *  - Error reading EPB
 +               */
 +              epp = intel_pstate_get_epp(cpu_data, value);
 +              if (epp)
 +                      goto skip_epp;
    
 -                      epp = cpu_data->epp_powersave;
 -              }
 +              epp = cpu_data->epp_powersave;
 +      }
    update_epp:
 -              if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
 -                      value &= ~GENMASK_ULL(31, 24);
 -                      value |= (u64)epp << 24;
 -              } else {
 -                      intel_pstate_set_epb(cpu, epp);
 -              }
 -  skip_epp:
 -              wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 +      if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
 +              value &= ~GENMASK_ULL(31, 24);
 +              value |= (u64)epp << 24;
 +      } else {
 +              intel_pstate_set_epb(cpu, epp);
        }
 -  }
 -  
 -  static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
 -  {
 -      if (hwp_active)
 -              intel_pstate_hwp_set(policy);
 -  
 -      return 0;
 +  skip_epp:
 +      wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
    }
    
    static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
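
A minimal usage sketch of the new helper, mirroring the call made
later in this diff (surrounding variable names hypothetical):

    int phy_max, current_max;

    /* One MSR_HWP_CAPABILITIES read yields both bounds: phy_max is
     * the highest ratio the part supports; current_max drops to the
     * guaranteed ratio when turbo is off (global.no_turbo). */
    intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);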
@@@@@ -1525,12 -1702,11 -1524,13 -1524,13 +1525,12 @@@@@ static void intel_pstate_set_min_pstate
    
    static void intel_pstate_max_within_limits(struct cpudata *cpu)
    {
 -      int min_pstate, max_pstate;
 +      int pstate;
    
        update_turbo_state();
 -      intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
 -      intel_pstate_set_pstate(cpu, max_pstate);
 +      pstate = intel_pstate_get_base_pstate(cpu);
  --    pstate = max(cpu->pstate.min_pstate,
  --                 fp_ext_toint(pstate * cpu->max_perf));
 +++    pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
 +      intel_pstate_set_pstate(cpu, pstate);
    }
    
    static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@@@@ -1686,12 -1858,11 -1692,13 -1692,13 +1686,12 @@@@@ static inline int32_t get_target_pstate
    
    static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
    {
 -      int max_perf, min_perf;
 +      int max_pstate = intel_pstate_get_base_pstate(cpu);
 +      int min_pstate;
    
 -      intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 -      pstate = clamp_t(int, pstate, min_perf, max_perf);
 -      return pstate;
  --    min_pstate = max(cpu->pstate.min_pstate,
  --                     fp_ext_toint(max_pstate * cpu->min_perf));
  --    max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
 +++    min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
 +++    max_pstate = max(min_pstate, cpu->max_perf_ratio);
 +      return clamp_t(int, pstate, min_pstate, max_pstate);
    }
    
    static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
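
With plain integer ratios, intel_pstate_prepare_request() reduces to
two max() picks and a clamp. A worked example with hypothetical values
(hardware minimum 8, user window 10..30):

    min_pstate = max(8, 10);              /* -> 10 */
    max_pstate = max(min_pstate, 30);     /* -> 30 */
    pstate = clamp_t(int, 35, 10, 30);    /* request 35 -> 30 */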
@@@@@ -1726,23 -1902,6 -1733,33 -1733,33 +1726,23 @@@@@ static void intel_pstate_adjust_pstate(
                fp_toint(cpu->iowait_boost * 100));
    }
    
  --static void intel_pstate_update_util_hwp(struct update_util_data *data,
  --                                     u64 time, unsigned int flags)
  --{
  --    struct cpudata *cpu = container_of(data, struct cpudata, update_util);
  --    u64 delta_ns = time - cpu->sample.time;
  --
  --    if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
  --            intel_pstate_sample(cpu, time);
  --}
  --
 +  static void intel_pstate_update_util_pid(struct update_util_data *data,
 +                                       u64 time, unsigned int flags)
 +  {
 +      struct cpudata *cpu = container_of(data, struct cpudata, update_util);
 +      u64 delta_ns = time - cpu->sample.time;
 +  
 +      if ((s64)delta_ns < pid_params.sample_rate_ns)
 +              return;
 +  
 +      if (intel_pstate_sample(cpu, time)) {
 +              int target_pstate;
 +  
 +              target_pstate = get_target_pstate_use_performance(cpu);
 +              intel_pstate_adjust_pstate(cpu, target_pstate);
 +      }
 +  }
 +  
    static void intel_pstate_update_util(struct update_util_data *data, u64 time,
                                     unsigned int flags)
    {
@@@@@ -1942,72 -2055,40 -1956,63 -1956,63 +1942,72 @@@@@ static void intel_pstate_clear_update_util_hook
        synchronize_sched();
    }
    
 +  static int intel_pstate_get_max_freq(struct cpudata *cpu)
 +  {
 +      return global.turbo_disabled || global.no_turbo ?
 +                      cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 +  }
 +  
    static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 -                                          struct perf_limits *limits)
 +                                          struct cpudata *cpu)
    {
 +      int max_freq = intel_pstate_get_max_freq(cpu);
        int32_t max_policy_perf, min_policy_perf;
  --    max_policy_perf = div_ext_fp(policy->max, max_freq);
  --    max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
 +++    int max_state, turbo_max;
 +  
 -      max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
 -      max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
 +++    /*
 +++     * HWP needs some special consideration, because on BDX the
 +++     * HWP_REQUEST uses abstract value to represent performance
 +++     * rather than pure ratios.
 +++     */
 +++    if (hwp_active) {
 +++            intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
 +++    } else {
 +++            max_state = intel_pstate_get_base_pstate(cpu);
 +++            turbo_max = cpu->pstate.turbo_pstate;
 +++    }
  ++
 +++    max_policy_perf = max_state * policy->max / max_freq;
        if (policy->max == policy->min) {
                min_policy_perf = max_policy_perf;
        } else {
 -              min_policy_perf = div_ext_fp(policy->min,
 -                                           policy->cpuinfo.max_freq);
  --            min_policy_perf = div_ext_fp(policy->min, max_freq);
 +++            min_policy_perf = max_state * policy->min / max_freq;
                min_policy_perf = clamp_t(int32_t, min_policy_perf,
                                          0, max_policy_perf);
        }
    
 +++    pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
 +++             policy->cpu, max_state,
 +++             min_policy_perf, max_policy_perf);
 +++
        /* Normalize user input to [min_perf, max_perf] */
 -      limits->min_perf = max(min_policy_perf,
 -                             percent_ext_fp(limits->min_sysfs_pct));
 -      limits->min_perf = min(limits->min_perf, max_policy_perf);
 -      limits->max_perf = min(max_policy_perf,
 -                             percent_ext_fp(limits->max_sysfs_pct));
 -      limits->max_perf = max(min_policy_perf, limits->max_perf);
 +      if (per_cpu_limits) {
  --            cpu->min_perf = min_policy_perf;
  --            cpu->max_perf = max_policy_perf;
 +++            cpu->min_perf_ratio = min_policy_perf;
 +++            cpu->max_perf_ratio = max_policy_perf;
 +      } else {
 +              int32_t global_min, global_max;
 +  
 +              /* Global limits are in percent of the maximum turbo P-state. */
  --            global_max = percent_ext_fp(global.max_perf_pct);
  --            global_min = percent_ext_fp(global.min_perf_pct);
  --            if (max_freq != cpu->pstate.turbo_freq) {
  --                    int32_t turbo_factor;
  --
  --                    turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
  --                                              cpu->pstate.max_pstate);
  --                    global_min = mul_ext_fp(global_min, turbo_factor);
  --                    global_max = mul_ext_fp(global_max, turbo_factor);
  --            }
 +++            global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
 +++            global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
 +              global_min = clamp_t(int32_t, global_min, 0, global_max);
 +  
  --            cpu->min_perf = max(min_policy_perf, global_min);
  --            cpu->min_perf = min(cpu->min_perf, max_policy_perf);
  --            cpu->max_perf = min(max_policy_perf, global_max);
  --            cpu->max_perf = max(min_policy_perf, cpu->max_perf);
 +++            pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
 +++                     global_min, global_max);
    
 -      /* Make sure min_perf <= max_perf */
 -      limits->min_perf = min(limits->min_perf, limits->max_perf);
  --            /* Make sure min_perf <= max_perf */
  --            cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
  --    }
 +++            cpu->min_perf_ratio = max(min_policy_perf, global_min);
 +++            cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
 +++            cpu->max_perf_ratio = min(max_policy_perf, global_max);
 +++            cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
    
 -      limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
 -      limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
 -      limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
 -      limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
  --    cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
  --    cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
 +++            /* Make sure min_perf <= max_perf */
 +++            cpu->min_perf_ratio = min(cpu->min_perf_ratio,
 +++                                      cpu->max_perf_ratio);
    
 ---    pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
 -               limits->max_perf_pct, limits->min_perf_pct);
  --             fp_ext_toint(cpu->max_perf * 100),
  --             fp_ext_toint(cpu->min_perf * 100));
 +++    }
 +++    pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
 +++             cpu->max_perf_ratio,
 +++             cpu->min_perf_ratio);
    }
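
To make the integer arithmetic in intel_pstate_update_perf_limits()
concrete, a worked example with hypothetical numbers (turbo ratio 40,
global limits 60%..75%, policy->max of 2.4 GHz against a 4.0 GHz
max_freq):

    global_max = DIV_ROUND_UP(40 * 75, 100);     /* 30 */
    global_min = DIV_ROUND_UP(40 * 60, 100);     /* 24 */
    max_policy_perf = 40 * 2400000 / 4000000;    /* ratio 24 */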
    
    static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                 */
                intel_pstate_clear_update_util_hook(policy->cpu);
                intel_pstate_max_within_limits(cpu);
 +++    } else {
 +++            intel_pstate_set_update_util_hook(policy->cpu);
        }
    
 ---    intel_pstate_set_update_util_hook(policy->cpu);
 ---
 -      intel_pstate_hwp_set_policy(policy);
 +      if (hwp_active)
 +              intel_pstate_hwp_set(policy->cpu);
    
        mutex_unlock(&intel_pstate_limits_lock);
    
@@@@@ -2110,8 -2202,8 -2115,8 -2115,8 +2110,8 @@@@@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
    
        cpu = all_cpu_data[policy->cpu];
    
 -      if (per_cpu_limits)
 -              intel_pstate_init_limits(cpu->perf_limits);
  --    cpu->max_perf = int_ext_tofp(1);
  --    cpu->min_perf = 0;
 +++    cpu->max_perf_ratio = 0xFF;
 +++    cpu->min_perf_ratio = 0;
    
        policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
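
A note on the 0xFF initializer above: the HWP request's per-CPU
minimum and maximum performance fields are 8 bits wide, so 0xFF is the
widest encodable ratio and acts as "no cap" until
intel_pstate_update_perf_limits() installs real bounds:

    cpu->max_perf_ratio = 0xFF;    /* 8-bit field maximum: uncapped */
    cpu->min_perf_ratio = 0;       /* no floor */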
@@@@@ -2546,24 -2644,21 -2551,25 -2551,25 +2546,24 @@@@@ static int __init intel_pstate_init(void)
        if (no_load)
                return -ENODEV;
    
 -      if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
 -              copy_cpu_funcs(&core_params.funcs);
 -              hwp_active++;
 -              intel_pstate.attr = hwp_cpufreq_attrs;
 -              goto hwp_cpu_matched;
 -      }
 -  
 -      id = x86_match_cpu(intel_pstate_cpu_ids);
 -      if (!id)
 -              return -ENODEV;
 +      if (x86_match_cpu(hwp_support_ids)) {
 +              copy_cpu_funcs(&core_funcs);
 +              if (no_hwp) {
 +                      pstate_funcs.update_util = intel_pstate_update_util;
 +              } else {
 +                      hwp_active++;
 +                      intel_pstate.attr = hwp_cpufreq_attrs;
  --                    pstate_funcs.update_util = intel_pstate_update_util_hwp;
 +                      goto hwp_cpu_matched;
 +              }
 +      } else {
 +              const struct x86_cpu_id *id;
    
 -      cpu_def = (struct cpu_defaults *)id->driver_data;
 +              id = x86_match_cpu(intel_pstate_cpu_ids);
 +              if (!id)
 +                      return -ENODEV;
    
 -      copy_pid_params(&cpu_def->pid_policy);
 -      copy_cpu_funcs(&cpu_def->funcs);
 +              copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
 +      }
    
        if (intel_pstate_msrs_not_valid())
                return -ENODEV;