author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
---|---|---
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 |
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/drivers/cpufreq/intel_pstate.c |
parent | f93b97fd65072de626c074dbe099a1fff05ce060 |
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.

During the rebase, the following patch collided:

    Force tick interrupt and get rid of softirq magic (I70131fb85)

The colliding patch was dropped, since its logic was already present in
the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/cpufreq/intel_pstate.c')
-rw-r--r-- | kernel/drivers/cpufreq/intel_pstate.c | 394
1 file changed, 278 insertions(+), 116 deletions(-)
diff --git a/kernel/drivers/cpufreq/intel_pstate.c b/kernel/drivers/cpufreq/intel_pstate.c
index 6f9d27f90..98fb88213 100644
--- a/kernel/drivers/cpufreq/intel_pstate.c
+++ b/kernel/drivers/cpufreq/intel_pstate.c
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
+#include <linux/vmalloc.h>
 #include <trace/events/power.h>
 
 #include <asm/div64.h>
@@ -33,24 +34,23 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#define BYT_RATIOS		0x66a
-#define BYT_VIDS		0x66b
-#define BYT_TURBO_RATIOS	0x66c
-#define BYT_TURBO_VIDS	0x66d
+#define ATOM_RATIOS		0x66a
+#define ATOM_VIDS		0x66b
+#define ATOM_TURBO_RATIOS	0x66c
+#define ATOM_TURBO_VIDS	0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
-
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
 }
 
-static inline int32_t div_fp(int32_t x, int32_t y)
+static inline int32_t div_fp(s64 x, s64 y)
 {
-	return div_s64((int64_t)x << FRAC_BITS, y);
+	return div64_s64((int64_t)x << FRAC_BITS, y);
 }
 
 static inline int ceiling_fp(int32_t x)
@@ -68,6 +68,7 @@ struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	u64 tsc;
 	int freq;
 	ktime_t time;
 };
@@ -76,6 +77,7 @@ struct pstate_data {
 	int	current_pstate;
 	int	min_pstate;
 	int	max_pstate;
+	int	max_pstate_physical;
 	int	scaling;
 	int	turbo_pstate;
 };
@@ -109,6 +111,7 @@ struct cpudata {
 	ktime_t last_sample_time;
 	u64	prev_aperf;
 	u64	prev_mperf;
+	u64	prev_tsc;
 	struct sample sample;
 };
@@ -124,6 +127,7 @@ struct pstate_adjust_policy {
 
 struct pstate_funcs {
 	int (*get_max)(void);
+	int (*get_max_physical)(void);
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
@@ -153,7 +157,20 @@ struct perf_limits {
 	int min_sysfs_pct;
 };
 
-static struct perf_limits limits = {
+static struct perf_limits performance_limits = {
+	.no_turbo = 0,
+	.turbo_disabled = 0,
+	.max_perf_pct = 100,
+	.max_perf = int_tofp(1),
+	.min_perf_pct = 100,
+	.min_perf = int_tofp(1),
+	.max_policy_pct = 100,
+	.max_sysfs_pct = 100,
+	.min_policy_pct = 0,
+	.min_sysfs_pct = 0,
+};
+
+static struct perf_limits powersave_limits = {
 	.no_turbo = 0,
 	.turbo_disabled = 0,
 	.max_perf_pct = 100,
@@ -166,6 +183,12 @@ static struct perf_limits limits = {
 	.min_sysfs_pct = 0,
 };
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+static struct perf_limits *limits = &performance_limits;
+#else
+static struct perf_limits *limits = &powersave_limits;
+#endif
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral)
 {
 	pid->setpoint = setpoint;
@@ -252,29 +275,36 @@ static inline void update_turbo_state(void)
 	cpu = all_cpu_data[0];
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-	limits.turbo_disabled =
+	limits->turbo_disabled =
 		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
-#define PCT_TO_HWP(x) (x * 255 / 100)
 static void intel_pstate_hwp_set(void)
 {
-	int min, max, cpu;
-	u64 value, freq;
+	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	u64 value, cap;
+
+	rdmsrl(MSR_HWP_CAPABILITIES, cap);
+	hw_min = HWP_LOWEST_PERF(cap);
+	hw_max = HWP_HIGHEST_PERF(cap);
+	range = hw_max - hw_min;
 
 	get_online_cpus();
 
 	for_each_online_cpu(cpu) {
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		min = PCT_TO_HWP(limits.min_perf_pct);
+		adj_range = limits->min_perf_pct * range / 100;
+		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = PCT_TO_HWP(limits.max_perf_pct);
-		if (limits.no_turbo) {
-			rdmsrl( MSR_HWP_CAPABILITIES, freq);
-			max = HWP_GUARANTEED_PERF(freq);
+		adj_range = limits->max_perf_pct * range / 100;
+		max = hw_min + adj_range;
+		if (limits->no_turbo) {
+			hw_max = HWP_GUARANTEED_PERF(cap);
+			if (hw_max < max)
+				max = hw_max;
 		}
 
 		value &= ~HWP_MAX_PERF(~0L);
@@ -340,7 +370,7 @@ static void __init intel_pstate_debug_expose_params(void)
 	static ssize_t show_##file_name					\
 	(struct kobject *kobj, struct attribute *attr, char *buf)	\
 	{								\
-		return sprintf(buf, "%u\n", limits.object);		\
+		return sprintf(buf, "%u\n", limits->object);		\
 	}
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -376,10 +406,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	ssize_t ret;
 
 	update_turbo_state();
-	if (limits.turbo_disabled)
-		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
+	if (limits->turbo_disabled)
+		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
 	else
-		ret = sprintf(buf, "%u\n", limits.no_turbo);
+		ret = sprintf(buf, "%u\n", limits->no_turbo);
 
 	return ret;
 }
@@ -395,12 +425,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 		return -EINVAL;
 
 	update_turbo_state();
-	if (limits.turbo_disabled) {
-		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+	if (limits->turbo_disabled) {
+		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
 		return -EPERM;
 	}
 
-	limits.no_turbo = clamp_t(int, input, 0, 1);
+	limits->no_turbo = clamp_t(int, input, 0, 1);
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -418,9 +448,15 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
-	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	limits->max_perf_pct = min(limits->max_policy_pct,
+				   limits->max_sysfs_pct);
+	limits->max_perf_pct = max(limits->min_policy_pct,
+				   limits->max_perf_pct);
+	limits->max_perf_pct = max(limits->min_perf_pct,
+				   limits->max_perf_pct);
+	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
+				  int_tofp(100));
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -437,9 +473,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
+	limits->min_perf_pct = max(limits->min_policy_pct,
+				   limits->min_sysfs_pct);
+	limits->min_perf_pct = min(limits->max_policy_pct,
+				   limits->min_perf_pct);
+	limits->min_perf_pct = min(limits->max_perf_pct,
+				   limits->min_perf_pct);
+	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
+				  int_tofp(100));
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -481,46 +523,43 @@ static void __init intel_pstate_sysfs_expose_params(void)
 }
 /************************** sysfs end ************************/
 
-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	hwp_active++;
-	pr_info("intel_pstate HWP enabled\n");
-
-	wrmsrl( MSR_PM_ENABLE, 0x1);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_TURBO_RATIOS, value);
+	rdmsrl(ATOM_TURBO_RATIOS, value);
 	return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
 	u32 vid;
 
-	val = pstate << 8;
-	if (limits.no_turbo && !limits.turbo_disabled)
+	val = (u64)pstate << 8;
+	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -538,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS 5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
 	u64 value;
 	int i;
+	/* Defined in Table 35-6 from SDM (Sept 2015) */
+	static int silvermont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000};
 
 	rdmsrl(MSR_FSB_FREQ, value);
-	i = value & 0x3;
+	i = value & 0x7;
+	WARN_ON(i > 4);
 
-	BUG_ON(i > BYT_BCLK_FREQS);
+	return silvermont_freq_table[i];
+}
 
-	return byt_freq_table[i] * 100;
+static int airmont_get_scaling(void)
+{
+	u64 value;
+	int i;
+	/* Defined in Table 35-10 from SDM (Sept 2015) */
+	static int airmont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000,
+		93300, 90000, 88900, 87500};
+
+	rdmsrl(MSR_FSB_FREQ, value);
+	i = value & 0xF;
+	WARN_ON(i > 8);
+
+	return airmont_freq_table[i];
 }
 
-static void byt_get_vid(struct cpudata *cpudata)
+static void atom_get_vid(struct cpudata *cpudata)
 {
 	u64 value;
 
-	rdmsrl(BYT_VIDS, value);
+	rdmsrl(ATOM_VIDS, value);
 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
 	cpudata->vid.ratio = div_fp(
@@ -566,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
 		int_tofp(cpudata->pstate.max_pstate -
 			 cpudata->pstate.min_pstate));
 
-	rdmsrl(BYT_TURBO_VIDS, value);
+	rdmsrl(ATOM_TURBO_VIDS, value);
 	cpudata->vid.turbo = value & 0x7f;
 }
 
@@ -578,7 +632,7 @@ static int core_get_min_pstate(void)
 	return (value >> 40) & 0xFF;
 }
 
-static int core_get_max_pstate(void)
+static int core_get_max_pstate_physical(void)
 {
 	u64 value;
 
@@ -586,6 +640,46 @@ static int core_get_max_pstate(void)
 	return (value >> 8) & 0xFF;
 }
 
+static int core_get_max_pstate(void)
+{
+	u64 tar;
+	u64 plat_info;
+	int max_pstate;
+	int err;
+
+	rdmsrl(MSR_PLATFORM_INFO, plat_info);
+	max_pstate = (plat_info >> 8) & 0xFF;
+
+	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
+	if (!err) {
+		/* Do some sanity checking for safety */
+		if (plat_info & 0x600000000) {
+			u64 tdp_ctrl;
+			u64 tdp_ratio;
+			int tdp_msr;
+
+			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+			if (err)
+				goto skip_tar;
+
+			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+			if (err)
+				goto skip_tar;
+
+			if (tdp_ratio - 1 == tar) {
+				max_pstate = tar;
+				pr_debug("max_pstate=TAC %x\n", max_pstate);
+			} else {
+				goto skip_tar;
+			}
+		}
+	}
+
+skip_tar:
+	return max_pstate;
+}
+
 static int core_get_turbo_pstate(void)
 {
 	u64 value;
@@ -608,8 +702,8 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 
-	val = pstate << 8;
-	if (limits.no_turbo && !limits.turbo_disabled)
+	val = (u64)pstate << 8;
+	if (limits->no_turbo && !limits->turbo_disabled)
 		val |= (u64)1 << 32;
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -639,6 +733,7 @@ static struct cpu_defaults core_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = core_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -646,7 +741,27 @@ static struct cpu_defaults core_params = {
 	},
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 60,
+		.p_gain_pct = 14,
+		.d_gain_pct = 0,
+		.i_gain_pct = 4,
+	},
+	.funcs = {
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = silvermont_get_scaling,
+		.get_vid = atom_get_vid,
+	},
+};
+
+static struct cpu_defaults airmont_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
 		.deadband = 0,
@@ -656,12 +771,13 @@ static struct cpu_defaults byt_params = {
 		.i_gain_pct = 4,
 	},
 	.funcs = {
-		.get_max = byt_get_max_pstate,
-		.get_min = byt_get_min_pstate,
-		.get_turbo = byt_get_turbo_pstate,
-		.set = byt_set_pstate,
-		.get_scaling = byt_get_scaling,
-		.get_vid = byt_get_vid,
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = airmont_get_scaling,
+		.get_vid = atom_get_vid,
 	},
 };
 
@@ -676,6 +792,7 @@ static struct cpu_defaults knl_params = {
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
+		.get_max_physical = core_get_max_pstate_physical,
 		.get_min = core_get_min_pstate,
 		.get_turbo = knl_get_turbo_pstate,
 		.get_scaling = core_get_scaling,
@@ -689,7 +806,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	int max_perf_adj;
 	int min_perf;
 
-	if (limits.no_turbo || limits.turbo_disabled)
+	if (limits->no_turbo || limits->turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
 	/*
@@ -697,27 +814,28 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
 	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
+	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
 {
 	int max_perf, min_perf;
 
-	update_turbo_state();
-
-	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+	if (force) {
+		update_turbo_state();
 
-	pstate = clamp_t(int, pstate, min_perf, max_perf);
+		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 
-	if (pstate == cpu->pstate.current_pstate)
-		return;
+		pstate = clamp_t(int, pstate, min_perf, max_perf);
 
+		if (pstate == cpu->pstate.current_pstate)
+			return;
+	}
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
 
 	cpu->pstate.current_pstate = pstate;
@@ -729,12 +847,13 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
 	cpu->pstate.max_pstate = pstate_funcs.get_max();
+	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
 
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
 }
 
 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -747,7 +866,8 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 
 	sample->freq = fp_toint(
 		mul_fp(int_tofp(
-			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
+			cpu->pstate.max_pstate_physical *
+			cpu->pstate.scaling / 100),
 			core_pct));
 
 	sample->core_pct_busy = (int32_t)core_pct;
@@ -757,23 +877,33 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
 	unsigned long flags;
+	u64 tsc;
 
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	if (cpu->prev_mperf == mperf) {
+		local_irq_restore(flags);
+		return;
+	}
+
+	tsc = rdtsc();
 	local_irq_restore(flags);
 
 	cpu->last_sample_time = cpu->sample.time;
 	cpu->sample.time = ktime_get();
 	cpu->sample.aperf = aperf;
 	cpu->sample.mperf = mperf;
+	cpu->sample.tsc = tsc;
 	cpu->sample.aperf -= cpu->prev_aperf;
 	cpu->sample.mperf -= cpu->prev_mperf;
+	cpu->sample.tsc -= cpu->prev_tsc;
 
 	intel_pstate_calc_busy(cpu);
 
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
+	cpu->prev_tsc = tsc;
 }
 
 static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
@@ -795,7 +925,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
 	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
-	u32 duration_us;
+	s64 duration_us;
 	u32 sample_time;
 
 	/*
@@ -810,7 +940,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	 * specified pstate.
 	 */
 	core_busy = cpu->sample.core_pct_busy;
-	max_pstate = int_tofp(cpu->pstate.max_pstate);
+	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
@@ -822,8 +952,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	 * to adjust our busyness.
	 */
 	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
-	duration_us = (u32) ktime_us_delta(cpu->sample.time,
-					   cpu->last_sample_time);
+	duration_us = ktime_us_delta(cpu->sample.time,
+				     cpu->last_sample_time);
 	if (duration_us > sample_time * 3) {
 		sample_ratio = div_fp(int_tofp(sample_time),
 				      int_tofp(duration_us));
@@ -838,6 +968,10 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	int32_t busy_scaled;
 	struct _pid *pid;
 	signed int ctl;
+	int from;
+	struct sample *sample;
+
+	from = cpu->pstate.current_pstate;
 
 	pid = &cpu->pid;
 	busy_scaled = intel_pstate_get_scaled_busy(cpu);
@@ -845,7 +979,17 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	ctl = pid_calc(pid, busy_scaled);
 
 	/* Negative values of ctl increase the pstate and vice versa */
-	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
+	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);
+
+	sample = &cpu->sample;
+	trace_pstate_sample(fp_toint(sample->core_pct_busy),
+		fp_toint(busy_scaled),
+		from,
+		cpu->pstate.current_pstate,
+		sample->mperf,
+		sample->aperf,
+		sample->tsc,
+		sample->freq);
 }
 
 static void intel_hwp_timer_func(unsigned long __data)
@@ -859,21 +1003,11 @@ static void intel_hwp_timer_func(unsigned long __data)
 static void intel_pstate_timer_func(unsigned long __data)
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
-	struct sample *sample;
 
 	intel_pstate_sample(cpu);
 
-	sample = &cpu->sample;
-
 	intel_pstate_adjust_busy_pstate(cpu);
 
-	trace_pstate_sample(fp_toint(sample->core_pct_busy),
-			fp_toint(intel_pstate_get_scaled_busy(cpu)),
-			cpu->pstate.current_pstate,
-			sample->mperf,
-			sample->aperf,
-			sample->freq);
-
 	intel_pstate_set_sample_time(cpu);
 }
 
@@ -884,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, core_params),
 	ICPU(0x2d, core_params),
-	ICPU(0x37, byt_params),
+	ICPU(0x37, silvermont_params),
 	ICPU(0x3a, core_params),
 	ICPU(0x3c, core_params),
 	ICPU(0x3d, core_params),
@@ -893,9 +1027,10 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
 	ICPU(0x47, core_params),
-	ICPU(0x4c, byt_params),
+	ICPU(0x4c, airmont_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
+	ICPU(0x5e, core_params),
 	ICPU(0x56, core_params),
 	ICPU(0x57, knl_params),
 	{}
@@ -920,6 +1055,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
 	cpu = all_cpu_data[cpunum];
 	cpu->cpu = cpunum;
+
+	if (hwp_active)
+		intel_pstate_hwp_enable(cpu);
+
 	intel_pstate_get_cpu_pstates(cpu);
 
 	init_timer_deferrable(&cpu->timer);
@@ -936,7 +1075,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
 	add_timer_on(&cpu->timer, cpunum);
 
-	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);
+	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
 
 	return 0;
 }
@@ -960,25 +1099,39 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
 	    policy->max >= policy->cpuinfo.max_freq) {
-		limits.min_policy_pct = 100;
-		limits.min_perf_pct = 100;
-		limits.min_perf = int_tofp(1);
-		limits.max_policy_pct = 100;
-		limits.max_perf_pct = 100;
-		limits.max_perf = int_tofp(1);
-		limits.no_turbo = 0;
+		pr_debug("intel_pstate: set performance\n");
+		limits = &performance_limits;
+		if (hwp_active)
+			intel_pstate_hwp_set();
 		return 0;
 	}
 
-	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
-	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
-	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
-	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
-	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
-	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+	pr_debug("intel_pstate: set powersave\n");
+	limits = &powersave_limits;
+	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
+	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
+					      policy->cpuinfo.max_freq);
+	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
+
+	/* Normalize user input to [min_policy_pct, max_policy_pct] */
+	limits->min_perf_pct = max(limits->min_policy_pct,
+				   limits->min_sysfs_pct);
+	limits->min_perf_pct = min(limits->max_policy_pct,
+				   limits->min_perf_pct);
+	limits->max_perf_pct = min(limits->max_policy_pct,
+				   limits->max_sysfs_pct);
+	limits->max_perf_pct = max(limits->min_policy_pct,
+				   limits->max_perf_pct);
+	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
+
+	/* Make sure min_perf_pct <= max_perf_pct */
+	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+
+	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
+				  int_tofp(100));
+	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
+				  int_tofp(100));
 
 	if (hwp_active)
 		intel_pstate_hwp_set();
@@ -1002,13 +1155,13 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 	int cpu_num = policy->cpu;
 	struct cpudata *cpu = all_cpu_data[cpu_num];
 
-	pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
 
 	del_timer_sync(&all_cpu_data[cpu_num]->timer);
 	if (hwp_active)
 		return;
 
-	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1022,7 +1175,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -1078,6 +1231,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max   = funcs->get_max;
+	pstate_funcs.get_max_physical = funcs->get_max_physical;
 	pstate_funcs.get_min   = funcs->get_min;
 	pstate_funcs.get_turbo = funcs->get_turbo;
 	pstate_funcs.get_scaling = funcs->get_scaling;
@@ -1157,6 +1311,10 @@ static struct hw_vendor_info vendor_info[] = {
 	{1, "ORACLE", "X4270M3 ", PPC},
 	{1, "ORACLE", "X4270M2 ", PPC},
 	{1, "ORACLE", "X4170M2 ", PPC},
+	{1, "ORACLE", "X4170 M3", PPC},
+	{1, "ORACLE", "X4275 M3", PPC},
+	{1, "ORACLE", "X6-2 ", PPC},
+	{1, "ORACLE", "Sudbury ", PPC},
 	{0, "", ""},
 };
 
@@ -1232,8 +1390,10 @@ static int __init intel_pstate_init(void)
 	if (!all_cpu_data)
 		return -ENOMEM;
 
-	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-		intel_pstate_hwp_enable();
+	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+		pr_info("intel_pstate: HWP enabled\n");
+		hwp_active++;
+	}
 
 	if (!hwp_active && hwp_only)
 		goto out;
@@ -1268,8 +1428,10 @@ static int __init intel_pstate_setup(char *str)
 
 	if (!strcmp(str, "disable"))
 		no_load = 1;
-	if (!strcmp(str, "no_hwp"))
+	if (!strcmp(str, "no_hwp")) {
+		pr_info("intel_pstate: HWP disabled\n");
 		no_hwp = 1;
+	}
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
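
Two details of the arithmetic above may be easier to see in isolation. The patch widens div_fp() from int32_t to s64 operands (switching div_s64() to div64_s64()) because duration_us in intel_pstate_get_scaled_busy() becomes the s64 result of ktime_us_delta(); shifting a large microsecond delta left by FRAC_BITS could overflow 32 bits. The following is a minimal userspace sketch of the FRAC_BITS fixed-point helpers, assuming plain C99 with native 64-bit division standing in for the kernel's div64_s64(); the sample numbers are made up for illustration.

/*
 * Minimal userspace sketch of the driver's FRAC_BITS fixed-point
 * helpers (FRAC_BITS = 8, as in the patch).  Native 64-bit division
 * stands in for the kernel's div64_s64().
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

/* Post-patch signature: 64-bit operands so a large microsecond delta
 * can be shifted left by FRAC_BITS without truncation. */
static inline int32_t div_fp(int64_t x, int64_t y)
{
	return (int32_t)((x << FRAC_BITS) / y);
}

int main(void)
{
	/* sample_ratio = sample_time / duration_us, as computed in
	 * intel_pstate_get_scaled_busy() when a sample arrives late */
	int64_t sample_time = 10 * 1000;	/* 10 ms, in us */
	int64_t duration_us = 45 * 1000;	/* a late sample */
	int32_t ratio = div_fp(int_tofp(sample_time), int_tofp(duration_us));

	/* core_busy scaled by max_pstate / current_pstate (72% busy,
	 * hypothetical pstates 28 and 20) */
	int32_t core_busy = mul_fp(int_tofp(72) / 100,
				   div_fp(int_tofp(28), int_tofp(20)));

	printf("sample_ratio = %d/256 (~%.2f)\n", ratio, ratio / 256.0);
	printf("scaled busy  = %.1f%%\n", 100.0 * core_busy / 256.0);
	return 0;
}

Built with any C99 compiler, this prints a sample_ratio of roughly 0.22 and a scaled busyness just above 100%, which is the shape of the inputs the PID controller consumes.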
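The HWP change is similarly self-contained. The removed PCT_TO_HWP(x) macro mapped a percentage straight onto 0..255, ignoring the per-package bounds that MSR_HWP_CAPABILITIES reports; the patch instead scales the percentage into [hw_min, hw_max] and, with turbo off, additionally clamps to the guaranteed-performance level. A small sketch, with hypothetical hw_min/hw_max values standing in for the HWP_LOWEST_PERF()/HWP_HIGHEST_PERF() fields:

/*
 * Percent-to-HWP mapping before and after the patch.  hw_min/hw_max
 * are made-up values standing in for the HWP_LOWEST_PERF() and
 * HWP_HIGHEST_PERF() fields of MSR_HWP_CAPABILITIES.
 */
#include <stdio.h>

#define PCT_TO_HWP(x) ((x) * 255 / 100)		/* old, removed mapping */

static int pct_to_hw_range(int pct, int hw_min, int hw_max)
{
	int range = hw_max - hw_min;
	int adj_range = pct * range / 100;	/* new mapping */

	return hw_min + adj_range;
}

int main(void)
{
	int hw_min = 8, hw_max = 40;		/* hypothetical capabilities */

	/* For min_perf_pct = 25 the old macro ignores the hardware's
	 * reported range; the new mapping stays inside [hw_min, hw_max]. */
	printf("old: %d\n", PCT_TO_HWP(25));			/* 63 */
	printf("new: %d\n", pct_to_hw_range(25, hw_min, hw_max));	/* 16 */
	return 0;
}

On this made-up part, the old mapping would request level 63 from hardware whose usable range tops out at 40; the range-relative mapping lands at 16, a quarter of the way between the lowest and highest performance levels, which is what the sysfs percentage is meant to express.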