Lines Matching refs:cpu (drivers/cpufreq/intel_pstate.c)

230 int cpu;  member
288 int (*get_max)(int cpu);
289 int (*get_max_physical)(int cpu);
290 int (*get_min)(int cpu);
291 int (*get_turbo)(int cpu);
293 int (*get_cpu_scaling)(int cpu);
362 static void intel_pstate_set_itmt_prio(int cpu) in intel_pstate_set_itmt_prio() argument
368 ret = cppc_get_perf_caps(cpu, &cppc_perf); in intel_pstate_set_itmt_prio()
377 cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); in intel_pstate_set_itmt_prio()
384 sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu); in intel_pstate_set_itmt_prio()
405 static int intel_pstate_get_cppc_guaranteed(int cpu) in intel_pstate_get_cppc_guaranteed() argument
410 ret = cppc_get_perf_caps(cpu, &cppc_perf); in intel_pstate_get_cppc_guaranteed()
420 static int intel_pstate_cppc_get_scaling(int cpu) in intel_pstate_cppc_get_scaling() argument
428 if (!cppc_get_perf_caps(cpu, &cppc_perf) && in intel_pstate_cppc_get_scaling()
437 static inline void intel_pstate_set_itmt_prio(int cpu) in intel_pstate_set_itmt_prio() argument
444 struct cpudata *cpu; in intel_pstate_init_acpi_perf_limits() local
449 intel_pstate_set_itmt_prio(policy->cpu); in intel_pstate_init_acpi_perf_limits()
456 cpu = all_cpu_data[policy->cpu]; in intel_pstate_init_acpi_perf_limits()
458 ret = acpi_processor_register_performance(&cpu->acpi_perf_data, in intel_pstate_init_acpi_perf_limits()
459 policy->cpu); in intel_pstate_init_acpi_perf_limits()
468 if (cpu->acpi_perf_data.control_register.space_id != in intel_pstate_init_acpi_perf_limits()
476 if (cpu->acpi_perf_data.state_count < 2) in intel_pstate_init_acpi_perf_limits()
479 pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu); in intel_pstate_init_acpi_perf_limits()
480 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { in intel_pstate_init_acpi_perf_limits()
482 (i == cpu->acpi_perf_data.state ? '*' : ' '), i, in intel_pstate_init_acpi_perf_limits()
483 (u32) cpu->acpi_perf_data.states[i].core_frequency, in intel_pstate_init_acpi_perf_limits()
484 (u32) cpu->acpi_perf_data.states[i].power, in intel_pstate_init_acpi_perf_limits()
485 (u32) cpu->acpi_perf_data.states[i].control); in intel_pstate_init_acpi_perf_limits()
488 cpu->valid_pss_table = true; in intel_pstate_init_acpi_perf_limits()
494 cpu->valid_pss_table = false; in intel_pstate_init_acpi_perf_limits()
495 acpi_processor_unregister_performance(policy->cpu); in intel_pstate_init_acpi_perf_limits()
500 struct cpudata *cpu; in intel_pstate_exit_perf_limits() local
502 cpu = all_cpu_data[policy->cpu]; in intel_pstate_exit_perf_limits()
503 if (!cpu->valid_pss_table) in intel_pstate_exit_perf_limits()
506 acpi_processor_unregister_performance(policy->cpu); in intel_pstate_exit_perf_limits()
524 static inline int intel_pstate_get_cppc_guaranteed(int cpu) in intel_pstate_get_cppc_guaranteed() argument
529 static int intel_pstate_cppc_get_scaling(int cpu) in intel_pstate_cppc_get_scaling() argument
535 static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq, in intel_pstate_freq_to_hwp_rel() argument
538 if (freq == cpu->pstate.turbo_freq) in intel_pstate_freq_to_hwp_rel()
539 return cpu->pstate.turbo_pstate; in intel_pstate_freq_to_hwp_rel()
541 if (freq == cpu->pstate.max_freq) in intel_pstate_freq_to_hwp_rel()
542 return cpu->pstate.max_pstate; in intel_pstate_freq_to_hwp_rel()
546 return freq / cpu->pstate.scaling; in intel_pstate_freq_to_hwp_rel()
548 return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling); in intel_pstate_freq_to_hwp_rel()
551 return DIV_ROUND_UP(freq, cpu->pstate.scaling); in intel_pstate_freq_to_hwp_rel()
554 static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq) in intel_pstate_freq_to_hwp() argument
556 return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L); in intel_pstate_freq_to_hwp()
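Note: the switch in intel_pstate_freq_to_hwp_rel() maps cpufreq relation hints onto integer rounding: CPUFREQ_RELATION_H truncates (highest P-state whose frequency is at or below the target), CPUFREQ_RELATION_C rounds to the closest P-state, and the CPUFREQ_RELATION_L fallback rounds up. A minimal userspace sketch of the same arithmetic, with the kernel's DIV_ROUND_CLOSEST()/DIV_ROUND_UP() expanded for positive operands (the RELATION_* names are illustrative stand-ins):

#include <stdio.h>

enum relation { RELATION_L, RELATION_H, RELATION_C };	/* stand-in names */

static int freq_to_pstate(int freq_khz, int scaling_khz, enum relation rel)
{
	switch (rel) {
	case RELATION_H:	/* truncate: highest freq <= target */
		return freq_khz / scaling_khz;
	case RELATION_C:	/* DIV_ROUND_CLOSEST: round half up */
		return (freq_khz + scaling_khz / 2) / scaling_khz;
	default:		/* RELATION_L, DIV_ROUND_UP: round up */
		return (freq_khz + scaling_khz - 1) / scaling_khz;
	}
}

int main(void)
{
	/* 2450 MHz with 100 MHz per P-state step */
	printf("H=%d C=%d L=%d\n",
	       freq_to_pstate(2450000, 100000, RELATION_H),	/* 24 */
	       freq_to_pstate(2450000, 100000, RELATION_C),	/* 25 */
	       freq_to_pstate(2450000, 100000, RELATION_L));	/* 25 */
	return 0;
}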
570 static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) in intel_pstate_hybrid_hwp_adjust() argument
572 int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; in intel_pstate_hybrid_hwp_adjust()
573 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; in intel_pstate_hybrid_hwp_adjust()
574 int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu); in intel_pstate_hybrid_hwp_adjust()
575 int scaling = cpu->pstate.scaling; in intel_pstate_hybrid_hwp_adjust()
578 pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); in intel_pstate_hybrid_hwp_adjust()
579 pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo); in intel_pstate_hybrid_hwp_adjust()
580 pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling); in intel_pstate_hybrid_hwp_adjust()
581 pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); in intel_pstate_hybrid_hwp_adjust()
582 pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); in intel_pstate_hybrid_hwp_adjust()
583 pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); in intel_pstate_hybrid_hwp_adjust()
590 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling, in intel_pstate_hybrid_hwp_adjust()
592 cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, in intel_pstate_hybrid_hwp_adjust()
596 cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq); in intel_pstate_hybrid_hwp_adjust()
598 freq = cpu->pstate.min_pstate * perf_ctl_scaling; in intel_pstate_hybrid_hwp_adjust()
599 cpu->pstate.min_freq = freq; in intel_pstate_hybrid_hwp_adjust()
604 cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq); in intel_pstate_hybrid_hwp_adjust()
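Note: the rounddown() calls above align the hybrid HWP-derived turbo and max frequencies to multiples of the PERF_CTL scaling unit, so both frequency interfaces report values on the same grid. A toy illustration, assuming a hybrid HWP scaling factor of 78741 kHz per performance unit and 100000 kHz PERF_CTL steps (both values are examples, not read from hardware here):

#include <stdio.h>

static unsigned int rounddown_to(unsigned int x, unsigned int step)
{
	return x - (x % step);	/* kernel rounddown() for these operands */
}

int main(void)
{
	/* turbo_pstate = 57 with a hybrid scaling of 78741 kHz/unit */
	printf("%u kHz\n", rounddown_to(57 * 78741, 100000));	/* 4400000 */
	return 0;
}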
618 struct cpudata *cpu = all_cpu_data[0]; in min_perf_pct_min() local
619 int turbo_pstate = cpu->pstate.turbo_pstate; in min_perf_pct_min()
622 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; in min_perf_pct_min()
635 epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, in intel_pstate_get_epp()
720 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) in intel_pstate_set_epp() argument
729 u64 value = READ_ONCE(cpu->hwp_req_cached); in intel_pstate_set_epp()
738 WRITE_ONCE(cpu->hwp_req_cached, value); in intel_pstate_set_epp()
739 ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); in intel_pstate_set_epp()
741 cpu->epp_cached = epp; in intel_pstate_set_epp()
797 struct cpudata *cpu = all_cpu_data[policy->cpu]; in store_energy_performance_preference() local
833 ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp); in store_energy_performance_preference()
842 epp = ret ? epp_values[ret] : cpu->epp_default; in store_energy_performance_preference()
844 if (cpu->epp_cached != epp) { in store_energy_performance_preference()
848 ret = intel_pstate_set_epp(cpu, epp); in store_energy_performance_preference()
865 struct cpudata *cpu_data = all_cpu_data[policy->cpu]; in show_energy_performance_preference()
882 struct cpudata *cpu = all_cpu_data[policy->cpu]; in show_base_frequency() local
885 ratio = intel_pstate_get_cppc_guaranteed(policy->cpu); in show_base_frequency()
889 rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap); in show_base_frequency()
893 freq = ratio * cpu->pstate.scaling; in show_base_frequency()
894 if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling) in show_base_frequency()
895 freq = rounddown(freq, cpu->pstate.perf_ctl_scaling); in show_base_frequency()
917 static u8 hybrid_get_cpu_type(unsigned int cpu) in hybrid_get_cpu_type() argument
919 return cpu_data(cpu).topo.intel_type; in hybrid_get_cpu_type()
953 static bool hybrid_has_l3(unsigned int cpu) in hybrid_has_l3() argument
955 struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu); in hybrid_has_l3()
992 static bool hybrid_register_perf_domain(unsigned int cpu) in hybrid_register_perf_domain() argument
996 struct cpudata *cpudata = all_cpu_data[cpu]; in hybrid_register_perf_domain()
1007 cpu_dev = get_cpu_device(cpu); in hybrid_register_perf_domain()
1012 cpumask_of(cpu), false)) in hybrid_register_perf_domain()
1022 unsigned int cpu; in hybrid_register_all_perf_domains() local
1024 for_each_online_cpu(cpu) in hybrid_register_all_perf_domains()
1025 hybrid_register_perf_domain(cpu); in hybrid_register_all_perf_domains()
1028 static void hybrid_update_perf_domain(struct cpudata *cpu) in hybrid_update_perf_domain() argument
1030 if (cpu->pd_registered) in hybrid_update_perf_domain()
1031 em_adjust_cpu_capacity(cpu->cpu); in hybrid_update_perf_domain()
1034 static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; } in hybrid_register_perf_domain() argument
1036 static inline void hybrid_update_perf_domain(struct cpudata *cpu) {} in hybrid_update_perf_domain() argument
1039 static void hybrid_set_cpu_capacity(struct cpudata *cpu) in hybrid_set_cpu_capacity() argument
1041 arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf, in hybrid_set_cpu_capacity()
1043 cpu->capacity_perf, in hybrid_set_cpu_capacity()
1044 cpu->pstate.max_pstate_physical); in hybrid_set_cpu_capacity()
1045 hybrid_update_perf_domain(cpu); in hybrid_set_cpu_capacity()
1047 topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu)); in hybrid_set_cpu_capacity()
1050 cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical, in hybrid_set_cpu_capacity()
1059 static void hybrid_get_capacity_perf(struct cpudata *cpu) in hybrid_get_capacity_perf() argument
1062 cpu->capacity_perf = cpu->pstate.max_pstate_physical; in hybrid_get_capacity_perf()
1066 cpu->capacity_perf = HWP_HIGHEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); in hybrid_get_capacity_perf()
1074 struct cpudata *cpu = all_cpu_data[cpunum]; in hybrid_set_capacity_of_cpus() local
1076 if (cpu) in hybrid_set_capacity_of_cpus()
1077 hybrid_set_cpu_capacity(cpu); in hybrid_set_capacity_of_cpus()
1088 struct cpudata *cpu = all_cpu_data[cpunum]; in hybrid_update_cpu_capacity_scaling() local
1090 if (!cpu) in hybrid_update_cpu_capacity_scaling()
1098 hybrid_get_capacity_perf(cpu); in hybrid_update_cpu_capacity_scaling()
1105 if (cpu == hybrid_max_perf_cpu) in hybrid_update_cpu_capacity_scaling()
1108 if (cpu->capacity_perf > max_cap_perf) { in hybrid_update_cpu_capacity_scaling()
1109 max_cap_perf = cpu->capacity_perf; in hybrid_update_cpu_capacity_scaling()
1110 max_perf_cpu = cpu; in hybrid_update_cpu_capacity_scaling()
1186 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) in __intel_pstate_get_hwp_cap() argument
1190 rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap); in __intel_pstate_get_hwp_cap()
1191 WRITE_ONCE(cpu->hwp_cap_cached, cap); in __intel_pstate_get_hwp_cap()
1192 cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); in __intel_pstate_get_hwp_cap()
1193 cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); in __intel_pstate_get_hwp_cap()
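Note: HWP_GUARANTEED_PERF() and HWP_HIGHEST_PERF() above are byte extractions from the cached MSR_HWP_CAPABILITIES value; per the documented register layout, bits 7:0 hold highest, 15:8 guaranteed, 23:16 most-efficient, and 31:24 lowest performance. A sketch with the macros written out (the register value is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define HWP_HIGHEST_PERF(x)		(((x) >>  0) & 0xff)
#define HWP_GUARANTEED_PERF(x)		(((x) >>  8) & 0xff)
#define HWP_MOSTEFFICIENT_PERF(x)	(((x) >> 16) & 0xff)
#define HWP_LOWEST_PERF(x)		(((x) >> 24) & 0xff)

int main(void)
{
	uint64_t cap = 0x1A1C2840;	/* made-up MSR_HWP_CAPABILITIES */

	printf("highest=%u guaranteed=%u efficient=%u lowest=%u\n",
	       (unsigned int)HWP_HIGHEST_PERF(cap),
	       (unsigned int)HWP_GUARANTEED_PERF(cap),
	       (unsigned int)HWP_MOSTEFFICIENT_PERF(cap),
	       (unsigned int)HWP_LOWEST_PERF(cap));
	return 0;
}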
1196 static void intel_pstate_get_hwp_cap(struct cpudata *cpu) in intel_pstate_get_hwp_cap() argument
1198 int scaling = cpu->pstate.scaling; in intel_pstate_get_hwp_cap()
1200 __intel_pstate_get_hwp_cap(cpu); in intel_pstate_get_hwp_cap()
1202 cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling; in intel_pstate_get_hwp_cap()
1203 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; in intel_pstate_get_hwp_cap()
1204 if (scaling != cpu->pstate.perf_ctl_scaling) { in intel_pstate_get_hwp_cap()
1205 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; in intel_pstate_get_hwp_cap()
1207 cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq, in intel_pstate_get_hwp_cap()
1209 cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq, in intel_pstate_get_hwp_cap()
1214 static void hybrid_update_capacity(struct cpudata *cpu) in hybrid_update_capacity() argument
1229 intel_pstate_get_hwp_cap(cpu); in hybrid_update_capacity()
1231 hybrid_get_capacity_perf(cpu); in hybrid_update_capacity()
1233 if (cpu->capacity_perf > max_cap_perf) { in hybrid_update_capacity()
1234 hybrid_max_perf_cpu = cpu; in hybrid_update_capacity()
1240 if (cpu == hybrid_max_perf_cpu && cpu->capacity_perf < max_cap_perf) { in hybrid_update_capacity()
1245 hybrid_set_cpu_capacity(cpu); in hybrid_update_capacity()
1252 if (hybrid_register_perf_domain(cpu->cpu)) in hybrid_update_capacity()
1259 static void intel_pstate_hwp_set(unsigned int cpu) in intel_pstate_hwp_set() argument
1261 struct cpudata *cpu_data = all_cpu_data[cpu]; in intel_pstate_hwp_set()
1272 rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value); in intel_pstate_hwp_set()
1318 wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value); in intel_pstate_hwp_set()
1323 static void intel_pstate_hwp_offline(struct cpudata *cpu) in intel_pstate_hwp_offline() argument
1325 u64 value = READ_ONCE(cpu->hwp_req_cached); in intel_pstate_hwp_offline()
1328 intel_pstate_disable_hwp_interrupt(cpu); in intel_pstate_hwp_offline()
1337 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); in intel_pstate_hwp_offline()
1343 cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; in intel_pstate_hwp_offline()
1352 WRITE_ONCE(cpu->hwp_req_cached, value); in intel_pstate_hwp_offline()
1355 min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); in intel_pstate_hwp_offline()
1365 wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); in intel_pstate_hwp_offline()
1375 if (hybrid_max_perf_cpu == cpu) in intel_pstate_hwp_offline()
1381 hybrid_clear_cpu_capacity(cpu->cpu); in intel_pstate_hwp_offline()
1411 static void intel_pstate_hwp_reenable(struct cpudata *cpu) in intel_pstate_hwp_reenable() argument
1413 intel_pstate_hwp_enable(cpu); in intel_pstate_hwp_reenable()
1414 wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached)); in intel_pstate_hwp_reenable()
1419 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_pstate_suspend() local
1421 pr_debug("CPU %d suspending\n", cpu->cpu); in intel_pstate_suspend()
1423 cpu->suspended = true; in intel_pstate_suspend()
1426 intel_pstate_disable_hwp_interrupt(cpu); in intel_pstate_suspend()
1433 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_pstate_resume() local
1435 pr_debug("CPU %d resuming\n", cpu->cpu); in intel_pstate_resume()
1443 if (cpu->suspended && hwp_active) { in intel_pstate_resume()
1447 intel_pstate_hwp_reenable(cpu); in intel_pstate_resume()
1452 cpu->suspended = false; in intel_pstate_resume()
1459 int cpu; in intel_pstate_update_policies() local
1461 for_each_possible_cpu(cpu) in intel_pstate_update_policies()
1462 cpufreq_update_policy(cpu); in intel_pstate_update_policies()
1481 struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu); in intel_pstate_update_max_freq()
1492 struct cpudata *cpudata = all_cpu_data[policy->cpu]; in intel_pstate_update_limits()
1501 int cpu; in intel_pstate_update_limits_for_all() local
1503 for_each_possible_cpu(cpu) in intel_pstate_update_limits_for_all()
1504 intel_pstate_update_max_freq(all_cpu_data[cpu]); in intel_pstate_update_limits_for_all()
1551 struct cpudata *cpu; in show_turbo_pct() local
1560 cpu = all_cpu_data[0]; in show_turbo_pct()
1562 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; in show_turbo_pct()
1563 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; in show_turbo_pct()
1573 struct cpudata *cpu; in show_num_pstates() local
1581 cpu = all_cpu_data[0]; in show_num_pstates()
1582 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; in show_num_pstates()
1631 struct cpudata *cpu = all_cpu_data[0]; in store_no_turbo() local
1632 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; in store_no_turbo()
1647 static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type) in update_cpu_qos_request() argument
1649 struct cpudata *cpudata = all_cpu_data[cpu]; in update_cpu_qos_request()
1653 struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); in update_cpu_qos_request()
1672 pr_warn("Failed to update freq constraint: CPU%d\n", cpu); in update_cpu_qos_request()
1920 wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); in intel_pstate_notify_work()
1971 wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); in intel_pstate_disable_hwp_interrupt()
1974 cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); in intel_pstate_disable_hwp_interrupt()
1992 cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); in intel_pstate_enable_hwp_interrupt()
1999 wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); in intel_pstate_enable_hwp_interrupt()
2000 wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); in intel_pstate_enable_hwp_interrupt()
2039 wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); in intel_pstate_hwp_enable()
2041 wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); in intel_pstate_hwp_enable()
2153 static int core_get_min_pstate(int cpu) in core_get_min_pstate() argument
2157 rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); in core_get_min_pstate()
2161 static int core_get_max_pstate_physical(int cpu) in core_get_max_pstate_physical() argument
2165 rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value); in core_get_max_pstate_physical()
2169 static int core_get_tdp_ratio(int cpu, u64 plat_info) in core_get_tdp_ratio() argument
2179 err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); in core_get_tdp_ratio()
2185 err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); in core_get_tdp_ratio()
2202 static int core_get_max_pstate(int cpu) in core_get_max_pstate() argument
2210 rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); in core_get_max_pstate()
2213 tdp_ratio = core_get_tdp_ratio(cpu, plat_info); in core_get_max_pstate()
2222 err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); in core_get_max_pstate()
2237 static int core_get_turbo_pstate(int cpu) in core_get_turbo_pstate() argument
2242 rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); in core_get_turbo_pstate()
2243 nont = core_get_max_pstate(cpu); in core_get_turbo_pstate()
2260 static int knl_get_turbo_pstate(int cpu) in knl_get_turbo_pstate() argument
2265 rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); in knl_get_turbo_pstate()
2266 nont = core_get_max_pstate(cpu); in knl_get_turbo_pstate()
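Note: core_get_turbo_pstate() takes the maximum 1-core turbo ratio from the low byte of MSR_TURBO_RATIO_LIMIT and never reports less than the non-turbo maximum from core_get_max_pstate(). A sketch of that extraction and clamp (the MSR value below is invented):

#include <stdint.h>
#include <stdio.h>

static int one_core_turbo(uint64_t turbo_ratio_limit, int non_turbo_max)
{
	int ratio = turbo_ratio_limit & 0xff;	/* bits 7:0: 1-core ratio */

	return ratio > non_turbo_max ? ratio : non_turbo_max;
}

int main(void)
{
	/* low byte 0x39 = 57, non-turbo max ratio 36 */
	printf("turbo pstate = %d\n", one_core_turbo(0x2c2e323539ULL, 36));
	return 0;
}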
2273 static int hwp_get_cpu_scaling(int cpu) in hwp_get_cpu_scaling() argument
2280 if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE) in hwp_get_cpu_scaling()
2295 return intel_pstate_cppc_get_scaling(cpu); in hwp_get_cpu_scaling()
2298 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) in intel_pstate_set_pstate() argument
2300 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); in intel_pstate_set_pstate()
2301 cpu->pstate.current_pstate = pstate; in intel_pstate_set_pstate()
2307 wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, in intel_pstate_set_pstate()
2308 pstate_funcs.get_val(cpu, pstate)); in intel_pstate_set_pstate()
2311 static void intel_pstate_set_min_pstate(struct cpudata *cpu) in intel_pstate_set_min_pstate() argument
2313 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); in intel_pstate_set_min_pstate()
2316 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) in intel_pstate_get_cpu_pstates() argument
2320 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu); in intel_pstate_get_cpu_pstates()
2321 cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu); in intel_pstate_get_cpu_pstates()
2322 cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; in intel_pstate_get_cpu_pstates()
2325 __intel_pstate_get_hwp_cap(cpu); in intel_pstate_get_cpu_pstates()
2328 cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu); in intel_pstate_get_cpu_pstates()
2329 intel_pstate_hybrid_hwp_adjust(cpu); in intel_pstate_get_cpu_pstates()
2331 cpu->pstate.scaling = perf_ctl_scaling; in intel_pstate_get_cpu_pstates()
2337 hybrid_update_capacity(cpu); in intel_pstate_get_cpu_pstates()
2339 cpu->pstate.scaling = perf_ctl_scaling; in intel_pstate_get_cpu_pstates()
2340 cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); in intel_pstate_get_cpu_pstates()
2341 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu); in intel_pstate_get_cpu_pstates()
2344 if (cpu->pstate.scaling == perf_ctl_scaling) { in intel_pstate_get_cpu_pstates()
2345 cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; in intel_pstate_get_cpu_pstates()
2346 cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling; in intel_pstate_get_cpu_pstates()
2347 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling; in intel_pstate_get_cpu_pstates()
2351 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); in intel_pstate_get_cpu_pstates()
2354 pstate_funcs.get_vid(cpu); in intel_pstate_get_cpu_pstates()
2356 intel_pstate_set_min_pstate(cpu); in intel_pstate_get_cpu_pstates()
2367 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) in intel_pstate_hwp_boost_up() argument
2369 u64 hwp_req = READ_ONCE(cpu->hwp_req_cached); in intel_pstate_hwp_boost_up()
2370 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); in intel_pstate_hwp_boost_up()
2390 if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) in intel_pstate_hwp_boost_up()
2393 if (!cpu->hwp_boost_min) in intel_pstate_hwp_boost_up()
2394 cpu->hwp_boost_min = min_limit; in intel_pstate_hwp_boost_up()
2399 if (cpu->hwp_boost_min < boost_level1) in intel_pstate_hwp_boost_up()
2400 cpu->hwp_boost_min = boost_level1; in intel_pstate_hwp_boost_up()
2401 else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap)) in intel_pstate_hwp_boost_up()
2402 cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap); in intel_pstate_hwp_boost_up()
2403 else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) && in intel_pstate_hwp_boost_up()
2405 cpu->hwp_boost_min = max_limit; in intel_pstate_hwp_boost_up()
2409 hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; in intel_pstate_hwp_boost_up()
2411 cpu->last_update = cpu->sample.time; in intel_pstate_hwp_boost_up()
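Note: intel_pstate_hwp_boost_up() raises hwp_boost_min along a three-rung ladder: an intermediate boost_level1 between the minimum limit and the guaranteed ratio (its computation is elided from this listing), then the guaranteed ratio, then the highest ratio. A sketch of that progression, assuming boost_level1 is the midpoint of min and guaranteed (the 8/32/48 limits are illustrative):

#include <stdio.h>

static int boost_next(int cur, int min_limit, int guaranteed, int highest)
{
	int level1 = (guaranteed + min_limit) >> 1;	/* assumed midpoint */

	if (cur < level1)
		return level1;
	if (cur < guaranteed)
		return guaranteed;
	return highest;
}

int main(void)
{
	int boost = 8;	/* start from the minimum performance limit */

	while (boost < 48) {
		boost = boost_next(boost, 8, 32, 48);
		printf("hwp_boost_min -> %d\n", boost);	/* 20, 32, 48 */
	}
	return 0;
}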
2414 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) in intel_pstate_hwp_boost_down() argument
2416 if (cpu->hwp_boost_min) { in intel_pstate_hwp_boost_down()
2420 expired = time_after64(cpu->sample.time, cpu->last_update + in intel_pstate_hwp_boost_down()
2423 wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached); in intel_pstate_hwp_boost_down()
2424 cpu->hwp_boost_min = 0; in intel_pstate_hwp_boost_down()
2427 cpu->last_update = cpu->sample.time; in intel_pstate_hwp_boost_down()
2430 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, in intel_pstate_update_util_hwp_local() argument
2433 cpu->sample.time = time; in intel_pstate_update_util_hwp_local()
2435 if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { in intel_pstate_update_util_hwp_local()
2438 cpu->sched_flags = 0; in intel_pstate_update_util_hwp_local()
2446 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) in intel_pstate_update_util_hwp_local()
2449 cpu->last_io_update = time; in intel_pstate_update_util_hwp_local()
2452 intel_pstate_hwp_boost_up(cpu); in intel_pstate_update_util_hwp_local()
2455 intel_pstate_hwp_boost_down(cpu); in intel_pstate_update_util_hwp_local()
2462 struct cpudata *cpu = container_of(data, struct cpudata, update_util); in intel_pstate_update_util_hwp() local
2464 cpu->sched_flags |= flags; in intel_pstate_update_util_hwp()
2466 if (smp_processor_id() == cpu->cpu) in intel_pstate_update_util_hwp()
2467 intel_pstate_update_util_hwp_local(cpu, time); in intel_pstate_update_util_hwp()
2470 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) in intel_pstate_calc_avg_perf() argument
2472 struct sample *sample = &cpu->sample; in intel_pstate_calc_avg_perf()
2477 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) in intel_pstate_sample() argument
2487 if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { in intel_pstate_sample()
2493 cpu->last_sample_time = cpu->sample.time; in intel_pstate_sample()
2494 cpu->sample.time = time; in intel_pstate_sample()
2495 cpu->sample.aperf = aperf; in intel_pstate_sample()
2496 cpu->sample.mperf = mperf; in intel_pstate_sample()
2497 cpu->sample.tsc = tsc; in intel_pstate_sample()
2498 cpu->sample.aperf -= cpu->prev_aperf; in intel_pstate_sample()
2499 cpu->sample.mperf -= cpu->prev_mperf; in intel_pstate_sample()
2500 cpu->sample.tsc -= cpu->prev_tsc; in intel_pstate_sample()
2502 cpu->prev_aperf = aperf; in intel_pstate_sample()
2503 cpu->prev_mperf = mperf; in intel_pstate_sample()
2504 cpu->prev_tsc = tsc; in intel_pstate_sample()
2512 if (likely(cpu->last_sample_time)) { in intel_pstate_sample()
2513 intel_pstate_calc_avg_perf(cpu); in intel_pstate_sample()
2519 static inline int32_t get_avg_frequency(struct cpudata *cpu) in get_avg_frequency() argument
2521 return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); in get_avg_frequency()
2524 static inline int32_t get_avg_pstate(struct cpudata *cpu) in get_avg_pstate() argument
2526 return mul_ext_fp(cpu->pstate.max_pstate_physical, in get_avg_pstate()
2527 cpu->sample.core_avg_perf); in get_avg_pstate()
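Note: get_avg_frequency() turns the APERF/MPERF deltas collected in intel_pstate_sample() into an average frequency: core_avg_perf is the fixed-point ratio delta-APERF / delta-MPERF, scaled by cpu_khz. The same computation in plain integers (the deltas and base clock are example numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t d_aperf = 1800000, d_mperf = 1000000;	/* sample deltas */
	unsigned int cpu_khz = 2400000;			/* 2.4 GHz base clock */

	/* unhalted clock ran at 1.8x the base clock on average */
	unsigned int avg_khz = (uint64_t)cpu_khz * d_aperf / d_mperf;

	printf("average frequency: %u kHz\n", avg_khz);	/* 4320000 */
	return 0;
}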
2530 static inline int32_t get_target_pstate(struct cpudata *cpu) in get_target_pstate() argument
2532 struct sample *sample = &cpu->sample; in get_target_pstate()
2536 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, in get_target_pstate()
2539 if (busy_frac < cpu->iowait_boost) in get_target_pstate()
2540 busy_frac = cpu->iowait_boost; in get_target_pstate()
2545 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; in get_target_pstate()
2548 if (target < cpu->pstate.min_pstate) in get_target_pstate()
2549 target = cpu->pstate.min_pstate; in get_target_pstate()
2558 avg_pstate = get_avg_pstate(cpu); in get_target_pstate()
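Note: in get_target_pstate(), busy_frac is the MPERF/TSC utilization (shifted by aperf_mperf_shift where the hardware requires it), floored by the current iowait boost; the target is that fraction of the available maximum plus 25% headroom, clamped to the minimum P-state, then pulled halfway toward the recent average P-state when the average is higher, to damp oscillations. A floating-point sketch of that policy (the kernel does this in fixed point via div_fp()/mul_fp(); doubles are used here for readability):

#include <stdio.h>

static int target_pstate(double busy_frac, double iowait_boost,
			 int max_pstate, int min_pstate, int avg_pstate)
{
	int target;

	if (busy_frac < iowait_boost)
		busy_frac = iowait_boost;	/* floor by the I/O boost */

	/* 25% headroom over the busy fraction of the max P-state */
	target = (int)((max_pstate + (max_pstate >> 2)) * busy_frac);
	if (target < min_pstate)
		target = min_pstate;

	if (avg_pstate > target)		/* damp downward swings */
		target += (avg_pstate - target) >> 1;

	return target;
}

int main(void)
{
	/* 60% busy, no boost, turbo max 48, min 8, recent average 40 */
	printf("target = %d\n", target_pstate(0.6, 0.0, 48, 8, 40));	/* 38 */
	return 0;
}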
2565 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) in intel_pstate_prepare_request() argument
2567 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); in intel_pstate_prepare_request()
2568 int max_pstate = max(min_pstate, cpu->max_perf_ratio); in intel_pstate_prepare_request()
2573 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) in intel_pstate_update_pstate() argument
2575 if (pstate == cpu->pstate.current_pstate) in intel_pstate_update_pstate()
2578 cpu->pstate.current_pstate = pstate; in intel_pstate_update_pstate()
2579 wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); in intel_pstate_update_pstate()
2582 static void intel_pstate_adjust_pstate(struct cpudata *cpu) in intel_pstate_adjust_pstate() argument
2584 int from = cpu->pstate.current_pstate; in intel_pstate_adjust_pstate()
2588 target_pstate = get_target_pstate(cpu); in intel_pstate_adjust_pstate()
2589 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); in intel_pstate_adjust_pstate()
2590 trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); in intel_pstate_adjust_pstate()
2591 intel_pstate_update_pstate(cpu, target_pstate); in intel_pstate_adjust_pstate()
2593 sample = &cpu->sample; in intel_pstate_adjust_pstate()
2597 cpu->pstate.current_pstate, in intel_pstate_adjust_pstate()
2601 get_avg_frequency(cpu), in intel_pstate_adjust_pstate()
2602 fp_toint(cpu->iowait_boost * 100)); in intel_pstate_adjust_pstate()
2608 struct cpudata *cpu = container_of(data, struct cpudata, update_util); in intel_pstate_update_util() local
2612 if (smp_processor_id() != cpu->cpu) in intel_pstate_update_util()
2615 delta_ns = time - cpu->last_update; in intel_pstate_update_util()
2619 cpu->iowait_boost = ONE_EIGHTH_FP; in intel_pstate_update_util()
2620 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { in intel_pstate_update_util()
2621 cpu->iowait_boost <<= 1; in intel_pstate_update_util()
2622 if (cpu->iowait_boost > int_tofp(1)) in intel_pstate_update_util()
2623 cpu->iowait_boost = int_tofp(1); in intel_pstate_update_util()
2625 cpu->iowait_boost = ONE_EIGHTH_FP; in intel_pstate_update_util()
2627 } else if (cpu->iowait_boost) { in intel_pstate_update_util()
2630 cpu->iowait_boost = 0; in intel_pstate_update_util()
2632 cpu->iowait_boost >>= 1; in intel_pstate_update_util()
2634 cpu->last_update = time; in intel_pstate_update_util()
2635 delta_ns = time - cpu->sample.time; in intel_pstate_update_util()
2639 if (intel_pstate_sample(cpu, time)) in intel_pstate_update_util()
2640 intel_pstate_adjust_pstate(cpu); in intel_pstate_update_util()
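Note: the iowait handling above implements a geometric boost: on an I/O wakeup the boost starts at one eighth of full scale (ONE_EIGHTH_FP) and doubles on each subsequent wakeup until it saturates at 1.0 (int_tofp(1)); once I/O stops it is halved each cycle, or dropped to zero after a long gap. A toy fixed-point demonstration of the ramp-up:

#include <stdio.h>

#define ONE_FP		(1 << 8)	/* toy Q8 fixed point: 1.0 */
#define ONE_EIGHTH_FP	(ONE_FP / 8)

int main(void)
{
	int boost = 0, i;

	for (i = 0; i < 5; i++) {	/* consecutive I/O wakeups */
		boost = boost ? boost << 1 : ONE_EIGHTH_FP;
		if (boost > ONE_FP)
			boost = ONE_FP;	/* saturate at 1.0 */
		printf("wakeup %d: boost = %d/%d\n", i + 1, boost, ONE_FP);
	}
	return 0;
}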
2743 struct cpudata *cpu; in intel_pstate_init_cpu() local
2745 cpu = all_cpu_data[cpunum]; in intel_pstate_init_cpu()
2747 if (!cpu) { in intel_pstate_init_cpu()
2748 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); in intel_pstate_init_cpu()
2749 if (!cpu) in intel_pstate_init_cpu()
2752 WRITE_ONCE(all_cpu_data[cpunum], cpu); in intel_pstate_init_cpu()
2754 cpu->cpu = cpunum; in intel_pstate_init_cpu()
2756 cpu->epp_default = -EINVAL; in intel_pstate_init_cpu()
2759 intel_pstate_hwp_enable(cpu); in intel_pstate_init_cpu()
2770 intel_pstate_hwp_reenable(cpu); in intel_pstate_init_cpu()
2773 cpu->epp_powersave = -EINVAL; in intel_pstate_init_cpu()
2774 cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; in intel_pstate_init_cpu()
2776 intel_pstate_get_cpu_pstates(cpu); in intel_pstate_init_cpu()
2785 struct cpudata *cpu = all_cpu_data[cpu_num]; in intel_pstate_set_update_util_hook() local
2790 if (cpu->update_util_set) in intel_pstate_set_update_util_hook()
2794 cpu->sample.time = 0; in intel_pstate_set_update_util_hook()
2795 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, in intel_pstate_set_update_util_hook()
2799 cpu->update_util_set = true; in intel_pstate_set_update_util_hook()
2802 static void intel_pstate_clear_update_util_hook(unsigned int cpu) in intel_pstate_clear_update_util_hook() argument
2804 struct cpudata *cpu_data = all_cpu_data[cpu]; in intel_pstate_clear_update_util_hook()
2809 cpufreq_remove_update_util_hook(cpu); in intel_pstate_clear_update_util_hook()
2814 static int intel_pstate_get_max_freq(struct cpudata *cpu) in intel_pstate_get_max_freq() argument
2817 cpu->pstate.max_freq : cpu->pstate.turbo_freq; in intel_pstate_get_max_freq()
2820 static void intel_pstate_update_perf_limits(struct cpudata *cpu, in intel_pstate_update_perf_limits() argument
2824 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; in intel_pstate_update_perf_limits()
2840 if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) { in intel_pstate_update_perf_limits()
2844 max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq); in intel_pstate_update_perf_limits()
2846 min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq); in intel_pstate_update_perf_limits()
2850 cpu->cpu, min_policy_perf, max_policy_perf); in intel_pstate_update_perf_limits()
2854 cpu->min_perf_ratio = min_policy_perf; in intel_pstate_update_perf_limits()
2855 cpu->max_perf_ratio = max_policy_perf; in intel_pstate_update_perf_limits()
2857 int turbo_max = cpu->pstate.turbo_pstate; in intel_pstate_update_perf_limits()
2865 pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu, in intel_pstate_update_perf_limits()
2868 cpu->min_perf_ratio = max(min_policy_perf, global_min); in intel_pstate_update_perf_limits()
2869 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); in intel_pstate_update_perf_limits()
2870 cpu->max_perf_ratio = min(max_policy_perf, global_max); in intel_pstate_update_perf_limits()
2871 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); in intel_pstate_update_perf_limits()
2874 cpu->min_perf_ratio = min(cpu->min_perf_ratio, in intel_pstate_update_perf_limits()
2875 cpu->max_perf_ratio); in intel_pstate_update_perf_limits()
2878 pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu, in intel_pstate_update_perf_limits()
2879 cpu->max_perf_ratio, in intel_pstate_update_perf_limits()
2880 cpu->min_perf_ratio); in intel_pstate_update_perf_limits()
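Note: the max()/min() chain above intersects the per-policy range with the global sysfs limits while keeping min_perf_ratio <= max_perf_ratio. The same clamping as a standalone helper (function and parameter names here are illustrative):

#include <stdio.h>

static void clamp_limits(int min_policy, int max_policy,
			 int global_min, int global_max,
			 int *min_ratio, int *max_ratio)
{
	*min_ratio = min_policy > global_min ? min_policy : global_min;
	if (*min_ratio > max_policy)
		*min_ratio = max_policy;

	*max_ratio = max_policy < global_max ? max_policy : global_max;
	if (*max_ratio < min_policy)
		*max_ratio = min_policy;

	if (*min_ratio > *max_ratio)	/* final ordering guarantee */
		*min_ratio = *max_ratio;
}

int main(void)
{
	int lo, hi;

	clamp_limits(10, 48, 12, 40, &lo, &hi);
	printf("min=%d max=%d\n", lo, hi);	/* min=12 max=40 */
	return 0;
}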
2885 struct cpudata *cpu; in intel_pstate_set_policy() local
2893 cpu = all_cpu_data[policy->cpu]; in intel_pstate_set_policy()
2894 cpu->policy = policy->policy; in intel_pstate_set_policy()
2898 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); in intel_pstate_set_policy()
2900 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { in intel_pstate_set_policy()
2901 int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); in intel_pstate_set_policy()
2907 intel_pstate_clear_update_util_hook(policy->cpu); in intel_pstate_set_policy()
2908 intel_pstate_set_pstate(cpu, pstate); in intel_pstate_set_policy()
2910 intel_pstate_set_update_util_hook(policy->cpu); in intel_pstate_set_policy()
2920 intel_pstate_clear_update_util_hook(policy->cpu); in intel_pstate_set_policy()
2921 intel_pstate_hwp_set(policy->cpu); in intel_pstate_set_policy()
2934 static void intel_pstate_adjust_policy_max(struct cpudata *cpu, in intel_pstate_adjust_policy_max() argument
2938 cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && in intel_pstate_adjust_policy_max()
2940 policy->max > cpu->pstate.max_freq) { in intel_pstate_adjust_policy_max()
2946 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, in intel_pstate_verify_cpu_policy() argument
2952 intel_pstate_get_hwp_cap(cpu); in intel_pstate_verify_cpu_policy()
2954 cpu->pstate.max_freq : cpu->pstate.turbo_freq; in intel_pstate_verify_cpu_policy()
2956 max_freq = intel_pstate_get_max_freq(cpu); in intel_pstate_verify_cpu_policy()
2960 intel_pstate_adjust_policy_max(cpu, policy); in intel_pstate_verify_cpu_policy()
2965 intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy); in intel_pstate_verify_policy()
2972 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_cpu_offline() local
2974 pr_debug("CPU %d going offline\n", cpu->cpu); in intel_cpufreq_cpu_offline()
2976 if (cpu->suspended) in intel_cpufreq_cpu_offline()
2986 intel_pstate_hwp_offline(cpu); in intel_cpufreq_cpu_offline()
2988 intel_pstate_set_min_pstate(cpu); in intel_cpufreq_cpu_offline()
2997 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_pstate_cpu_online() local
2999 pr_debug("CPU %d going online\n", cpu->cpu); in intel_pstate_cpu_online()
3008 intel_pstate_hwp_reenable(cpu); in intel_pstate_cpu_online()
3009 cpu->suspended = false; in intel_pstate_cpu_online()
3011 hybrid_update_capacity(cpu); in intel_pstate_cpu_online()
3019 intel_pstate_clear_update_util_hook(policy->cpu); in intel_pstate_cpu_offline()
3026 pr_debug("CPU %d exiting\n", policy->cpu); in intel_pstate_cpu_exit()
3033 struct cpudata *cpu; in __intel_pstate_cpu_init() local
3036 rc = intel_pstate_init_cpu(policy->cpu); in __intel_pstate_cpu_init()
3040 cpu = all_cpu_data[policy->cpu]; in __intel_pstate_cpu_init()
3042 cpu->max_perf_ratio = 0xFF; in __intel_pstate_cpu_init()
3043 cpu->min_perf_ratio = 0; in __intel_pstate_cpu_init()
3046 policy->cpuinfo.min_freq = cpu->pstate.min_freq; in __intel_pstate_cpu_init()
3048 cpu->pstate.max_freq : cpu->pstate.turbo_freq; in __intel_pstate_cpu_init()
3074 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_pstate_cpu_init() local
3076 cpu->epp_cached = intel_pstate_get_epp(cpu, 0); in intel_pstate_cpu_init()
3098 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_verify_policy() local
3100 intel_pstate_verify_cpu_policy(cpu, policy); in intel_cpufreq_verify_policy()
3101 intel_pstate_update_perf_limits(cpu, policy->min, policy->max); in intel_cpufreq_verify_policy()
3122 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) in intel_cpufreq_trace() argument
3129 if (!intel_pstate_sample(cpu, ktime_get())) in intel_cpufreq_trace()
3132 sample = &cpu->sample; in intel_cpufreq_trace()
3136 cpu->pstate.current_pstate, in intel_cpufreq_trace()
3140 get_avg_frequency(cpu), in intel_cpufreq_trace()
3141 fp_toint(cpu->iowait_boost * 100)); in intel_cpufreq_trace()
3144 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, in intel_cpufreq_hwp_update() argument
3147 u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; in intel_cpufreq_hwp_update()
3161 WRITE_ONCE(cpu->hwp_req_cached, value); in intel_cpufreq_hwp_update()
3165 wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); in intel_cpufreq_hwp_update()
3168 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, in intel_cpufreq_perf_ctl_update() argument
3173 pstate_funcs.get_val(cpu, target_pstate)); in intel_cpufreq_perf_ctl_update()
3175 wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL, in intel_cpufreq_perf_ctl_update()
3176 pstate_funcs.get_val(cpu, target_pstate)); in intel_cpufreq_perf_ctl_update()
3182 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_update_pstate() local
3183 int old_pstate = cpu->pstate.current_pstate; in intel_cpufreq_update_pstate()
3185 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); in intel_cpufreq_update_pstate()
3188 target_pstate : cpu->max_perf_ratio; in intel_cpufreq_update_pstate()
3190 intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, in intel_cpufreq_update_pstate()
3193 intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); in intel_cpufreq_update_pstate()
3196 cpu->pstate.current_pstate = target_pstate; in intel_cpufreq_update_pstate()
3198 intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH : in intel_cpufreq_update_pstate()
3208 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_target() local
3217 target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation); in intel_cpufreq_target()
3220 freqs.new = target_pstate * cpu->pstate.scaling; in intel_cpufreq_target()
3230 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_fast_switch() local
3233 target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq); in intel_cpufreq_fast_switch()
3237 return target_pstate * cpu->pstate.scaling; in intel_cpufreq_fast_switch()
3245 struct cpudata *cpu = all_cpu_data[cpunum]; in intel_cpufreq_adjust_perf() local
3246 u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); in intel_cpufreq_adjust_perf()
3247 int old_pstate = cpu->pstate.current_pstate; in intel_cpufreq_adjust_perf()
3264 if (min_pstate < cpu->pstate.min_pstate) in intel_cpufreq_adjust_perf()
3265 min_pstate = cpu->pstate.min_pstate; in intel_cpufreq_adjust_perf()
3267 if (min_pstate < cpu->min_perf_ratio) in intel_cpufreq_adjust_perf()
3268 min_pstate = cpu->min_perf_ratio; in intel_cpufreq_adjust_perf()
3270 if (min_pstate > cpu->max_perf_ratio) in intel_cpufreq_adjust_perf()
3271 min_pstate = cpu->max_perf_ratio; in intel_cpufreq_adjust_perf()
3273 max_pstate = min(cap_pstate, cpu->max_perf_ratio); in intel_cpufreq_adjust_perf()
3279 intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true); in intel_cpufreq_adjust_perf()
3281 cpu->pstate.current_pstate = target_pstate; in intel_cpufreq_adjust_perf()
3282 intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); in intel_cpufreq_adjust_perf()
3288 struct cpudata *cpu; in intel_cpufreq_cpu_init() local
3292 dev = get_cpu_device(policy->cpu); in intel_cpufreq_cpu_init()
3310 cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_cpu_init()
3317 intel_pstate_get_hwp_cap(cpu); in intel_cpufreq_cpu_init()
3319 rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value); in intel_cpufreq_cpu_init()
3320 WRITE_ONCE(cpu->hwp_req_cached, value); in intel_cpufreq_cpu_init()
3322 cpu->epp_cached = intel_pstate_get_epp(cpu, value); in intel_cpufreq_cpu_init()
3327 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); in intel_cpufreq_cpu_init()
3336 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); in intel_cpufreq_cpu_init()
3377 struct cpudata *cpu = all_cpu_data[policy->cpu]; in intel_cpufreq_suspend() local
3378 u64 value = READ_ONCE(cpu->hwp_req_cached); in intel_cpufreq_suspend()
3386 wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value); in intel_cpufreq_suspend()
3387 WRITE_ONCE(cpu->hwp_req_cached, value); in intel_cpufreq_suspend()
3412 unsigned int cpu; in intel_pstate_driver_cleanup() local
3415 for_each_online_cpu(cpu) { in intel_pstate_driver_cleanup()
3416 if (all_cpu_data[cpu]) { in intel_pstate_driver_cleanup()
3418 intel_pstate_clear_update_util_hook(cpu); in intel_pstate_driver_cleanup()
3420 kfree(all_cpu_data[cpu]); in intel_pstate_driver_cleanup()
3421 WRITE_ONCE(all_cpu_data[cpu], NULL); in intel_pstate_driver_cleanup()