Lines Matching +full:max +full:- +full:reason

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 #define pr_fmt(fmt) "powernv-cpufreq: " fmt
43 * On an idle system we want the global pstate to ramp down from the max value
44 * to the min over a span of ~5 secs. We also want it to ramp down slowly at
45 * first and then more rapidly later on.
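The comment above describes a ramp that starts gently and accelerates, which points at a percentage growing quadratically with elapsed time. A minimal standalone sketch of that idea (the 5120 ms window and the >> 18 scaling are illustrative choices that make the ramp reach 100% at ~5 s, not necessarily the driver's exact constants):

    #include <stdio.h>

    /* Assumed constants: the ramp completes after ~5.12 s; shifting by 18
     * makes ramp_down_percent(MAX_RAMP_DOWN_TIME) come out to exactly 100. */
    #define MAX_RAMP_DOWN_TIME      5120    /* ms */
    #define ramp_down_percent(time) (((time) * (time)) >> 18)

    int main(void)
    {
            unsigned int t;

            /* Prints 0, 4, 16, 36, 64, 100: small corrections early in the
             * idle period, aggressive ramp-down toward the end. */
            for (t = 0; t <= MAX_RAMP_DOWN_TIME; t += 1024)
                    printf("%4u ms -> %3u%%\n", t, ramp_down_percent(t));
            return 0;
    }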
60 * struct global_pstate_info - Per policy data structure to maintain history of
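The kernel-doc line above introduces the per-policy ramp-down history; its members can be pieced together from the accesses visible further down in this listing (reset_gpstates(), gpstate_timer_handler(), powernv_cpufreq_target_index()). A hedged reconstruction, with the field types being assumptions:

    /* Sketch assembled from the field accesses shown below; types assumed. */
    struct global_pstate_info {
            int highest_lpstate_idx;        /* highest local pstate index reached */
            unsigned int elapsed_time;      /* ms spent ramping down so far */
            unsigned int last_sampled_time; /* ms timestamp of the last sample */
            int last_lpstate_idx;           /* local pstate index last requested */
            int last_gpstate_idx;           /* global pstate index last programmed */
            spinlock_t gpstate_lock;        /* serialises timer vs. target_index */
            struct timer_list timer;        /* ramp-down sampling timer */
            struct cpufreq_policy *policy;  /* owning policy (driver_data backref) */
    };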
140 int reason[OCC_MAX_REASON]; member
150 * max, min and nominal frequencies. It also stores number of
154 * non-turbo frequency.
158 unsigned int max; member
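Judging from the fragments above and the later uses of powernv_pstate_info.min/.max, this structure caches positions in the driver's cpufreq frequency table rather than raw frequencies. A hedged reconstruction (every field name except max is an assumption inferred from the surrounding text):

    /* Sketch only: indices into the powernv cpufreq frequency table. */
    static struct powernv_pstate_info {
            unsigned int min;        /* index of the lowest-frequency entry */
            unsigned int max;        /* index of the highest-frequency entry */
            unsigned int nominal;    /* index of the highest non-turbo entry */
            unsigned int nr_pstates; /* number of valid table entries */
    } powernv_pstate_info;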
208 if (revmap_data->pstate_id == pstate) in pstate_to_idx()
209 return revmap_data->cpufreq_table_idx; in pstate_to_idx()
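The two lines above from pstate_to_idx() look like the body of a hash-bucket walk over the reverse map that init_powernv_pstates() populates with hash_add() further down. A sketch of that lookup, assuming the standard <linux/hashtable.h> helpers; the struct name and the not-found fallback are assumptions:

    /* Sketch: translate a hardware pstate id back to its cpufreq table index
     * by probing the same bucket that hash_add() filled at init time. */
    static int pstate_to_idx(u8 pstate)
    {
            unsigned int key = pstate % POWERNV_MAX_PSTATES;
            struct pstate_idx_revmap_data *revmap_data;     /* name assumed */

            hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
                    if (revmap_data->pstate_id == pstate)
                            return revmap_data->cpufreq_table_idx;
            }

            /* Assumed fallback when the id is unknown. */
            return powernv_pstate_info.nominal;
    }

The reverse map buys an O(1) id-to-index translation on the hot frequency-setting and throttle-check paths instead of a linear scan of the frequency table.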
218 struct global_pstate_info *gpstates = policy->driver_data; in reset_gpstates()
220 gpstates->highest_lpstate_idx = 0; in reset_gpstates()
221 gpstates->elapsed_time = 0; in reset_gpstates()
222 gpstates->last_sampled_time = 0; in reset_gpstates()
223 gpstates->last_lpstate_idx = 0; in reset_gpstates()
224 gpstates->last_gpstate_idx = 0; in reset_gpstates()
229 * from the firmware passed via device-tree
239 int rc = -ENODEV; in init_powernv_pstates()
241 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); in init_powernv_pstates()
243 pr_warn("power-mgt node not found\n"); in init_powernv_pstates()
244 return -ENODEV; in init_powernv_pstates()
247 if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) { in init_powernv_pstates()
248 pr_warn("ibm,pstate-min node not found\n"); in init_powernv_pstates()
252 if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) { in init_powernv_pstates()
253 pr_warn("ibm,pstate-max node not found\n"); in init_powernv_pstates()
257 if (of_property_read_u32(power_mgt, "ibm,pstate-nominal", in init_powernv_pstates()
259 pr_warn("ibm,pstate-nominal not found\n"); in init_powernv_pstates()
263 if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo", in init_powernv_pstates()
269 if (of_property_read_u32(power_mgt, "ibm,pstate-turbo", in init_powernv_pstates()
281 pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min, in init_powernv_pstates()
286 pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); in init_powernv_pstates()
288 pr_warn("ibm,pstate-ids not found\n"); in init_powernv_pstates()
292 pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz", in init_powernv_pstates()
295 pr_warn("ibm,pstate-frequencies-mhz not found\n"); in init_powernv_pstates()
300 pr_warn("Entries in ibm,pstate-ids and " in init_powernv_pstates()
301 "ibm,pstate-frequencies-mhz does not match\n"); in init_powernv_pstates()
325 rc = -ENOMEM; in init_powernv_pstates()
329 revmap_data->pstate_id = id & 0xFF; in init_powernv_pstates()
330 revmap_data->cpufreq_table_idx = i; in init_powernv_pstates()
331 key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES; in init_powernv_pstates()
332 hash_add(pstate_revmap, &revmap_data->hentry, key); in init_powernv_pstates()
335 powernv_pstate_info.max = i; in init_powernv_pstates()
344 for (j = i - 1; j >= (int)powernv_pstate_info.max; j--) in init_powernv_pstates()
375 * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
400 struct chip *chip = per_cpu(chip_info, policy->cpu); \
402 return sprintf(buf, "%u\n", chip->member); \
407 throttle_attr(unthrottle, reason[NO_THROTTLE]);
408 throttle_attr(powercap, reason[POWERCAP]);
409 throttle_attr(overtemp, reason[CPU_OVERTEMP]);
410 throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
411 throttle_attr(overcurrent, reason[OVERCURRENT]);
412 throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
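Given the macro body shown just above (the per_cpu()/sprintf() lines), each throttle_attr(name, member) invocation presumably stamps out one sysfs show routine per throttle counter. For example, throttle_attr(powercap, reason[POWERCAP]) would expand to roughly the following; the name##_show token pasting is an assumption:

    /* Sketch of one expansion, assuming the usual name##_show pasting: */
    static ssize_t powercap_show(struct cpufreq_policy *policy, char *buf)
    {
            struct chip *chip = per_cpu(chip_info, policy->cpu);

            /* reason[POWERCAP] is the per-chip counter bumped when an OCC
             * throttle message with that status arrives (see the
             * chips[i].reason[omsg.throttle_status]++ line further down). */
            return sprintf(buf, "%u\n", chip->reason[POWERCAP]);
    }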
485 * ((struct powernv_smp_call_data *)arg)->freq;
493 freq_data->pstate_id = extract_local_pstate(pmspr_val); in powernv_read_cpu_freq()
494 freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); in powernv_read_cpu_freq()
497 raw_smp_processor_id(), pmspr_val, freq_data->pstate_id, in powernv_read_cpu_freq()
498 freq_data->freq); in powernv_read_cpu_freq()
523 * on this CPU should be present in freq_data->pstate_id.
529 unsigned long pstate_ul = freq_data->pstate_id; in set_pstate()
530 unsigned long gpstate_ul = freq_data->gpstate_id; in set_pstate()
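set_pstate() receives both the local and the global pstate id and writes them into the PMCR special-purpose register. A sketch of how that write plausibly looks; the helper names (get_pmspr/set_pmspr, SPRN_PMCR) and the exact byte positions are assumptions, not taken from the lines shown here:

    /* Sketch: pack the requested global and local pstates into PMCR.
     * Helper names and bit positions are assumed. */
    static void set_pstate(void *data)
    {
            struct powernv_smp_call_data *freq_data = data;
            unsigned long pstate_ul = freq_data->pstate_id & 0xFF;
            unsigned long gpstate_ul = freq_data->gpstate_id & 0xFF;
            unsigned long val;

            val = get_pmspr(SPRN_PMCR);
            val &= 0x0000FFFFFFFFFFFFULL;   /* clear the two pstate bytes */

            /* assumed layout: global pstate in the top byte, local in the next */
            val |= (gpstate_ul << 56) | (pstate_ul << 48);

            set_pmspr(SPRN_PMCR, val);
    }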
569 if (pmsr_pmax_idx != powernv_pstate_info.max) { in powernv_cpufreq_throttle_check()
570 if (chip->throttled) in powernv_cpufreq_throttle_check()
572 chip->throttled = true; in powernv_cpufreq_throttle_check()
575 cpu, chip->id, pmsr_pmax, in powernv_cpufreq_throttle_check()
577 chip->throttle_sub_turbo++; in powernv_cpufreq_throttle_check()
579 chip->throttle_turbo++; in powernv_cpufreq_throttle_check()
581 trace_powernv_throttle(chip->id, in powernv_cpufreq_throttle_check()
582 throttle_reason[chip->throttle_reason], in powernv_cpufreq_throttle_check()
584 } else if (chip->throttled) { in powernv_cpufreq_throttle_check()
585 chip->throttled = false; in powernv_cpufreq_throttle_check()
586 trace_powernv_throttle(chip->id, in powernv_cpufreq_throttle_check()
587 throttle_reason[chip->throttle_reason], in powernv_cpufreq_throttle_check()
611 * calc_global_pstate - Calculate global pstate
634 (powernv_pstate_info.min - highest_lpstate_idx)) / 100; in calc_global_pstate()
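Working through the index_diff expression in calc_global_pstate(): higher pstates live at lower table indices, so (powernv_pstate_info.min - highest_lpstate_idx) is the whole index span available for the ramp, and the elapsed-time percentage scales it. A small standalone example with illustrative numbers only:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values, not the driver's actual table. */
            int highest_lpstate_idx = 10;   /* index of the highest pstate reached */
            int min_idx = 50;               /* stand-in for powernv_pstate_info.min */
            int ramp_pct = 25;              /* stand-in for ramp_down_percent() */

            /* Same shape as the expression above: scale the 40-step span by
             * the fraction of the ramp-down window that has elapsed. */
            int index_diff = (ramp_pct * (min_idx - highest_lpstate_idx)) / 100;

            /* Prints 10: the global pstate index sits a quarter of the way
             * from highest_lpstate_idx toward the minimum pstate's index. */
            printf("index_diff = %d\n", index_diff);
            return 0;
    }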
653 if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL) in queue_gpstate_timer()
655 timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time; in queue_gpstate_timer()
659 mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval)); in queue_gpstate_timer()
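The queue_gpstate_timer() fragment above skips the middle of the calculation; reading it together, the intent appears to be to re-arm the timer every GPSTATE_TIMER_INTERVAL ms while never letting the accumulated ramp time overshoot MAX_RAMP_DOWN_TIME. A sketch of that clamp, with the else branch filled in as an assumption:

    /* Sketch: choose the next sampling interval, clamped so the total
     * ramp-down window never exceeds MAX_RAMP_DOWN_TIME. */
    static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
    {
            unsigned int timer_interval;

            if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
                 > MAX_RAMP_DOWN_TIME)
                    timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
            else
                    timer_interval = GPSTATE_TIMER_INTERVAL;  /* assumed default */

            mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
    }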
674 struct cpufreq_policy *policy = gpstates->policy; in gpstate_timer_handler()
678 - gpstates->last_sampled_time; in gpstate_timer_handler()
681 if (!spin_trylock(&gpstates->gpstate_lock)) in gpstate_timer_handler()
685 * it back to one of the policy->cpus in gpstate_timer_handler()
687 if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) { in gpstate_timer_handler()
688 gpstates->timer.expires = jiffies + msecs_to_jiffies(1); in gpstate_timer_handler()
689 add_timer_on(&gpstates->timer, cpumask_first(policy->cpus)); in gpstate_timer_handler()
690 spin_unlock(&gpstates->gpstate_lock); in gpstate_timer_handler()
696 * The value in gpstates->last_lpstate_idx may be stale in gpstate_timer_handler()
704 spin_unlock(&gpstates->gpstate_lock); in gpstate_timer_handler()
708 gpstates->last_sampled_time += time_diff; in gpstate_timer_handler()
709 gpstates->elapsed_time += time_diff; in gpstate_timer_handler()
711 if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) { in gpstate_timer_handler()
715 gpstates->highest_lpstate_idx = gpstate_idx; in gpstate_timer_handler()
718 gpstate_idx = calc_global_pstate(gpstates->elapsed_time, in gpstate_timer_handler()
719 gpstates->highest_lpstate_idx, in gpstate_timer_handler()
723 gpstates->last_gpstate_idx = gpstate_idx; in gpstate_timer_handler()
724 gpstates->last_lpstate_idx = lpstate_idx; in gpstate_timer_handler()
729 if (gpstate_idx != gpstates->last_lpstate_idx) in gpstate_timer_handler()
733 spin_unlock(&gpstates->gpstate_lock); in gpstate_timer_handler()
739 * mask policy->cpus
746 struct global_pstate_info *gpstates = policy->driver_data; in powernv_cpufreq_target_index()
768 spin_lock(&gpstates->gpstate_lock); in powernv_cpufreq_target_index()
770 if (!gpstates->last_sampled_time) { in powernv_cpufreq_target_index()
772 gpstates->highest_lpstate_idx = new_index; in powernv_cpufreq_target_index()
776 if (gpstates->last_gpstate_idx < new_index) { in powernv_cpufreq_target_index()
777 gpstates->elapsed_time += cur_msec - in powernv_cpufreq_target_index()
778 gpstates->last_sampled_time; in powernv_cpufreq_target_index()
785 if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) { in powernv_cpufreq_target_index()
787 gpstates->highest_lpstate_idx = new_index; in powernv_cpufreq_target_index()
791 gpstate_idx = calc_global_pstate(gpstates->elapsed_time, in powernv_cpufreq_target_index()
792 gpstates->highest_lpstate_idx, in powernv_cpufreq_target_index()
797 gpstates->highest_lpstate_idx = new_index; in powernv_cpufreq_target_index()
808 del_timer_sync(&gpstates->timer); in powernv_cpufreq_target_index()
812 gpstates->last_sampled_time = cur_msec; in powernv_cpufreq_target_index()
813 gpstates->last_gpstate_idx = gpstate_idx; in powernv_cpufreq_target_index()
814 gpstates->last_lpstate_idx = new_index; in powernv_cpufreq_target_index()
816 spin_unlock(&gpstates->gpstate_lock); in powernv_cpufreq_target_index()
822 * if current CPU is within policy->cpus (core) in powernv_cpufreq_target_index()
824 smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); in powernv_cpufreq_target_index()
834 base = cpu_first_thread_sibling(policy->cpu); in powernv_cpufreq_cpu_init()
837 cpumask_set_cpu(base + i, policy->cpus); in powernv_cpufreq_cpu_init()
839 kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name); in powernv_cpufreq_cpu_init()
843 ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp); in powernv_cpufreq_cpu_init()
846 policy->cpu); in powernv_cpufreq_cpu_init()
853 policy->freq_table = powernv_freqs; in powernv_cpufreq_cpu_init()
854 policy->fast_switch_possible = true; in powernv_cpufreq_cpu_init()
859 /* Initialise Gpstate ramp-down timer only on POWER8 */ in powernv_cpufreq_cpu_init()
862 return -ENOMEM; in powernv_cpufreq_cpu_init()
864 policy->driver_data = gpstates; in powernv_cpufreq_cpu_init()
867 gpstates->policy = policy; in powernv_cpufreq_cpu_init()
868 timer_setup(&gpstates->timer, gpstate_timer_handler, in powernv_cpufreq_cpu_init()
870 gpstates->timer.expires = jiffies + in powernv_cpufreq_cpu_init()
872 spin_lock_init(&gpstates->gpstate_lock); in powernv_cpufreq_cpu_init()
880 struct global_pstate_info *gpstates = policy->driver_data; in powernv_cpufreq_cpu_exit()
884 smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); in powernv_cpufreq_cpu_exit()
886 del_timer_sync(&gpstates->timer); in powernv_cpufreq_cpu_exit()
888 kfree(policy->driver_data); in powernv_cpufreq_cpu_exit()
921 cpumask_and(&mask, &chip->mask, cpu_online_mask); in powernv_cpufreq_work_fn()
925 if (!chip->restore) in powernv_cpufreq_work_fn()
928 chip->restore = false; in powernv_cpufreq_work_fn()
935 index = cpufreq_table_find_index_c(policy, policy->cur, false); in powernv_cpufreq_work_fn()
937 cpumask_andnot(&mask, &mask, policy->cpus); in powernv_cpufreq_work_fn()
954 omsg.type = be64_to_cpu(msg->params[0]); in powernv_cpufreq_occ_msg()
959 pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n"); in powernv_cpufreq_occ_msg()
977 omsg.chip = be64_to_cpu(msg->params[1]); in powernv_cpufreq_occ_msg()
978 omsg.throttle_status = be64_to_cpu(msg->params[2]); in powernv_cpufreq_occ_msg()
1000 chips[i].reason[omsg.throttle_status]++; in powernv_cpufreq_occ_msg()
1032 .name = "powernv-cpufreq",
1053 return -ENOMEM; in init_chip_info()
1058 ret = -ENOMEM; in init_chip_info()
1069 cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]); in init_chip_info()
1074 ret = -ENOMEM; in init_chip_info()
1117 return -ENODEV; in powernv_cpufreq_init()