Lines Matching +full:idle +full:- +full:state
1 // SPDX-License-Identifier: GPL-2.0-or-later
21 #include <asm/code-patching.h>
29 /* Power ISA 3.0 allows for stop states 0x0 - 0xF */
40 * The default stop state that will be used by ppc_md.power_save
48 * First stop state levels when SPR and TB loss can occur.
54 * psscr value and mask of the deepest stop idle state.
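The val/mask pairs described above are consumed with a read-modify-write of the PSSCR: the mask picks out the fields a given stop state owns, the value supplies them, and everything else is preserved. A minimal sketch of that merge (the function name is illustrative; SPRN_PSSCR and mfspr() are the real asm/reg.h interfaces):

	/* Sketch: merge a stop state's PSSCR value under its mask. */
	static u64 compose_psscr_sketch(u64 stop_psscr_val, u64 stop_psscr_mask)
	{
		u64 psscr = mfspr(SPRN_PSSCR);	/* current register contents */

		/* keep unmasked fields, substitute the state's own fields */
		return (psscr & ~stop_psscr_mask) | stop_psscr_val;
	}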
165 * Used to store fastsleep workaround state
166 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
167 * 1 - Workaround applied once, never undone.
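The sysfs store routine below enforces that this flag is a one-way latch: userspace can only move it from 0 to 1. A hedged sketch of that policy, omitting the code patching and the IPIs the real routine also performs (the helper signature here is simplified):

	/* Sketch: accept only a 0 -> 1 transition of the apply-once flag. */
	static ssize_t applyonce_store_sketch(const char *buf, size_t count)
	{
		u8 val;

		if (kstrtou8(buf, 0, &val) || val != 1)
			return -EINVAL;		/* only "1" is meaningful */

		if (fastsleep_workaround_applyonce == 1)
			return count;		/* already latched: nothing to do */

		/* ... patch entry/exit paths and IPI all online cores ... */
		fastsleep_workaround_applyonce = 1;
		return count;
	}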
185 return -EINVAL; in store_fastsleep_workaround_applyonce()
192 * fastsleep workaround needs to be left in 'applied' state on all in store_fastsleep_workaround_applyonce()
193 * the cores. Do this by- in store_fastsleep_workaround_applyonce()
200 * state would have applied workaround. in store_fastsleep_workaround_applyonce()
218 return -EIO; in store_fastsleep_workaround_applyonce()
230 unsigned long *state = &paca_ptrs[first]->idle_state; in atomic_start_thread_idle() local
232 clear_bit(thread_nr, state); in atomic_start_thread_idle()
240 unsigned long *state = &paca_ptrs[first]->idle_state; in atomic_stop_thread_idle() local
242 set_bit(thread_nr, state); in atomic_stop_thread_idle()
249 unsigned long *lock = &paca_ptrs[first]->idle_lock; in atomic_lock_thread_idle()
260 unsigned long *state = &paca_ptrs[first]->idle_state; in atomic_unlock_and_stop_thread_idle() local
261 unsigned long *lock = &paca_ptrs[first]->idle_lock; in atomic_unlock_and_stop_thread_idle()
262 u64 s = READ_ONCE(*state); in atomic_unlock_and_stop_thread_idle()
270 tmp = cmpxchg(state, s, new); in atomic_unlock_and_stop_thread_idle()
282 unsigned long *lock = &paca_ptrs[first]->idle_lock; in atomic_unlock_thread_idle()
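These helpers keep two words in the first sibling's paca: idle_state, with one bit per thread (a set bit means the thread is running, per the BUG_ON/clear below), and idle_lock, guarding the non-atomic sections. The unlock-and-stop variant above is a cmpxchg retry loop. A sketch of it, assuming a NR_PNV_CORE_IDLE_LOCK_BIT bit number for the lock word:

	/* Sketch: mark this thread running again, then release the core lock. */
	static void unlock_and_stop_idle_sketch(unsigned long *state,
						unsigned long *lock,
						unsigned long thread)
	{
		u64 s = READ_ONCE(*state);
		u64 new, tmp;

	again:
		new = s | thread;		/* set our "running" bit */
		tmp = cmpxchg(state, s, new);
		if (unlikely(tmp != s)) {
			s = tmp;		/* a sibling raced us: retry */
			goto again;
		}
		clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock);
	}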
318 unsigned long *state = &paca_ptrs[first]->idle_state; in power7_idle_insn() local
320 unsigned long core_thread_mask = (1UL << threads_per_core) - 1; in power7_idle_insn()
323 struct p7_sprs sprs = {}; /* avoid false use-uninitialised */ in power7_idle_insn()
330 BUG_ON(!(*state & thread)); in power7_idle_insn()
331 *state &= ~thread; in power7_idle_insn()
334 if ((*state & core_thread_mask) == 0) { in power7_idle_insn()
364 * distinguish between fast sleep and winkle state in power7_idle_insn()
368 *state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT; in power7_idle_insn()
369 if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) in power7_idle_insn()
372 *state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS; in power7_idle_insn()
373 WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0); in power7_idle_insn()
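The winkle bookkeeping packs a small counter into the shared per-core word. Each entering thread bumps it; once the counter shows the whole core winkling, the per-thread THREAD_WINKLE bits are set so every waker knows a full state restore is needed. A sketch of that step (the comparison against threads_per_core is an assumption about the elided condition; the caller holds the core idle lock, so a plain read-modify-write is safe):

	/* Sketch: account one more thread entering winkle on this core. */
	static void note_winkle_entry_sketch(unsigned long *state)
	{
		*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		if (((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
				>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT) == threads_per_core)
			/* whole core winkled: all threads need a full restore */
			*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
	}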
385 local_paca->thread_idle_state = type; in power7_idle_insn()
386 srr1 = isa206_idle_insn_mayloss(type); /* go idle */ in power7_idle_insn()
387 local_paca->thread_idle_state = PNV_THREAD_RUNNING; in power7_idle_insn()
412 WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0); in power7_idle_insn()
413 *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT; in power7_idle_insn()
414 *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT); in power7_idle_insn()
421 /* HV state loss */ in power7_idle_insn()
428 WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0); in power7_idle_insn()
429 *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT; in power7_idle_insn()
430 if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) { in power7_idle_insn()
431 *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT); in power7_idle_insn()
437 WARN_ON(*state & thread); in power7_idle_insn()
439 if ((*state & core_thread_mask) != 0) in power7_idle_insn()
442 /* Per-core SPRs */ in power7_idle_insn()
462 if ((*state & local_paca->subcore_sibling_mask) != 0) in power7_idle_insn()
465 /* Per-subcore SPRs */ in power7_idle_insn()
482 /* Per-thread SPRs */ in power7_idle_insn()
493 mtspr(SPRN_SPRG3, local_paca->sprg_vdso); in power7_idle_insn()
517 /* Tell KVM we're entering idle. */ in power7_offline()
533 local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE; in power7_offline()
541 local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL; in power7_offline()
544 if (local_paca->kvm_hstate.hwthread_req) in power7_offline()
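The hwthread_state stores bracket the idle instruction so KVM can tell whether this hardware thread is usable for a guest. On the way out, the store of IN_KERNEL must be ordered against the load of hwthread_req, otherwise a concurrent KVM request could be missed; in the kernel this is an smp_mb() between the two. A sketch of the exit side (idle_kvm_start_guest is the handoff used on this path in mainline; treat the exact callee as an assumption):

	/* Sketch: wake-side handshake with KVM (CONFIG_KVM_BOOK3S_HV_POSSIBLE). */
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	smp_mb();	/* order hwthread_state store vs. hwthread_req load */
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);	/* let KVM take the thread */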
612 unsigned long *state = &paca_ptrs[first]->idle_state; in power9_idle_stop() local
613 unsigned long core_thread_mask = (1UL << threads_per_core) - 1; in power9_idle_stop()
618 struct p9_sprs sprs = {}; /* avoid false use-uninitialised */ in power9_idle_stop()
628 srr1 = isa300_idle_stop_noloss(psscr); /* go idle */ in power9_idle_stop()
644 local_paca->requested_psscr = psscr; in power9_idle_stop()
647 if (atomic_read(&local_paca->dont_stop)) { in power9_idle_stop()
648 local_paca->requested_psscr = 0; in power9_idle_stop()
657 * after a state-loss idle. Saving and restoring MMCR0 in power9_idle_stop()
658 * over idle is a workaround. in power9_idle_stop()
693 srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */ in power9_idle_stop()
696 local_paca->requested_psscr = 0; in power9_idle_stop()
741 * just always test PSSCR for SPR/TB state loss. in power9_idle_stop()
750 /* HV state loss */ in power9_idle_stop()
755 if ((*state & core_thread_mask) != 0) in power9_idle_stop()
758 /* Per-core SPRs */ in power9_idle_stop()
779 /* Per-thread SPRs */ in power9_idle_stop()
796 mtspr(SPRN_SPRG3, local_paca->sprg_vdso); in power9_idle_stop()
814 * that are in a stop state.
825 cpu0 = cpu & ~(threads_per_core - 1); in pnv_power9_force_smt4_catch()
828 atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop); in pnv_power9_force_smt4_catch()
833 if (!paca_ptrs[cpu0+thr]->requested_psscr) in pnv_power9_force_smt4_catch()
846 paca_ptrs[cpu0+thr]->hw_cpu_id); in pnv_power9_force_smt4_catch()
853 !paca_ptrs[cpu0+thr]->requested_psscr) { in pnv_power9_force_smt4_catch()
868 cpu0 = cpu & ~(threads_per_core - 1); in pnv_power9_force_smt4_release()
873 atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop); in pnv_power9_force_smt4_release()
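requested_psscr and dont_stop form a two-sided rendezvous: the idle entry publishes its request and then backs off if a veto is already up, while the catcher raises the veto and only needs to kick siblings whose requested_psscr is nonzero. A condensed sketch of both sides (barrier placement follows the usual store-then-load pairing and stands in for the elided lines; the IPI/doorbell kick is omitted):

	/* Idle side: publish the stop request, then re-check for a veto. */
	static bool may_enter_stop_sketch(u64 psscr)
	{
		local_paca->requested_psscr = psscr;
		smp_mb();	/* order the store above vs. the dont_stop load */
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;	/* vetoed: stay running */
			return false;
		}
		return true;
	}

	/* Catcher side: veto further stops, then wait for a stopped sibling. */
	static void catch_thread_sketch(int cpu0, int thr)
	{
		atomic_inc(&paca_ptrs[cpu0 + thr]->dont_stop);
		smp_mb();	/* pairs with the idle side's barrier */
		while (paca_ptrs[cpu0 + thr]->requested_psscr)
			cpu_relax();	/* it wakes and clears its request */
	}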
884 * isa300 idle routines restore CR, LR.
886 * idle thread doesn't use FP or VEC
897 unsigned long *state = &paca_ptrs[first]->idle_state; in power10_idle_stop() local
898 unsigned long core_thread_mask = (1UL << threads_per_core) - 1; in power10_idle_stop()
901 // struct p10_sprs sprs = {}; /* avoid false use-uninitialised */ in power10_idle_stop()
911 srr1 = isa300_idle_stop_noloss(psscr); /* go idle */ in power10_idle_stop()
926 /* XXX: save SPRs for deep state loss here. */ in power10_idle_stop()
933 srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */ in power10_idle_stop()
946 * just always test PSSCR for SPR/TB state loss. in power10_idle_stop()
955 /* HV state loss */ in power10_idle_stop()
960 if ((*state & core_thread_mask) != 0) in power10_idle_stop()
963 /* XXX: restore per-core SPRs here */ in power10_idle_stop()
981 /* XXX: restore per-thread SPRs here */ in power10_idle_stop()
1047 * Program the LPCR via stop-api only if the deepest stop state in pnv_program_cpu_hotplug_lpcr()
1056 * available platform idle state when a CPU is taken offline.
1091 * Power ISA 3.0 idle initialization.
1093 * Power ISA 3.0 defines a new SPR, the Processor Stop Status and Control
1094 * Register (PSSCR) to control idle behavior.
1097 * ----------------------------------------------------------
1098 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
1099 * ----------------------------------------------------------
1100 * 0      4     41   42    43   44     48    54   56    60
1103 * Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
1104 * lowest power-saving state the thread entered since stop instruction was
1105 * last executed.
1107 * Bit 41 - Status Disable (SD)
1108 * 0 - Shows PLS entries
1109 * 1 - PLS entries are all 0
1111 * Bit 42 - Enable State Loss
1112 * 0 - No state is lost irrespective of other fields
1113 * 1 - Allows state loss
1115 * Bit 43 - Exit Criterion
1116 * 0 - Exit from power-save mode on any interrupt
1117 * 1 - Exit from power-save mode controlled by LPCR's PECE bits
1119 * Bits 44:47 - Power-Saving Level Limit
1120 * This limits the power-saving level that can be entered.
1122 * Bits 60:63 - Requested Level
1123 * Used to specify which power-saving level must be entered on executing
1124 * the stop instruction.
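A worked sketch of assembling a request from these fields, using the PSSCR field macros from asm/reg.h (the chosen values are illustrative, roughly "request level 2, allow state loss, wake per LPCR PECE"):

	/* Illustrative PSSCR request; not a value any particular state uses. */
	static u64 make_psscr_request_sketch(void)
	{
		u64 psscr = 0;

		psscr |= 2 & PSSCR_RL_MASK;	/* RL, bits 60:63: request level 2 */
		psscr |= PSSCR_ESL;		/* ESL, bit 42: state loss allowed */
		psscr |= PSSCR_EC;		/* EC, bit 43: exit per LPCR PECE */
		return psscr;
	}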
1146 * - ESL bit value matches the EC bit value. in validate_psscr_val_mask()
1147 * - ESL bit is set for all the deep stop states. in validate_psscr_val_mask()
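Those two invariants reduce to simple bit tests. A sketch, with -EINVAL standing in for the routine's specific error codes (the real function also fills in default val/mask when the firmware-provided mask indicates an old firmware):

	/* Sketch of the firmware PSSCR invariants checked above. */
	static int check_psscr_invariants_sketch(u64 psscr_val, u32 flags)
	{
		bool ec = psscr_val & PSSCR_EC;
		bool esl = psscr_val & PSSCR_ESL;

		if (ec != esl)
			return -EINVAL;		/* ESL must mirror EC */
		if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) && !esl)
			return -EINVAL;		/* deep states must set ESL */
		return 0;
	}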
1160 * pnv_arch300_idle_init: Initializes the default idle state, first
1161 * deep idle state and deepest idle state on
1164 * @np: /ibm,opal/power-mgt device node
1165 * @flags: cpu-idle-state-flags array
1166 * @dt_idle_states: Number of idle state entries
1180 * the deepest stop state. in pnv_arch300_idle_init()
1183 * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state. in pnv_arch300_idle_init()
1189 struct pnv_idle_states_t *state = &pnv_idle_states[i]; in pnv_arch300_idle_init() local
1190 u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK; in pnv_arch300_idle_init()
1194 state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT)) in pnv_arch300_idle_init()
1197 if ((state->flags & OPAL_PM_TIMEBASE_STOP) && in pnv_arch300_idle_init()
1201 if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) && in pnv_arch300_idle_init()
1206 * The idle code does not deal with TB loss occurring in pnv_arch300_idle_init()
1207 * in a shallower state than SPR loss, so force it to in pnv_arch300_idle_init()
1213 if ((state->flags & OPAL_PM_TIMEBASE_STOP) && in pnv_arch300_idle_init()
1217 err = validate_psscr_val_mask(&state->psscr_val, in pnv_arch300_idle_init()
1218 &state->psscr_mask, in pnv_arch300_idle_init()
1219 state->flags); in pnv_arch300_idle_init()
1221 report_invalid_psscr_val(state->psscr_val, err); in pnv_arch300_idle_init()
1225 state->valid = true; in pnv_arch300_idle_init()
1227 if (max_residency_ns < state->residency_ns) { in pnv_arch300_idle_init()
1228 max_residency_ns = state->residency_ns; in pnv_arch300_idle_init()
1229 pnv_deepest_stop_psscr_val = state->psscr_val; in pnv_arch300_idle_init()
1230 pnv_deepest_stop_psscr_mask = state->psscr_mask; in pnv_arch300_idle_init()
1231 pnv_deepest_stop_flag = state->flags; in pnv_arch300_idle_init()
1236 (state->flags & OPAL_PM_STOP_INST_FAST)) { in pnv_arch300_idle_init()
1237 pnv_default_stop_val = state->psscr_val; in pnv_arch300_idle_init()
1238 pnv_default_stop_mask = state->psscr_mask; in pnv_arch300_idle_init()
1240 WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT); in pnv_arch300_idle_init()
1245 pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n"); in pnv_arch300_idle_init()
1248 pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx, mask = 0x%016llx\n", in pnv_arch300_idle_init()
1253 pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait\n"); in pnv_arch300_idle_init()
1255 pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx, mask = 0x%016llx\n", in pnv_arch300_idle_init()
1260 pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n", in pnv_arch300_idle_init()
1263 pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n", in pnv_arch300_idle_init()
1270 * The stop-api is unable to restore hypervisor in pnv_disable_deep_states()
1271 * resources on wakeup from platform idle states which in pnv_disable_deep_states()
1272 * lose full context. So disable such states. in pnv_disable_deep_states()
1275 pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n"); in pnv_disable_deep_states()
1276 pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n"); in pnv_disable_deep_states()
1281 * Use the default stop state for CPU-Hotplug in pnv_disable_deep_states()
1287 pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n", in pnv_disable_deep_states()
1289 } else { /* Fall back to snooze loop for CPU-Hotplug */ in pnv_disable_deep_states()
1291 pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n"); in pnv_disable_deep_states()
1297 * Probe device tree for supported idle states
1304 pr_warn("cpuidle-powernv: no idle states found in the DT\n"); in pnv_probe_idle_states()
1316 * This function parses device-tree and populates all the information
1317 * into the pnv_idle_states structure, and sets nr_pnv_idle_states, in pnv_parse_cpuidle_dt()
1318 * which is the number of cpuidle states discovered through device-tree. in pnv_parse_cpuidle_dt()
1330 np = of_find_node_by_path("/ibm,opal/power-mgt"); in pnv_parse_cpuidle_dt()
1333 return -ENODEV; in pnv_parse_cpuidle_dt()
1336 "ibm,cpu-idle-state-flags"); in pnv_parse_cpuidle_dt()
1346 rc = -ENOMEM; in pnv_parse_cpuidle_dt()
1351 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags", in pnv_parse_cpuidle_dt()
1353 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); in pnv_parse_cpuidle_dt()
1354 rc = -EINVAL; in pnv_parse_cpuidle_dt()
1361 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns", in pnv_parse_cpuidle_dt()
1363 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); in pnv_parse_cpuidle_dt()
1364 rc = -EINVAL; in pnv_parse_cpuidle_dt()
1371 if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns", in pnv_parse_cpuidle_dt()
1373 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n"); in pnv_parse_cpuidle_dt()
1374 rc = -EINVAL; in pnv_parse_cpuidle_dt()
1383 if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr", in pnv_parse_cpuidle_dt()
1385 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n"); in pnv_parse_cpuidle_dt()
1386 rc = -EINVAL; in pnv_parse_cpuidle_dt()
1393 if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask", in pnv_parse_cpuidle_dt()
1395 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n"); in pnv_parse_cpuidle_dt()
1396 rc = -EINVAL; in pnv_parse_cpuidle_dt()
1404 * power8 specific properties ibm,cpu-idle-state-pmicr-mask and in pnv_parse_cpuidle_dt()
1405 * ibm,cpu-idle-state-pmicr-val were never used and there is no in pnv_parse_cpuidle_dt()
1406 * plan to use them in the near future, so they are not parsed here. in pnv_parse_cpuidle_dt()
1409 if (of_property_read_string_array(np, "ibm,cpu-idle-state-names", in pnv_parse_cpuidle_dt()
1411 pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n"); in pnv_parse_cpuidle_dt()
1412 rc = -EINVAL; in pnv_parse_cpuidle_dt()
1437 p->idle_state = 0; in pnv_init_idle_states()
1439 p->idle_state = (1 << threads_per_core) - 1; in pnv_init_idle_states()
1443 p->thread_idle_state = PNV_THREAD_RUNNING; in pnv_init_idle_states()
1447 p->requested_psscr = 0; in pnv_init_idle_states()
1448 atomic_set(&p->dont_stop, 0); in pnv_init_idle_states()
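This initialization pins down the encoding used throughout: the shared word lives in the first sibling's paca, a set bit means "thread still running", so a fresh core starts all-ones and becomes fully idle when the low threads_per_core bits reach zero. A sketch of the corresponding test:

	/* Sketch: is every thread of this cpu's core in an idle state? */
	static bool core_is_idle_sketch(int cpu)
	{
		int first = cpu_first_thread_sibling(cpu);
		unsigned long mask = (1UL << threads_per_core) - 1;

		return (paca_ptrs[first]->idle_state & mask) == 0;
	}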