12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2d405a98cSShreyas B. Prabhu /*
3d405a98cSShreyas B. Prabhu * PowerNV cpuidle code
4d405a98cSShreyas B. Prabhu *
5d405a98cSShreyas B. Prabhu * Copyright 2015 IBM Corp.
6d405a98cSShreyas B. Prabhu */
7d405a98cSShreyas B. Prabhu
8d405a98cSShreyas B. Prabhu #include <linux/types.h>
9d405a98cSShreyas B. Prabhu #include <linux/mm.h>
10d405a98cSShreyas B. Prabhu #include <linux/slab.h>
11d405a98cSShreyas B. Prabhu #include <linux/of.h>
125703d2f4SShreyas B. Prabhu #include <linux/device.h>
135703d2f4SShreyas B. Prabhu #include <linux/cpu.h>
14d405a98cSShreyas B. Prabhu
15d405a98cSShreyas B. Prabhu #include <asm/firmware.h>
163a96570fSNicholas Piggin #include <asm/interrupt.h>
174bece972SMichael Ellerman #include <asm/machdep.h>
18d405a98cSShreyas B. Prabhu #include <asm/opal.h>
19d405a98cSShreyas B. Prabhu #include <asm/cputhreads.h>
20d405a98cSShreyas B. Prabhu #include <asm/cpuidle.h>
21d405a98cSShreyas B. Prabhu #include <asm/code-patching.h>
22d405a98cSShreyas B. Prabhu #include <asm/smp.h>
232201f994SNicholas Piggin #include <asm/runlatch.h>
247672691aSPaul Mackerras #include <asm/dbell.h>
25d405a98cSShreyas B. Prabhu
26d405a98cSShreyas B. Prabhu #include "powernv.h"
27d405a98cSShreyas B. Prabhu #include "subcore.h"
28d405a98cSShreyas B. Prabhu
29bcef83a0SShreyas B. Prabhu /* Power ISA 3.0 allows for stop states 0x0 - 0xF */
30bcef83a0SShreyas B. Prabhu #define MAX_STOP_STATE 0xF
31bcef83a0SShreyas B. Prabhu
321e1601b3SAkshay Adiga #define P9_STOP_SPR_MSR 2000
331e1601b3SAkshay Adiga #define P9_STOP_SPR_PSSCR 855
341e1601b3SAkshay Adiga
35d405a98cSShreyas B. Prabhu static u32 supported_cpuidle_states;
369c7b185aSAkshay Adiga struct pnv_idle_states_t *pnv_idle_states;
379c7b185aSAkshay Adiga int nr_pnv_idle_states;
38d405a98cSShreyas B. Prabhu
391e1601b3SAkshay Adiga /*
401e1601b3SAkshay Adiga * The default stop state that will be used by ppc_md.power_save
411e1601b3SAkshay Adiga * function on platforms that support stop instruction.
421e1601b3SAkshay Adiga */
431e1601b3SAkshay Adiga static u64 pnv_default_stop_val;
441e1601b3SAkshay Adiga static u64 pnv_default_stop_mask;
451e1601b3SAkshay Adiga static bool default_stop_found;
461e1601b3SAkshay Adiga
471e1601b3SAkshay Adiga /*
4810d91611SNicholas Piggin * First stop state levels when SPR and TB loss can occur.
491e1601b3SAkshay Adiga */
5010d91611SNicholas Piggin static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
51dcbbfa6bSPratik Rajesh Sampat static u64 deep_spr_loss_state = MAX_STOP_STATE + 1;
521e1601b3SAkshay Adiga
531e1601b3SAkshay Adiga /*
541e1601b3SAkshay Adiga * psscr value and mask of the deepest stop idle state.
551e1601b3SAkshay Adiga * Used when a cpu is offlined.
561e1601b3SAkshay Adiga */
571e1601b3SAkshay Adiga static u64 pnv_deepest_stop_psscr_val;
581e1601b3SAkshay Adiga static u64 pnv_deepest_stop_psscr_mask;
59785a12afSGautham R. Shenoy static u64 pnv_deepest_stop_flag;
601e1601b3SAkshay Adiga static bool deepest_stop_found;
611e1601b3SAkshay Adiga
6210d91611SNicholas Piggin static unsigned long power7_offline_type;
6310d91611SNicholas Piggin
/*
 * Program the firmware sleep/winkle (SLW) engine with the SPR values it
 * must restore on wakeup from deep idle states that lose full hypervisor
 * state. Values are read from the boot CPU and pushed to every present
 * CPU via OPAL.
 *
 * Returns 0 on success, or the first non-zero opal_slw_set_reg() error.
 */
static int __init pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of current cpu and use the
	 * same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		/* HSPRG0 is restored to point at this CPU's paca */
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* P9 (ISA 3.0) additionally needs MSR and PSSCR restored */
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only p8 needs to set extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
				uint64_t hid1_val = mfspr(SPRN_HID1);
				uint64_t hid4_val = mfspr(SPRN_HID4);
				uint64_t hid5_val = mfspr(SPRN_HID5);

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}
138d405a98cSShreyas B. Prabhu
/*
 * Return the bitmask of cpuidle states supported by this platform
 * (populated elsewhere in this file). Exported for the powernv
 * cpuidle driver.
 */
u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
144d405a98cSShreyas B. Prabhu
pnv_fastsleep_workaround_apply(void * info)1455703d2f4SShreyas B. Prabhu static void pnv_fastsleep_workaround_apply(void *info)
1465703d2f4SShreyas B. Prabhu
1475703d2f4SShreyas B. Prabhu {
148b350111bSNicholas Piggin int cpu = smp_processor_id();
1495703d2f4SShreyas B. Prabhu int rc;
1505703d2f4SShreyas B. Prabhu int *err = info;
1515703d2f4SShreyas B. Prabhu
152b350111bSNicholas Piggin if (cpu_first_thread_sibling(cpu) != cpu)
153b350111bSNicholas Piggin return;
154b350111bSNicholas Piggin
1555703d2f4SShreyas B. Prabhu rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
1565703d2f4SShreyas B. Prabhu OPAL_CONFIG_IDLE_APPLY);
1575703d2f4SShreyas B. Prabhu if (rc)
1585703d2f4SShreyas B. Prabhu *err = 1;
1595703d2f4SShreyas B. Prabhu }
1605703d2f4SShreyas B. Prabhu
16110d91611SNicholas Piggin static bool power7_fastsleep_workaround_entry = true;
16210d91611SNicholas Piggin static bool power7_fastsleep_workaround_exit = true;
16310d91611SNicholas Piggin
1645703d2f4SShreyas B. Prabhu /*
1655703d2f4SShreyas B. Prabhu * Used to store fastsleep workaround state
1665703d2f4SShreyas B. Prabhu * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
1675703d2f4SShreyas B. Prabhu * 1 - Workaround applied once, never undone.
1685703d2f4SShreyas B. Prabhu */
1695703d2f4SShreyas B. Prabhu static u8 fastsleep_workaround_applyonce;
1705703d2f4SShreyas B. Prabhu
/* sysfs show: report whether the fastsleep workaround is in apply-once mode. */
static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}
1765703d2f4SShreyas B. Prabhu
store_fastsleep_workaround_applyonce(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1775703d2f4SShreyas B. Prabhu static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
1785703d2f4SShreyas B. Prabhu struct device_attribute *attr, const char *buf,
1795703d2f4SShreyas B. Prabhu size_t count)
1805703d2f4SShreyas B. Prabhu {
1815703d2f4SShreyas B. Prabhu int err;
1825703d2f4SShreyas B. Prabhu u8 val;
1835703d2f4SShreyas B. Prabhu
1845703d2f4SShreyas B. Prabhu if (kstrtou8(buf, 0, &val) || val != 1)
1855703d2f4SShreyas B. Prabhu return -EINVAL;
1865703d2f4SShreyas B. Prabhu
1875703d2f4SShreyas B. Prabhu if (fastsleep_workaround_applyonce == 1)
1885703d2f4SShreyas B. Prabhu return count;
1895703d2f4SShreyas B. Prabhu
1905703d2f4SShreyas B. Prabhu /*
1915703d2f4SShreyas B. Prabhu * fastsleep_workaround_applyonce = 1 implies
1925703d2f4SShreyas B. Prabhu * fastsleep workaround needs to be left in 'applied' state on all
1935703d2f4SShreyas B. Prabhu * the cores. Do this by-
19410d91611SNicholas Piggin * 1. Disable the 'undo' workaround in fastsleep exit path
19510d91611SNicholas Piggin * 2. Sendi IPIs to all the cores which have at least one online thread
19610d91611SNicholas Piggin * 3. Disable the 'apply' workaround in fastsleep entry path
19710d91611SNicholas Piggin *
1985703d2f4SShreyas B. Prabhu * There is no need to send ipi to cores which have all threads
1995703d2f4SShreyas B. Prabhu * offlined, as last thread of the core entering fastsleep or deeper
2005703d2f4SShreyas B. Prabhu * state would have applied workaround.
2015703d2f4SShreyas B. Prabhu */
20210d91611SNicholas Piggin power7_fastsleep_workaround_exit = false;
2035703d2f4SShreyas B. Prabhu
2045ae36401SSebastian Andrzej Siewior cpus_read_lock();
205b350111bSNicholas Piggin on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1);
2065ae36401SSebastian Andrzej Siewior cpus_read_unlock();
2075703d2f4SShreyas B. Prabhu if (err) {
2085703d2f4SShreyas B. Prabhu pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
2095703d2f4SShreyas B. Prabhu goto fail;
2105703d2f4SShreyas B. Prabhu }
2115703d2f4SShreyas B. Prabhu
21210d91611SNicholas Piggin power7_fastsleep_workaround_entry = false;
2135703d2f4SShreyas B. Prabhu
2145703d2f4SShreyas B. Prabhu fastsleep_workaround_applyonce = 1;
2155703d2f4SShreyas B. Prabhu
2165703d2f4SShreyas B. Prabhu return count;
2175703d2f4SShreyas B. Prabhu fail:
2185703d2f4SShreyas B. Prabhu return -EIO;
2195703d2f4SShreyas B. Prabhu }
2205703d2f4SShreyas B. Prabhu
2215703d2f4SShreyas B. Prabhu static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
2225703d2f4SShreyas B. Prabhu show_fastsleep_workaround_applyonce,
2235703d2f4SShreyas B. Prabhu store_fastsleep_workaround_applyonce);
2245703d2f4SShreyas B. Prabhu
atomic_start_thread_idle(void)22510d91611SNicholas Piggin static inline void atomic_start_thread_idle(void)
2262201f994SNicholas Piggin {
22710d91611SNicholas Piggin int cpu = raw_smp_processor_id();
22810d91611SNicholas Piggin int first = cpu_first_thread_sibling(cpu);
22910d91611SNicholas Piggin int thread_nr = cpu_thread_in_core(cpu);
23010d91611SNicholas Piggin unsigned long *state = &paca_ptrs[first]->idle_state;
23110d91611SNicholas Piggin
23210d91611SNicholas Piggin clear_bit(thread_nr, state);
23310d91611SNicholas Piggin }
23410d91611SNicholas Piggin
atomic_stop_thread_idle(void)23510d91611SNicholas Piggin static inline void atomic_stop_thread_idle(void)
23610d91611SNicholas Piggin {
23710d91611SNicholas Piggin int cpu = raw_smp_processor_id();
23810d91611SNicholas Piggin int first = cpu_first_thread_sibling(cpu);
23910d91611SNicholas Piggin int thread_nr = cpu_thread_in_core(cpu);
24010d91611SNicholas Piggin unsigned long *state = &paca_ptrs[first]->idle_state;
24110d91611SNicholas Piggin
24210d91611SNicholas Piggin set_bit(thread_nr, state);
24310d91611SNicholas Piggin }
24410d91611SNicholas Piggin
/*
 * Acquire the per-core idle lock by spinning on the lock bit in the
 * core's idle_lock word. test_and_set_bit_lock() provides acquire
 * semantics on success.
 */
static inline void atomic_lock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *lock = &paca_ptrs[first]->idle_lock;

	/* Busy-wait: contention window is expected to be very short. */
	while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, lock)))
		barrier();
}
25410d91611SNicholas Piggin
/*
 * With the per-core idle lock held, mark this thread as no longer idle
 * (set its bit in idle_state via cmpxchg) and then release the lock.
 * clear_bit_unlock() provides the release barrier.
 */
static inline void atomic_unlock_and_stop_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long *lock = &paca_ptrs[first]->idle_lock;
	u64 s = READ_ONCE(*state);
	u64 new, tmp;

	/* Caller must hold the lock, and this thread's bit must be clear. */
	BUG_ON(!(READ_ONCE(*lock) & PNV_CORE_IDLE_LOCK_BIT));
	BUG_ON(s & thread);

again:
	new = s | thread;
	tmp = cmpxchg(state, s, new);
	if (unlikely(tmp != s)) {
		/* idle_state changed under us; retry with the fresh value. */
		s = tmp;
		goto again;
	}
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock);
}
27710d91611SNicholas Piggin
/*
 * Release the per-core idle lock without touching idle_state.
 * clear_bit_unlock() provides the release barrier.
 */
static inline void atomic_unlock_thread_idle(void)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *lock = &paca_ptrs[first]->idle_lock;

	BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, lock));
	clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock);
}
28710d91611SNicholas Piggin
/*
 * P7 and P8
 *
 * SPRs saved before a winkle (full HV state loss) and restored on wakeup
 * by power7_idle_insn(), grouped by the hardware scope they are shared at.
 */
struct p7_sprs {
	/* per core */
	u64 tscr;
	u64 worc;

	/* per subcore */
	u64 sdr1;
	u64 rpr;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 wort;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 uamor;
	/* amor is restored to constant ~0 */
};
31310d91611SNicholas Piggin
/*
 * Core of the P7/P8 idle entry/exit path. Executes the requested idle
 * instruction type (nap / fastsleep / winkle), tracks per-core thread idle
 * state under the core idle lock, and on wakeup restores any per-core,
 * per-subcore and per-thread SPRs that were lost, resyncing the timebase
 * via OPAL when the whole core slept.
 *
 * Called in real mode with the MMU off. Returns the wakeup SRR1 value.
 */
static unsigned long power7_idle_insn(unsigned long type)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long thread = 1UL << cpu_thread_in_core(cpu);
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	bool full_winkle;
	struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
	bool sprs_saved = false;
	int rc;

	/* Nap loses no state, so only deeper states need the bookkeeping. */
	if (unlikely(type != PNV_THREAD_NAP)) {
		atomic_lock_thread_idle();

		BUG_ON(!(*state & thread));
		*state &= ~thread;

		if (power7_fastsleep_workaround_entry) {
			/* Last thread of the core to go idle applies it. */
			if ((*state & core_thread_mask) == 0) {
				rc = opal_config_cpu_idle_state(
						OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_APPLY);
				BUG_ON(rc);
			}
		}

		if (type == PNV_THREAD_WINKLE) {
			/* Winkle can lose all of these; save them now. */
			sprs.tscr = mfspr(SPRN_TSCR);
			sprs.worc = mfspr(SPRN_WORC);

			sprs.sdr1 = mfspr(SPRN_SDR1);
			sprs.rpr = mfspr(SPRN_RPR);

			sprs.lpcr = mfspr(SPRN_LPCR);
			if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
				sprs.hfscr = mfspr(SPRN_HFSCR);
				sprs.fscr = mfspr(SPRN_FSCR);
			}
			sprs.purr = mfspr(SPRN_PURR);
			sprs.spurr = mfspr(SPRN_SPURR);
			sprs.dscr = mfspr(SPRN_DSCR);
			sprs.wort = mfspr(SPRN_WORT);

			sprs_saved = true;

			/*
			 * Increment winkle counter and set all winkle bits if
			 * all threads are winkling. This allows wakeup side to
			 * distinguish between fast sleep and winkle state
			 * loss. Fast sleep still has to resync the timebase so
			 * this may not be a really big win.
			 */
			*state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
			if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
					>> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
					== threads_per_core)
				*state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
			WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		}

		atomic_unlock_thread_idle();
	}

	/* AMR/IAMR/UAMOR can be lost even in shallow states on P8. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		sprs.amr = mfspr(SPRN_AMR);
		sprs.iamr = mfspr(SPRN_IAMR);
		sprs.uamor = mfspr(SPRN_UAMOR);
	}

	local_paca->thread_idle_state = type;
	srr1 = isa206_idle_insn_mayloss(type);		/* go idle */
	local_paca->thread_idle_state = PNV_THREAD_RUNNING;

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
			/*
			 * We don't need an isync after the mtsprs here because
			 * the upcoming mtmsrd is execution synchronizing.
			 */
			mtspr(SPRN_AMR, sprs.amr);
			mtspr(SPRN_IAMR, sprs.iamr);
			mtspr(SPRN_AMOR, ~0);
			mtspr(SPRN_UAMOR, sprs.uamor);
		}
	}

	/* Hypervisor maintenance interrupt woke us: handle it in realmode. */
	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/* No hypervisor state loss: just undo the idle bookkeeping. */
	if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
		if (unlikely(type != PNV_THREAD_NAP)) {
			atomic_lock_thread_idle();
			if (type == PNV_THREAD_WINKLE) {
				WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
				*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
				*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			}
			atomic_unlock_and_stop_thread_idle();
		}
		return srr1;
	}

	/* HV state loss */
	BUG_ON(type == PNV_THREAD_NAP);

	atomic_lock_thread_idle();

	full_winkle = false;
	if (type == PNV_THREAD_WINKLE) {
		WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
		*state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
		/* Our winkle bit still set means we took the full state loss. */
		if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
			*state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
			full_winkle = true;
			BUG_ON(!sprs_saved);
		}
	}

	WARN_ON(*state & thread);

	/* Another thread already woke and restored the per-core state. */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	if (full_winkle) {
		mtspr(SPRN_TSCR, sprs.tscr);
		mtspr(SPRN_WORC, sprs.worc);
	}

	if (power7_fastsleep_workaround_exit) {
		rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						OPAL_CONFIG_IDLE_UNDO);
		BUG_ON(rc);
	}

	/* TB */
	if (opal_resync_timebase() != OPAL_SUCCESS)
		BUG();

core_woken:
	if (!full_winkle)
		goto subcore_woken;

	if ((*state & local_paca->subcore_sibling_mask) != 0)
		goto subcore_woken;

	/* Per-subcore SPRs */
	mtspr(SPRN_SDR1, sprs.sdr1);
	mtspr(SPRN_RPR, sprs.rpr);

subcore_woken:
	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();
	atomic_unlock_and_stop_thread_idle();

	/* Fast sleep does not lose SPRs */
	if (!full_winkle)
		return srr1;

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_HFSCR, sprs.hfscr);
		mtspr(SPRN_FSCR, sprs.fscr);
	}
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_WORT, sprs.wort);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

#ifdef CONFIG_PPC_64S_HASH_MMU
	/*
	 * The SLB has to be restored here, but it sometimes still
	 * contains entries, so the __ variant must be used to prevent
	 * multi hits.
	 */
	__slb_restore_bolted_realmode();
#endif

	return srr1;
}
5062201f994SNicholas Piggin
50710d91611SNicholas Piggin extern unsigned long idle_kvm_start_guest(unsigned long srr1);
50810d91611SNicholas Piggin
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Put an offlined CPU into the configured deep idle state
 * (power7_offline_type), switching to real mode around the idle
 * instruction and handing off to KVM on wakeup if a guest entry was
 * requested while we slept. Returns the wakeup SRR1 value.
 */
static unsigned long power7_offline(void)
{
	unsigned long srr1;

	mtmsr(MSR_IDLE);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're entering idle. */
	/******************************************************/
	/*  N O T E   W E L L    ! ! !    N O T E   W E L L   */
	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
	/* MUST occur in real mode, i.e. with the MMU off,    */
	/* and the MMU must stay off until we clear this flag */
	/* and test HSTATE_HWTHREAD_REQ(r13) in               */
	/* pnv_powersave_wakeup in this file.                 */
	/* The reason is that another thread can switch the   */
	/* MMU to a guest context whenever this flag is set   */
	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
	/* that would potentially cause this thread to start  */
	/* executing instructions from guest memory in        */
	/* hypervisor mode, leading to a host crash or data   */
	/* corruption, or worse.                              */
	/******************************************************/
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
#endif

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(power7_offline_type);
	__ppc64_runlatch_on();

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
	/* Order setting hwthread_state vs. testing hwthread_req */
	smp_mb();
	if (local_paca->kvm_hstate.hwthread_req)
		srr1 = idle_kvm_start_guest(srr1);
#endif

	mtmsr(MSR_KERNEL);

	return srr1;
}
#endif
55310d91611SNicholas Piggin
/*
 * Enter a P7/P8 idle state of the given PNV_THREAD_* type from the
 * cpuidle path. On wakeup, any interrupt reason encoded in SRR1 is
 * replayed via irq_set_pending_from_srr1().
 */
void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	/*
	 * NOTE(review): presumably this bails when it is not safe/useful to
	 * enter idle (e.g. an interrupt is already pending) — confirm against
	 * prep_irq_for_idle_irqsoff()'s definition.
	 */
	if (!prep_irq_for_idle_irqsoff())
		return;

	mtmsr(MSR_IDLE);
	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();
	mtmsr(MSR_KERNEL);

	fini_irq_for_idle_irqsoff();
	irq_set_pending_from_srr1(srr1);
}
5702201f994SNicholas Piggin
power7_idle(void)571ffd2961bSNicholas Piggin static void power7_idle(void)
5722201f994SNicholas Piggin {
5732201f994SNicholas Piggin if (!powersave_nap)
5742201f994SNicholas Piggin return;
5752201f994SNicholas Piggin
5762201f994SNicholas Piggin power7_idle_type(PNV_THREAD_NAP);
5772201f994SNicholas Piggin }
5782201f994SNicholas Piggin
/*
 * POWER9 SPRs saved across deep stop states that lose hypervisor state,
 * restored on wakeup by power9_idle_stop(), grouped by sharing scope.
 */
struct p9_sprs {
	/* per core */
	u64 ptcr;
	u64 rpr;
	u64 tscr;
	u64 ldbar;

	/* per thread */
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pid;
	u64 purr;
	u64 spurr;
	u64 dscr;
	u64 ciabr;

	u64 mmcra;
	u32 mmcr0;
	u32 mmcr1;
	u64 mmcr2;

	/* per thread SPRs that get lost in shallow states */
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};
60710d91611SNicholas Piggin
/*
 * power9_idle_stop - execute the ISA v3.0 stop instruction and handle wakeup.
 * @psscr: requested PSSCR value; EC/ESL bits decide whether state may be lost.
 *
 * Returns the SRR1 wakeup value, or 0 for a synchronous wakeup from an
 * EC=ESL=0 stop (or when stop was aborted for KVM's dont_stop request).
 *
 * Called with interrupts hard-disabled. On wakeup from a state-loss stop
 * the CPU is in real mode (see the MSR_IR|MSR_DR warning below) until the
 * final mtmsr(MSR_KERNEL). For states with RL >= deep_spr_loss_state,
 * per-thread and per-core SPRs are saved here and restored after wakeup;
 * per-core restore is serialised against sibling threads through the
 * atomic_*_thread_idle() protocol on paca_ptrs[first]->idle_state.
 */
static unsigned long power9_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
	unsigned long mmcr0 = 0;
	unsigned long mmcra = 0;
	struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr); /* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
		/*
		 * Advertise the PSSCR we are about to use so
		 * pnv_power9_force_smt4_catch() can see us, then back out
		 * of the stop entirely if it has asked us not to stop.
		 */
		local_paca->requested_psscr = psscr;
		/* order setting requested_psscr vs testing dont_stop */
		smp_mb();
		if (atomic_read(&local_paca->dont_stop)) {
			local_paca->requested_psscr = 0;
			return 0;
		}
	}
#endif

	if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
		/*
		 * POWER9 DD2 can incorrectly set PMAO when waking up
		 * after a state-loss idle. Saving and restoring MMCR0
		 * over idle is a workaround.
		 */
		mmcr0 = mfspr(SPRN_MMCR0);
	}

	/* Deep state: save everything that may be lost (see struct p9_sprs) */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		sprs.lpcr = mfspr(SPRN_LPCR);
		sprs.hfscr = mfspr(SPRN_HFSCR);
		sprs.fscr = mfspr(SPRN_FSCR);
		sprs.pid = mfspr(SPRN_PID);
		sprs.purr = mfspr(SPRN_PURR);
		sprs.spurr = mfspr(SPRN_SPURR);
		sprs.dscr = mfspr(SPRN_DSCR);
		sprs.ciabr = mfspr(SPRN_CIABR);

		sprs.mmcra = mfspr(SPRN_MMCRA);
		sprs.mmcr0 = mfspr(SPRN_MMCR0);
		sprs.mmcr1 = mfspr(SPRN_MMCR1);
		sprs.mmcr2 = mfspr(SPRN_MMCR2);

		sprs.ptcr = mfspr(SPRN_PTCR);
		sprs.rpr = mfspr(SPRN_RPR);
		sprs.tscr = mfspr(SPRN_TSCR);
		/* LDBAR is ultravisor-owned when an ultravisor is present */
		if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
			sprs.ldbar = mfspr(SPRN_LDBAR);

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	/* These are lost even in shallow state-loss stops; always save */
	sprs.amr = mfspr(SPRN_AMR);
	sprs.iamr = mfspr(SPRN_IAMR);
	sprs.uamor = mfspr(SPRN_UAMOR);

	srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	local_paca->requested_psscr = 0;
#endif

	/* PLS field of PSSCR tells us how deep we actually went */
	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
		/*
		 * We don't need an isync after the mtsprs here because the
		 * upcoming mtmsrd is execution synchronizing.
		 */
		mtspr(SPRN_AMR, sprs.amr);
		mtspr(SPRN_IAMR, sprs.iamr);
		mtspr(SPRN_AMOR, ~0);
		mtspr(SPRN_UAMOR, sprs.uamor);

		/*
		 * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
		 * might have been corrupted and needs flushing. We also need
		 * to reload MMCR0 (see mmcr0 comment above).
		 */
		if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT);
			mtspr(SPRN_MMCR0, mmcr0);
		}

		/*
		 * DD2.2 and earlier need to set then clear bit 60 in MMCRA
		 * to ensure the PMU starts running.
		 */
		mmcra = mfspr(SPRN_MMCRA);
		mmcra |= PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
		mmcra &= ~PPC_BIT(60);
		mtspr(SPRN_MMCRA, mmcra);
	}

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER9, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	/* A sibling already woke and restored the per-core SPRs: skip them */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* Per-core SPRs */
	mtspr(SPRN_PTCR, sprs.ptcr);
	mtspr(SPRN_RPR, sprs.rpr);
	mtspr(SPRN_TSCR, sprs.tscr);

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* Per-thread SPRs */
	mtspr(SPRN_LPCR, sprs.lpcr);
	mtspr(SPRN_HFSCR, sprs.hfscr);
	mtspr(SPRN_FSCR, sprs.fscr);
	mtspr(SPRN_PID, sprs.pid);
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_CIABR, sprs.ciabr);

	mtspr(SPRN_MMCRA, sprs.mmcra);
	mtspr(SPRN_MMCR0, sprs.mmcr0);
	mtspr(SPRN_MMCR1, sprs.mmcr1);
	mtspr(SPRN_MMCR2, sprs.mmcr2);
	if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
		mtspr(SPRN_LDBAR, sprs.ldbar);

	mtspr(SPRN_SPRG3, local_paca->sprg_vdso);

	/* HPT: rebuild the bolted SLB entries lost with HV state */
	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}
80610d91611SNicholas Piggin
8077672691aSPaul Mackerras #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 in order by asking
 * all other threads not to stop, and sending a message to any
 * that are in a stop state.
 * Must be called with preemption disabled.
 *
 * Paired with pnv_power9_force_smt4_release(), which drops the
 * dont_stop references taken here.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;	/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	/* cpu0 = first thread of this core; siblings are cpu0..cpu0+tpc-1 */
	cpu0 = cpu & ~(threads_per_core - 1);
	/* Ask every sibling (not ourselves) to refuse entering stop */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	smp_mb();
	/*
	 * A non-zero requested_psscr marks a thread in (or entering) stop.
	 * Note this scan also counts the calling thread (its own
	 * requested_psscr is 0) on top of the awake_threads = 1 initialiser,
	 * so the awake_threads < need_awake test below only fires when
	 * fewer than threads_per_core - 1 hardware threads are awake.
	 */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);
8627672691aSPaul Mackerras
pnv_power9_force_smt4_release(void)8637672691aSPaul Mackerras void pnv_power9_force_smt4_release(void)
8647672691aSPaul Mackerras {
8657672691aSPaul Mackerras int cpu, cpu0, thr;
8667672691aSPaul Mackerras
8677672691aSPaul Mackerras cpu = smp_processor_id();
8687672691aSPaul Mackerras cpu0 = cpu & ~(threads_per_core - 1);
8697672691aSPaul Mackerras
8707672691aSPaul Mackerras /* clear all the dont_stop flags */
8717672691aSPaul Mackerras for (thr = 0; thr < threads_per_core; ++thr) {
8727672691aSPaul Mackerras if (cpu != cpu0 + thr)
873f437c517SMichael Ellerman atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
8747672691aSPaul Mackerras }
8757672691aSPaul Mackerras }
8767672691aSPaul Mackerras EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
8777672691aSPaul Mackerras #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
8787672691aSPaul Mackerras
/*
 * SPR save area for power10_idle_stop(). Currently empty: the deep
 * state-loss save/restore driver for P10 is not implemented yet (see
 * the XXX markers in power10_idle_stop()).
 */
struct p10_sprs {
	/*
	 * SPRs that get lost in shallow states:
	 *
	 * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1
	 * isa300 idle routines restore CR, LR.
	 * CTR is volatile
	 * idle thread doesn't use FP or VEC
	 * kernel doesn't use TAR
	 * HSPRG1 is only live in HV interrupt entry
	 * SPRG2 is only live in KVM guests, KVM handles it.
	 */
};
892ffd2961bSNicholas Piggin
/*
 * power10_idle_stop - execute the ISA v3.1 stop instruction and handle wakeup.
 * @psscr: requested PSSCR value; EC/ESL bits decide whether state may be lost.
 *
 * Returns the SRR1 wakeup value, or 0 for a synchronous wakeup from an
 * EC=ESL=0 stop. Same overall structure as power9_idle_stop(), minus the
 * P9-specific workarounds and KVM dont_stop handshake, and with the deep
 * SPR save/restore not yet implemented (XXX below) — the idle-state setup
 * code filters out P10 states that would lose full context or the TB.
 */
static unsigned long power10_idle_stop(unsigned long psscr)
{
	int cpu = raw_smp_processor_id();
	int first = cpu_first_thread_sibling(cpu);
	unsigned long *state = &paca_ptrs[first]->idle_state;
	unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
	unsigned long srr1;
	unsigned long pls;
//	struct p10_sprs sprs = {}; /* avoid false used-uninitialised */
	bool sprs_saved = false;

	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
		/* EC=ESL=0 case */

		/*
		 * Wake synchronously. SRESET via xscom may still cause
		 * a 0x100 powersave wakeup with SRR1 reason!
		 */
		srr1 = isa300_idle_stop_noloss(psscr); /* go idle */
		if (likely(!srr1))
			return 0;

		/*
		 * Registers not saved, can't recover!
		 * This would be a hardware bug
		 */
		BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);

		goto out;
	}

	/* EC=ESL=1 case */
	if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
		/* XXX: save SPRs for deep state loss here. */

		sprs_saved = true;

		atomic_start_thread_idle();
	}

	srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */

	/* PLS field of PSSCR tells us how deep we actually went */
	psscr = mfspr(SPRN_PSSCR);

	WARN_ON_ONCE(!srr1);
	WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));

	if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
		hmi_exception_realmode(NULL);

	/*
	 * On POWER10, SRR1 bits do not match exactly as expected.
	 * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
	 * just always test PSSCR for SPR/TB state loss.
	 */
	pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
	if (likely(pls < deep_spr_loss_state)) {
		if (sprs_saved)
			atomic_stop_thread_idle();
		goto out;
	}

	/* HV state loss */
	BUG_ON(!sprs_saved);

	atomic_lock_thread_idle();

	/* A sibling already woke and restored the per-core SPRs: skip them */
	if ((*state & core_thread_mask) != 0)
		goto core_woken;

	/* XXX: restore per-core SPRs here */

	if (pls >= pnv_first_tb_loss_level) {
		/* TB loss */
		if (opal_resync_timebase() != OPAL_SUCCESS)
			BUG();
	}

	/*
	 * isync after restoring shared SPRs and before unlocking. Unlock
	 * only contains hwsync which does not necessarily do the right
	 * thing for SPRs.
	 */
	isync();

core_woken:
	atomic_unlock_and_stop_thread_idle();

	/* XXX: restore per-thread SPRs here */

	/* HPT: rebuild the bolted SLB entries lost with HV state */
	if (!radix_enabled())
		__slb_restore_bolted_realmode();

out:
	mtmsr(MSR_KERNEL);

	return srr1;
}
991ffd2961bSNicholas Piggin
992ffd2961bSNicholas Piggin #ifdef CONFIG_HOTPLUG_CPU
arch300_offline_stop(unsigned long psscr)993ffd2961bSNicholas Piggin static unsigned long arch300_offline_stop(unsigned long psscr)
994ffd2961bSNicholas Piggin {
995ffd2961bSNicholas Piggin unsigned long srr1;
996ffd2961bSNicholas Piggin
997ffd2961bSNicholas Piggin if (cpu_has_feature(CPU_FTR_ARCH_31))
998fae5c9f3SNicholas Piggin srr1 = power10_idle_stop(psscr);
999ffd2961bSNicholas Piggin else
1000fae5c9f3SNicholas Piggin srr1 = power9_idle_stop(psscr);
1001ffd2961bSNicholas Piggin
1002ffd2961bSNicholas Piggin return srr1;
1003ffd2961bSNicholas Piggin }
1004ffd2961bSNicholas Piggin #endif
1005ffd2961bSNicholas Piggin
/*
 * arch300_idle_type - enter a stop state on ISA v3.0/v3.1 CPUs.
 * @stop_psscr_val:  PSSCR bits to set for the requested state.
 * @stop_psscr_mask: PSSCR bits owned by the requested state (cleared
 *                   before OR-ing in @stop_psscr_val).
 *
 * Called with interrupts soft-disabled. Bails out without idling if the
 * irq state can't be prepared (an interrupt is already pending); on
 * wakeup, replays any interrupt indicated by the SRR1 wake reason.
 */
void arch300_idle_type(unsigned long stop_psscr_val,
		       unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return;

	/* Merge the requested state's fields into the current PSSCR */
	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		srr1 = power10_idle_stop(psscr);
	else
		srr1 = power9_idle_stop(psscr);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	irq_set_pending_from_srr1(srr1);
}
1029ffd2961bSNicholas Piggin
/*
 * Used for ppc_md.power_save which needs a function with no parameters.
 * Enters the default stop state discovered at init (deepest loss-less
 * OPAL_PM_STOP_INST_FAST state).
 */
static void arch300_idle(void)
{
	arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}
1037ffd2961bSNicholas Piggin
103867d20418SNicholas Piggin #ifdef CONFIG_HOTPLUG_CPU
103919f8a5b5SPaul Mackerras
/*
 * pnv_program_cpu_hotplug_lpcr - program a CPU's LPCR for offline/online.
 * @cpu:      logical CPU number (must be the current CPU for the mtspr
 *            to hit the right thread — NOTE(review): confirm with callers).
 * @lpcr_val: LPCR value to write.
 *
 * Writes LPCR directly, and additionally records it with the OPAL
 * stop-api (SLW engine) so firmware can restore it on wakeup when the
 * deepest stop state loses full hypervisor context.
 */
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}
105324be85a2SGautham R. Shenoy
/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Must be called with interrupts hard-disabled and no lazy irq pending.
 * Returns the SRR1 wakeup value (0 for the emulated-snooze fallback).
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		/* ISA v3.0+: request the deepest stop state found at init */
		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = arch300_offline_stop(psscr);
	} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
		/* POWER7/8 nap/sleep/winkle path */
		srr1 = power7_offline();
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	return srr1;
}
108867d20418SNicholas Piggin #endif
1089a7cd88daSGautham R. Shenoy
1090a7cd88daSGautham R. Shenoy /*
1091bcef83a0SShreyas B. Prabhu * Power ISA 3.0 idle initialization.
1092bcef83a0SShreyas B. Prabhu *
1093bcef83a0SShreyas B. Prabhu * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
1094bcef83a0SShreyas B. Prabhu * Register (PSSCR) to control idle behavior.
1095bcef83a0SShreyas B. Prabhu *
1096bcef83a0SShreyas B. Prabhu * PSSCR layout:
1097bcef83a0SShreyas B. Prabhu * ----------------------------------------------------------
1098bcef83a0SShreyas B. Prabhu * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
1099bcef83a0SShreyas B. Prabhu * ----------------------------------------------------------
1100bcef83a0SShreyas B. Prabhu * 0 4 41 42 43 44 48 54 56 60
1101bcef83a0SShreyas B. Prabhu *
1102bcef83a0SShreyas B. Prabhu * PSSCR key fields:
1103bcef83a0SShreyas B. Prabhu * Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
1104bcef83a0SShreyas B. Prabhu * lowest power-saving state the thread entered since stop instruction was
1105bcef83a0SShreyas B. Prabhu * last executed.
1106bcef83a0SShreyas B. Prabhu *
1107bcef83a0SShreyas B. Prabhu * Bit 41 - Status Disable(SD)
1108bcef83a0SShreyas B. Prabhu * 0 - Shows PLS entries
1109bcef83a0SShreyas B. Prabhu * 1 - PLS entries are all 0
1110bcef83a0SShreyas B. Prabhu *
1111bcef83a0SShreyas B. Prabhu * Bit 42 - Enable State Loss
1112bcef83a0SShreyas B. Prabhu * 0 - No state is lost irrespective of other fields
1113bcef83a0SShreyas B. Prabhu * 1 - Allows state loss
1114bcef83a0SShreyas B. Prabhu *
1115bcef83a0SShreyas B. Prabhu * Bit 43 - Exit Criterion
1116bcef83a0SShreyas B. Prabhu * 0 - Exit from power-save mode on any interrupt
1117bcef83a0SShreyas B. Prabhu * 1 - Exit from power-save mode controlled by LPCR's PECE bits
1118bcef83a0SShreyas B. Prabhu *
1119bcef83a0SShreyas B. Prabhu * Bits 44:47 - Power-Saving Level Limit
1120bcef83a0SShreyas B. Prabhu * This limits the power-saving level that can be entered into.
1121bcef83a0SShreyas B. Prabhu *
1122bcef83a0SShreyas B. Prabhu * Bits 60:63 - Requested Level
1123bcef83a0SShreyas B. Prabhu * Used to specify which power-saving level must be entered on executing
1124bcef83a0SShreyas B. Prabhu * stop instruction
112509206b60SGautham R. Shenoy */
112609206b60SGautham R. Shenoy
validate_psscr_val_mask(u64 * psscr_val,u64 * psscr_mask,u32 flags)1127e5913db1SNick Child int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
112809206b60SGautham R. Shenoy {
112909206b60SGautham R. Shenoy int err = 0;
113009206b60SGautham R. Shenoy
113109206b60SGautham R. Shenoy /*
113209206b60SGautham R. Shenoy * psscr_mask == 0xf indicates an older firmware.
113309206b60SGautham R. Shenoy * Set remaining fields of psscr to the default values.
113409206b60SGautham R. Shenoy * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
113509206b60SGautham R. Shenoy */
113609206b60SGautham R. Shenoy if (*psscr_mask == 0xf) {
113709206b60SGautham R. Shenoy *psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
113809206b60SGautham R. Shenoy *psscr_mask = PSSCR_HV_DEFAULT_MASK;
113909206b60SGautham R. Shenoy return err;
114009206b60SGautham R. Shenoy }
114109206b60SGautham R. Shenoy
114209206b60SGautham R. Shenoy /*
114309206b60SGautham R. Shenoy * New firmware is expected to set the psscr_val bits correctly.
114409206b60SGautham R. Shenoy * Validate that the following invariants are correctly maintained by
114509206b60SGautham R. Shenoy * the new firmware.
114609206b60SGautham R. Shenoy * - ESL bit value matches the EC bit value.
114709206b60SGautham R. Shenoy * - ESL bit is set for all the deep stop states.
114809206b60SGautham R. Shenoy */
114909206b60SGautham R. Shenoy if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
115009206b60SGautham R. Shenoy err = ERR_EC_ESL_MISMATCH;
115109206b60SGautham R. Shenoy } else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
115209206b60SGautham R. Shenoy GET_PSSCR_ESL(*psscr_val) == 0) {
115309206b60SGautham R. Shenoy err = ERR_DEEP_STATE_ESL_MISMATCH;
115409206b60SGautham R. Shenoy }
115509206b60SGautham R. Shenoy
115609206b60SGautham R. Shenoy return err;
115709206b60SGautham R. Shenoy }
115809206b60SGautham R. Shenoy
/*
 * pnv_arch300_idle_init: Initializes the default idle state, first
 *			deep idle state and deepest idle state on
 *			ISA 3.0 CPUs.
 *
 * Walks the pnv_idle_states[] table (parsed earlier from the
 * /ibm,opal/power-mgt device node); takes no parameters and returns
 * nothing — invalid states are reported and skipped.
 */
pnv_arch300_idle_init(void)1169ffd2961bSNicholas Piggin static void __init pnv_arch300_idle_init(void)
1170bcef83a0SShreyas B. Prabhu {
117109206b60SGautham R. Shenoy u64 max_residency_ns = 0;
11729c7b185aSAkshay Adiga int i;
1173bcef83a0SShreyas B. Prabhu
1174ffd2961bSNicholas Piggin /* stop is not really architected, we only have p9,p10 drivers */
1175ffd2961bSNicholas Piggin if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
1176ffd2961bSNicholas Piggin return;
1177ffd2961bSNicholas Piggin
1178bcef83a0SShreyas B. Prabhu /*
117909206b60SGautham R. Shenoy * pnv_deepest_stop_{val,mask} should be set to values corresponding to
118009206b60SGautham R. Shenoy * the deepest stop state.
118109206b60SGautham R. Shenoy *
118209206b60SGautham R. Shenoy * pnv_default_stop_{val,mask} should be set to values corresponding to
118310d91611SNicholas Piggin * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
1184bcef83a0SShreyas B. Prabhu */
118510d91611SNicholas Piggin pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
1186dcbbfa6bSPratik Rajesh Sampat deep_spr_loss_state = MAX_STOP_STATE + 1;
11879c7b185aSAkshay Adiga for (i = 0; i < nr_pnv_idle_states; i++) {
118809206b60SGautham R. Shenoy int err;
11899c7b185aSAkshay Adiga struct pnv_idle_states_t *state = &pnv_idle_states[i];
11909c7b185aSAkshay Adiga u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;
1191bcef83a0SShreyas B. Prabhu
1192ffd2961bSNicholas Piggin /* No deep loss driver implemented for POWER10 yet */
1193ffd2961bSNicholas Piggin if (pvr_version_is(PVR_POWER10) &&
1194ffd2961bSNicholas Piggin state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
1195ffd2961bSNicholas Piggin continue;
1196ffd2961bSNicholas Piggin
119710d91611SNicholas Piggin if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
119810d91611SNicholas Piggin (pnv_first_tb_loss_level > psscr_rl))
119910d91611SNicholas Piggin pnv_first_tb_loss_level = psscr_rl;
120010d91611SNicholas Piggin
12019c7b185aSAkshay Adiga if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
1202dcbbfa6bSPratik Rajesh Sampat (deep_spr_loss_state > psscr_rl))
1203dcbbfa6bSPratik Rajesh Sampat deep_spr_loss_state = psscr_rl;
120410d91611SNicholas Piggin
120510d91611SNicholas Piggin /*
120610d91611SNicholas Piggin * The idle code does not deal with TB loss occurring
120710d91611SNicholas Piggin * in a shallower state than SPR loss, so force it to
120810d91611SNicholas Piggin * behave like SPRs are lost if TB is lost. POWER9 would
12091fd02f66SJulia Lawall * never encounter this, but a POWER8 core would if it
121010d91611SNicholas Piggin * implemented the stop instruction. So this is for forward
121110d91611SNicholas Piggin * compatibility.
121210d91611SNicholas Piggin */
121310d91611SNicholas Piggin if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
1214dcbbfa6bSPratik Rajesh Sampat (deep_spr_loss_state > psscr_rl))
1215dcbbfa6bSPratik Rajesh Sampat deep_spr_loss_state = psscr_rl;
1216c0691f9dSShreyas B. Prabhu
12179c7b185aSAkshay Adiga err = validate_psscr_val_mask(&state->psscr_val,
12189c7b185aSAkshay Adiga &state->psscr_mask,
12199c7b185aSAkshay Adiga state->flags);
122009206b60SGautham R. Shenoy if (err) {
12219c7b185aSAkshay Adiga report_invalid_psscr_val(state->psscr_val, err);
122209206b60SGautham R. Shenoy continue;
122309206b60SGautham R. Shenoy }
122409206b60SGautham R. Shenoy
12253127692dSNicholas Piggin state->valid = true;
12263127692dSNicholas Piggin
12279c7b185aSAkshay Adiga if (max_residency_ns < state->residency_ns) {
12289c7b185aSAkshay Adiga max_residency_ns = state->residency_ns;
12299c7b185aSAkshay Adiga pnv_deepest_stop_psscr_val = state->psscr_val;
12309c7b185aSAkshay Adiga pnv_deepest_stop_psscr_mask = state->psscr_mask;
12319c7b185aSAkshay Adiga pnv_deepest_stop_flag = state->flags;
123209206b60SGautham R. Shenoy deepest_stop_found = true;
123309206b60SGautham R. Shenoy }
123409206b60SGautham R. Shenoy
123509206b60SGautham R. Shenoy if (!default_stop_found &&
12369c7b185aSAkshay Adiga (state->flags & OPAL_PM_STOP_INST_FAST)) {
12379c7b185aSAkshay Adiga pnv_default_stop_val = state->psscr_val;
12389c7b185aSAkshay Adiga pnv_default_stop_mask = state->psscr_mask;
123909206b60SGautham R. Shenoy default_stop_found = true;
124010d91611SNicholas Piggin WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
124109206b60SGautham R. Shenoy }
124209206b60SGautham R. Shenoy }
124309206b60SGautham R. Shenoy
1244f3b3f284SGautham R. Shenoy if (unlikely(!default_stop_found)) {
1245f3b3f284SGautham R. Shenoy pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
1246f3b3f284SGautham R. Shenoy } else {
1247ffd2961bSNicholas Piggin ppc_md.power_save = arch300_idle;
1248f3b3f284SGautham R. Shenoy pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
124909206b60SGautham R. Shenoy pnv_default_stop_val, pnv_default_stop_mask);
125009206b60SGautham R. Shenoy }
125109206b60SGautham R. Shenoy
1252f3b3f284SGautham R. Shenoy if (unlikely(!deepest_stop_found)) {
1253f3b3f284SGautham R. Shenoy pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
1254f3b3f284SGautham R. Shenoy } else {
1255f3b3f284SGautham R. Shenoy pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
125609206b60SGautham R. Shenoy pnv_deepest_stop_psscr_val,
125709206b60SGautham R. Shenoy pnv_deepest_stop_psscr_mask);
1258bcef83a0SShreyas B. Prabhu }
1259bcef83a0SShreyas B. Prabhu
126087997471SShaokun Zhang pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
1261dcbbfa6bSPratik Rajesh Sampat deep_spr_loss_state);
12629c7b185aSAkshay Adiga
126387997471SShaokun Zhang pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
126410d91611SNicholas Piggin pnv_first_tb_loss_level);
126510d91611SNicholas Piggin }
126610d91611SNicholas Piggin
pnv_disable_deep_states(void)126710d91611SNicholas Piggin static void __init pnv_disable_deep_states(void)
126810d91611SNicholas Piggin {
126910d91611SNicholas Piggin /*
127010d91611SNicholas Piggin * The stop-api is unable to restore hypervisor
127110d91611SNicholas Piggin * resources on wakeup from platform idle states which
127210d91611SNicholas Piggin * lose full context. So disable such states.
127310d91611SNicholas Piggin */
127410d91611SNicholas Piggin supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
127510d91611SNicholas Piggin pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
127610d91611SNicholas Piggin pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
127710d91611SNicholas Piggin
127810d91611SNicholas Piggin if (cpu_has_feature(CPU_FTR_ARCH_300) &&
127910d91611SNicholas Piggin (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
128010d91611SNicholas Piggin /*
128110d91611SNicholas Piggin * Use the default stop state for CPU-Hotplug
128210d91611SNicholas Piggin * if available.
128310d91611SNicholas Piggin */
128410d91611SNicholas Piggin if (default_stop_found) {
128510d91611SNicholas Piggin pnv_deepest_stop_psscr_val = pnv_default_stop_val;
128610d91611SNicholas Piggin pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
128710d91611SNicholas Piggin pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
128810d91611SNicholas Piggin pnv_deepest_stop_psscr_val);
128910d91611SNicholas Piggin } else { /* Fallback to snooze loop for CPU-Hotplug */
129010d91611SNicholas Piggin deepest_stop_found = false;
129110d91611SNicholas Piggin pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
129210d91611SNicholas Piggin }
129310d91611SNicholas Piggin }
1294bcef83a0SShreyas B. Prabhu }
1295bcef83a0SShreyas B. Prabhu
1296bcef83a0SShreyas B. Prabhu /*
1297bcef83a0SShreyas B. Prabhu * Probe device tree for supported idle states
1298bcef83a0SShreyas B. Prabhu */
pnv_probe_idle_states(void)1299bcef83a0SShreyas B. Prabhu static void __init pnv_probe_idle_states(void)
1300bcef83a0SShreyas B. Prabhu {
1301d405a98cSShreyas B. Prabhu int i;
1302d405a98cSShreyas B. Prabhu
13039c7b185aSAkshay Adiga if (nr_pnv_idle_states < 0) {
13049c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: no idle states found in the DT\n");
13059c7b185aSAkshay Adiga return;
13069c7b185aSAkshay Adiga }
13079c7b185aSAkshay Adiga
130816d83a54SPratik Rajesh Sampat if (cpu_has_feature(CPU_FTR_ARCH_300))
1309ffd2961bSNicholas Piggin pnv_arch300_idle_init();
13109c7b185aSAkshay Adiga
13119c7b185aSAkshay Adiga for (i = 0; i < nr_pnv_idle_states; i++)
13129c7b185aSAkshay Adiga supported_cpuidle_states |= pnv_idle_states[i].flags;
13139c7b185aSAkshay Adiga }
13149c7b185aSAkshay Adiga
13159c7b185aSAkshay Adiga /*
13169c7b185aSAkshay Adiga * This function parses device-tree and populates all the information
13179c7b185aSAkshay Adiga * into pnv_idle_states structure. It also sets up nr_pnv_idle_states
13189c7b185aSAkshay Adiga * which is the number of cpuidle states discovered through device-tree.
13199c7b185aSAkshay Adiga */
13209c7b185aSAkshay Adiga
pnv_parse_cpuidle_dt(void)1321e5913db1SNick Child static int __init pnv_parse_cpuidle_dt(void)
13229c7b185aSAkshay Adiga {
13239c7b185aSAkshay Adiga struct device_node *np;
13249c7b185aSAkshay Adiga int nr_idle_states, i;
13259c7b185aSAkshay Adiga int rc = 0;
13269c7b185aSAkshay Adiga u32 *temp_u32;
13279c7b185aSAkshay Adiga u64 *temp_u64;
13289c7b185aSAkshay Adiga const char **temp_string;
13299c7b185aSAkshay Adiga
1330bcef83a0SShreyas B. Prabhu np = of_find_node_by_path("/ibm,opal/power-mgt");
1331bcef83a0SShreyas B. Prabhu if (!np) {
1332d405a98cSShreyas B. Prabhu pr_warn("opal: PowerMgmt Node not found\n");
13339c7b185aSAkshay Adiga return -ENODEV;
1334d405a98cSShreyas B. Prabhu }
13359c7b185aSAkshay Adiga nr_idle_states = of_property_count_u32_elems(np,
1336d405a98cSShreyas B. Prabhu "ibm,cpu-idle-state-flags");
13379c7b185aSAkshay Adiga
13389c7b185aSAkshay Adiga pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states),
13399c7b185aSAkshay Adiga GFP_KERNEL);
13409c7b185aSAkshay Adiga temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL);
13419c7b185aSAkshay Adiga temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL);
13429c7b185aSAkshay Adiga temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL);
13439c7b185aSAkshay Adiga
13449c7b185aSAkshay Adiga if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) {
13459c7b185aSAkshay Adiga pr_err("Could not allocate memory for dt parsing\n");
13469c7b185aSAkshay Adiga rc = -ENOMEM;
1347d405a98cSShreyas B. Prabhu goto out;
1348d405a98cSShreyas B. Prabhu }
1349d405a98cSShreyas B. Prabhu
13509c7b185aSAkshay Adiga /* Read flags */
13519c7b185aSAkshay Adiga if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags",
13529c7b185aSAkshay Adiga temp_u32, nr_idle_states)) {
1353d405a98cSShreyas B. Prabhu pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
13549c7b185aSAkshay Adiga rc = -EINVAL;
1355bcef83a0SShreyas B. Prabhu goto out;
1356bcef83a0SShreyas B. Prabhu }
13579c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++)
13589c7b185aSAkshay Adiga pnv_idle_states[i].flags = temp_u32[i];
1359bcef83a0SShreyas B. Prabhu
13609c7b185aSAkshay Adiga /* Read latencies */
13619c7b185aSAkshay Adiga if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns",
13629c7b185aSAkshay Adiga temp_u32, nr_idle_states)) {
13639c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
13649c7b185aSAkshay Adiga rc = -EINVAL;
13659c7b185aSAkshay Adiga goto out;
13669c7b185aSAkshay Adiga }
13679c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++)
13689c7b185aSAkshay Adiga pnv_idle_states[i].latency_ns = temp_u32[i];
13699c7b185aSAkshay Adiga
13709c7b185aSAkshay Adiga /* Read residencies */
13719c7b185aSAkshay Adiga if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
13729c7b185aSAkshay Adiga temp_u32, nr_idle_states)) {
13732f62870cSChristophe JAILLET pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
13749c7b185aSAkshay Adiga rc = -EINVAL;
13759c7b185aSAkshay Adiga goto out;
13769c7b185aSAkshay Adiga }
13779c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++)
13789c7b185aSAkshay Adiga pnv_idle_states[i].residency_ns = temp_u32[i];
13799c7b185aSAkshay Adiga
1380ffd2961bSNicholas Piggin /* For power9 and later */
1381bcef83a0SShreyas B. Prabhu if (cpu_has_feature(CPU_FTR_ARCH_300)) {
13829c7b185aSAkshay Adiga /* Read pm_crtl_val */
13839c7b185aSAkshay Adiga if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
13849c7b185aSAkshay Adiga temp_u64, nr_idle_states)) {
13859c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
13869c7b185aSAkshay Adiga rc = -EINVAL;
1387bcef83a0SShreyas B. Prabhu goto out;
1388d405a98cSShreyas B. Prabhu }
13899c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++)
13909c7b185aSAkshay Adiga pnv_idle_states[i].psscr_val = temp_u64[i];
1391d405a98cSShreyas B. Prabhu
13929c7b185aSAkshay Adiga /* Read pm_crtl_mask */
13939c7b185aSAkshay Adiga if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask",
13949c7b185aSAkshay Adiga temp_u64, nr_idle_states)) {
13959c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
13969c7b185aSAkshay Adiga rc = -EINVAL;
13979c7b185aSAkshay Adiga goto out;
1398bcef83a0SShreyas B. Prabhu }
13999c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++)
14009c7b185aSAkshay Adiga pnv_idle_states[i].psscr_mask = temp_u64[i];
14019c7b185aSAkshay Adiga }
14029c7b185aSAkshay Adiga
14039c7b185aSAkshay Adiga /*
14049c7b185aSAkshay Adiga * power8 specific properties ibm,cpu-idle-state-pmicr-mask and
14059c7b185aSAkshay Adiga * ibm,cpu-idle-state-pmicr-val were never used and there is no
14069c7b185aSAkshay Adiga * plan to use it in near future. Hence, not parsing these properties
14079c7b185aSAkshay Adiga */
14089c7b185aSAkshay Adiga
14099c7b185aSAkshay Adiga if (of_property_read_string_array(np, "ibm,cpu-idle-state-names",
14109c7b185aSAkshay Adiga temp_string, nr_idle_states) < 0) {
14119c7b185aSAkshay Adiga pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
14129c7b185aSAkshay Adiga rc = -EINVAL;
14139c7b185aSAkshay Adiga goto out;
14149c7b185aSAkshay Adiga }
14159c7b185aSAkshay Adiga for (i = 0; i < nr_idle_states; i++)
141614be3756SWolfram Sang strscpy(pnv_idle_states[i].name, temp_string[i],
14179c7b185aSAkshay Adiga PNV_IDLE_NAME_LEN);
14189c7b185aSAkshay Adiga nr_pnv_idle_states = nr_idle_states;
14199c7b185aSAkshay Adiga rc = 0;
14209c7b185aSAkshay Adiga out:
14219c7b185aSAkshay Adiga kfree(temp_u32);
14229c7b185aSAkshay Adiga kfree(temp_u64);
14239c7b185aSAkshay Adiga kfree(temp_string);
1424605c27f3SLiang He of_node_put(np);
14259c7b185aSAkshay Adiga return rc;
14269c7b185aSAkshay Adiga }
14279c7b185aSAkshay Adiga
/*
 * pnv_init_idle_states - platform idle initialization (subsys initcall).
 *
 * Initializes per-CPU PACA idle fields, parses the idle states from the
 * device tree, and wires up the power-save entry points and offline
 * (hotplug) idle types according to the supported state flags.
 * Returns 0, or a negative errno if device-tree parsing fails.
 */
static int __init pnv_init_idle_states(void)
{
	int cpu;
	int rc = 0;

	/* Set up PACA fields */
	for_each_present_cpu(cpu) {
		struct paca_struct *p = paca_ptrs[cpu];

		/*
		 * idle_state is a per-core thread bitmask; only the first
		 * thread of a core starts with all sibling bits set.
		 */
		p->idle_state = 0;
		if (cpu == cpu_first_thread_sibling(cpu))
			p->idle_state = (1 << threads_per_core) - 1;

		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
			/* P7/P8 nap */
			p->thread_idle_state = PNV_THREAD_RUNNING;
		} else if (pvr_version_is(PVR_POWER9)) {
			/* P9 stop workarounds */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			p->requested_psscr = 0;
			atomic_set(&p->dont_stop, 0);
#endif
		}
	}

	/* In case we error out nr_pnv_idle_states will be zero */
	nr_pnv_idle_states = 0;
	supported_cpuidle_states = 0;

	/* An override (e.g. boot parameter) disables platform idle entirely. */
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;
	rc = pnv_parse_cpuidle_dt();
	if (rc)
		return rc;
	pnv_probe_idle_states();

	/* Pre-ISA-v3.0 (P7/P8) nap/sleep/winkle setup. */
	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
			power7_fastsleep_workaround_entry = false;
			power7_fastsleep_workaround_exit = false;
		} else {
			struct device *dev_root;
			/*
			 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
			 * workaround is needed to use fastsleep. Provide sysfs
			 * control to choose how this workaround has to be
			 * applied.
			 */
			dev_root = bus_get_dev_root(&cpu_subsys);
			if (dev_root) {
				device_create_file(dev_root,
						   &dev_attr_fastsleep_workaround_applyonce);
				put_device(dev_root);
			}
		}

		update_subcore_sibling_mask();

		if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
			ppc_md.power_save = power7_idle;
			power7_offline_type = PNV_THREAD_NAP;
		}

		/* Prefer the deepest available state for offlined CPUs. */
		if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
		    (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
			power7_offline_type = PNV_THREAD_WINKLE;
		else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
			 (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
			power7_offline_type = PNV_THREAD_SLEEP;
	}

	/*
	 * Full-context-loss states need SPRs saved via the stop-api; if
	 * that fails, disable those states.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		if (pnv_save_sprs_for_deep_states())
			pnv_disable_deep_states();
	}

out:
	return 0;
}
/* Run pnv_init_idle_states() at subsys initcall time on powernv machines. */
machine_subsys_initcall(powernv, pnv_init_idle_states);
1508