/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
#include <asm/runlatch.h>
#include <asm/dbell.h>

#include "powernv.h"
#include "subcore.h"

/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE	0xF

#define P9_STOP_SPR_MSR 2000
#define P9_STOP_SPR_PSSCR      855

static u32 supported_cpuidle_states;

/*
 * The default stop state that will be used by the ppc_md.power_save
 * function on platforms that support the stop instruction.
 */
static u64 pnv_default_stop_val;
static u64 pnv_default_stop_mask;
static bool default_stop_found;

/*
 * First deep stop state. Used to figure out when to save/restore
 * hypervisor context.
 */
u64 pnv_first_deep_stop_state = MAX_STOP_STATE;

/*
 * PSSCR value and mask of the deepest stop idle state.
 * Used when a CPU is offlined.
 */
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;

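/*
 * Register the SPR values that the SLW/stop-api must restore when a
 * thread wakes from an idle state that loses full hypervisor context.
 */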
static int pnv_save_sprs_for_deep_states(void)
{
	int cpu;
	int rc;

	/*
	 * The hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric
	 * across all cpus at boot. Read these register values on the
	 * current cpu and use the same values on all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR);
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);
	uint64_t msr_val = MSR_IDLE;
	uint64_t psscr_val = pnv_deepest_stop_psscr_val;

	for_each_present_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
			if (rc)
				return rc;

			rc = opal_slw_set_reg(pir,
					      P9_STOP_SPR_PSSCR, psscr_val);

			if (rc)
				return rc;
		}

		/* HIDs are per-core registers */
		if (cpu_thread_in_core(cpu) == 0) {

			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			/* Only POWER8 needs to set the extra HID registers */
			if (!cpu_has_feature(CPU_FTR_ARCH_300)) {

				rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
				if (rc != 0)
					return rc;

				rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
				if (rc != 0)
					return rc;
			}
		}
	}

	return 0;
}

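/*
 * Allocate and initialize the per-core idle bookkeeping used by the idle
 * entry/exit paths, and save SPRs via the stop-api when states that lose
 * full context are supported.
 */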
static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;

	/*
	 * core_idle_state - The lower 8 bits track the idle state of
	 * each thread of the core.
	 *
	 * The most significant bit is the lock bit.
	 *
	 * Initially all the bits corresponding to threads_per_core
	 * are set. They are cleared when a thread enters a deep idle
	 * state such as sleep or winkle/stop.
	 *
	 * Initially the lock bit is cleared. The lock bit has two
	 * purposes:
	 *	a. While the first thread in the core waking up from
	 *	   idle is restoring core state, it prevents other
	 *	   threads in the core from switching to process
	 *	   context.
	 *	b. While the last thread in the core is saving the
	 *	   core state, it prevents a different thread from
	 *	   waking up.
	 */
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);
		size_t paca_ptr_array_size;

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = (1 << threads_per_core) - 1;
		paca_ptr_array_size = (threads_per_core *
				       sizeof(struct paca_struct *));

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state;
			paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING;
			paca_ptrs[cpu]->thread_mask = 1 << j;
			if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
				continue;
			paca_ptrs[cpu]->thread_sibling_pacas =
				kmalloc_node(paca_ptr_array_size,
					     GFP_KERNEL, node);
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		int rc = pnv_save_sprs_for_deep_states();

		if (likely(!rc))
			return;

		/*
		 * The stop-api is unable to restore hypervisor
		 * resources on wakeup from platform idle states which
		 * lose full context. So disable such states.
		 */
		supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
		pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
		pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

		if (cpu_has_feature(CPU_FTR_ARCH_300) &&
		    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
			/*
			 * Use the default stop state for CPU-Hotplug
			 * if available.
			 */
			if (default_stop_found) {
				pnv_deepest_stop_psscr_val =
					pnv_default_stop_val;
				pnv_deepest_stop_psscr_mask =
					pnv_default_stop_mask;
				pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
					pnv_deepest_stop_psscr_val);
			} else { /* Fallback to snooze loop for CPU-Hotplug */
				deepest_stop_found = false;
				pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
			}
		}
	}
}

u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);

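/*
 * SMP call target: ask OPAL to apply the fastsleep workaround on this
 * core and report any failure through the int pointed to by @info.
 */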
static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

/*
 * Used to store the fastsleep workaround state:
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies that the fastsleep
	 * workaround needs to be left in the 'applied' state on all
	 * the cores. Do this by:
	 * 1. Patching out the call to 'undo' workaround in fastsleep exit path
	 * 2. Sending an IPI to all the cores which have at least one online
	 *    thread
	 * 3. Patching out the call to 'apply' workaround in fastsleep entry
	 *    path
	 * There is no need to send an IPI to cores which have all threads
	 * offlined, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
	 */
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_exit,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit");
		goto fail;
	}

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
				pnv_fastsleep_workaround_apply,
				&err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
	}

	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_entry,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry");
		goto fail;
	}

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);
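/*
 * The attribute above is registered on the CPU subsystem root, i.e. (on a
 * typical sysfs layout) /sys/devices/system/cpu/fastsleep_workaround_applyonce.
 * Writing 1 switches to apply-once mode; any other value is rejected
 * with -EINVAL.
 */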

static unsigned long __power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return 0;

	__ppc64_runlatch_off();
	srr1 = power7_idle_insn(type);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	return srr1;
}

void power7_idle_type(unsigned long type)
{
	unsigned long srr1;

	srr1 = __power7_idle_type(type);
	irq_set_pending_from_srr1(srr1);
}

void power7_idle(void)
{
	if (!powersave_nap)
		return;

	power7_idle_type(PNV_THREAD_NAP);
}

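/*
 * Compose the PSSCR for the requested stop state from the current
 * register value plus the state's value/mask, then execute stop with
 * the runlatch off; the wakeup SRR1 lets the caller replay any pending
 * interrupt.
 */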
static unsigned long __power9_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long psscr;
	unsigned long srr1;

	if (!prep_irq_for_idle_irqsoff())
		return 0;

	psscr = mfspr(SPRN_PSSCR);
	psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;

	__ppc64_runlatch_off();
	srr1 = power9_idle_stop(psscr);
	__ppc64_runlatch_on();

	fini_irq_for_idle_irqsoff();

	return srr1;
}

void power9_idle_type(unsigned long stop_psscr_val,
				      unsigned long stop_psscr_mask)
{
	unsigned long srr1;

	srr1 = __power9_idle_type(stop_psscr_val, stop_psscr_mask);
	irq_set_pending_from_srr1(srr1);
}

/*
 * Used for ppc_md.power_save which needs a function with no parameters
 */
void power9_idle(void)
{
	power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * This is used in working around bugs in thread reconfiguration
 * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 * memory and the way that XER[SO] is checkpointed.
 * This function forces the core into SMT4 mode by asking all other
 * threads not to stop, and sending a message to any that are in a
 * stop state.
 * Must be called with preemption disabled.
 */
void pnv_power9_force_smt4_catch(void)
{
	int cpu, cpu0, thr;
	int awake_threads = 1;		/* this thread is awake */
	int poke_threads = 0;
	int need_awake = threads_per_core;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
	}
	/* order setting dont_stop vs testing requested_psscr */
	mb();
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (!paca_ptrs[cpu0+thr]->requested_psscr)
			++awake_threads;
		else
			poke_threads |= (1 << thr);
	}

	/* If at least 3 threads are awake, the core is in SMT4 already */
	if (awake_threads < need_awake) {
		/* We have to wake some threads; we'll use msgsnd */
		for (thr = 0; thr < threads_per_core; ++thr) {
			if (poke_threads & (1 << thr)) {
				ppc_msgsnd_sync();
				ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
					   paca_ptrs[cpu0+thr]->hw_cpu_id);
			}
		}
		/* now spin until at least 3 threads are awake */
		do {
			for (thr = 0; thr < threads_per_core; ++thr) {
				if ((poke_threads & (1 << thr)) &&
				    !paca_ptrs[cpu0+thr]->requested_psscr) {
					++awake_threads;
					poke_threads &= ~(1 << thr);
				}
			}
		} while (awake_threads < need_awake);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);

void pnv_power9_force_smt4_release(void)
{
	int cpu, cpu0, thr;

	cpu = smp_processor_id();
	cpu0 = cpu & ~(threads_per_core - 1);

	/* clear all the dont_stop flags */
	for (thr = 0; thr < threads_per_core; ++thr) {
		if (cpu != cpu0 + thr)
			atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
	}
}
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#ifdef CONFIG_HOTPLUG_CPU
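/*
 * Write the new LPCR value on this thread and, when the deepest stop
 * state can lose full hypervisor context, also hand it to the SLW
 * engine via the stop-api so it is restored on special wakeup.
 */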
static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	u64 pir = get_hard_smp_processor_id(cpu);

	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
}

/*
 * pnv_cpu_offline: A function that puts the CPU into the deepest
 * available platform idle state on a CPU-Offline.
 * Called with interrupts hard disabled and no lazy irq pending.
 */
unsigned long pnv_cpu_offline(unsigned int cpu)
{
	unsigned long srr1;
	u32 idle_states = pnv_get_supported_cpuidle_states();
	u64 lpcr_val;

	/*
	 * We don't want to take decrementer interrupts while we are
	 * offline, so clear LPCR:PECE1. We keep PECE2 (and
	 * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
	 *
	 * If the CPU gets woken up by a special wakeup, ensure that
	 * the SLW engine sets LPCR with the decrementer bit cleared,
	 * else the CPU will come back to the kernel due to a spurious
	 * wakeup.
	 */
	lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	__ppc64_runlatch_off();

	if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
		unsigned long psscr;

		psscr = mfspr(SPRN_PSSCR);
		psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
						pnv_deepest_stop_psscr_val;
		srr1 = power9_offline_stop(psscr);

	} else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
		   (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
		srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
	} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
		   (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
	} else if (idle_states & OPAL_PM_NAP_ENABLED) {
		srr1 = power7_idle_insn(PNV_THREAD_NAP);
	} else {
		/* This is the fallback method. We emulate snooze */
		while (!generic_check_cpu_restart(cpu)) {
			HMT_low();
			HMT_very_low();
		}
		srr1 = 0;
		HMT_medium();
	}

	__ppc64_runlatch_on();

	/*
	 * Re-enable decrementer interrupts in LPCR.
	 *
	 * Further, we want stop states to be woken up by the decrementer
	 * for non-hotplug cases, so program the LPCR via the stop-api as
	 * well.
	 */
	lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
	pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);

	return srr1;
}
#endif

/*
 * Power ISA 3.0 idle initialization.
 *
 * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
 * Register (PSSCR) to control idle behavior.
 *
 * PSSCR layout:
 * ----------------------------------------------------------
 * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
 * ----------------------------------------------------------
 * 0      4     41   42    43   44     48    54   56    60
 *
 * PSSCR key fields:
 *	Bits 0:3  - Power-Saving Level Status (PLS). This field indicates the
 *	lowest power-saving state the thread entered since the stop
 *	instruction was last executed.
 *
 *	Bit 41 - Status Disable (SD)
 *	0 - Shows PLS entries
 *	1 - PLS entries are all 0
 *
 *	Bit 42 - Enable State Loss (ESL)
 *	0 - No state is lost irrespective of other fields
 *	1 - Allows state loss
 *
 *	Bit 43 - Exit Criterion (EC)
 *	0 - Exit from power-save mode on any interrupt
 *	1 - Exit from power-save mode controlled by LPCR's PECE bits
 *
 *	Bits 44:47 - Power-Saving Level Limit (PSLL)
 *	This limits the power-saving level that can be entered into.
 *
 *	Bits 60:63 - Requested Level (RL)
 *	Used to specify which power-saving level must be entered on executing
 *	the stop instruction.
 */
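/*
 * Worked example (values assumed for illustration, not taken from any
 * particular firmware): with the bit numbering above, ESL (bit 42) and
 * EC (bit 43) correspond to 0x200000 and 0x100000 in the 64-bit register,
 * and RL occupies the lowest nibble. So a psscr_val of 0x0000000000300003
 * requests stop level 3 with both ESL and EC set, i.e. a state that may
 * lose state and is woken according to the LPCR PECE bits.
 */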

int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
	int err = 0;

	/*
	 * psscr_mask == 0xf indicates an older firmware.
	 * Set remaining fields of psscr to the default values.
	 * See NOTE above definition of PSSCR_HV_DEFAULT_VAL
	 */
	if (*psscr_mask == 0xf) {
		*psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL;
		*psscr_mask = PSSCR_HV_DEFAULT_MASK;
		return err;
	}

	/*
	 * New firmware is expected to set the psscr_val bits correctly.
	 * Validate that the following invariants are correctly maintained by
	 * the new firmware.
	 * - ESL bit value matches the EC bit value.
	 * - ESL bit is set for all the deep stop states.
	 */
	if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) {
		err = ERR_EC_ESL_MISMATCH;
	} else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
		GET_PSSCR_ESL(*psscr_val) == 0) {
		err = ERR_DEEP_STATE_ESL_MISMATCH;
	}

	return err;
}

/*
 * pnv_power9_idle_init: Initializes the default idle state, first
 *                       deep idle state and deepest idle state on
 *                       ISA 3.0 CPUs.
 *
 * @np: /ibm,opal/power-mgt device node
 * @flags: cpu-idle-state-flags array
 * @dt_idle_states: Number of idle state entries
 * Returns 0 on success
 */
static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
					int dt_idle_states)
{
	u64 *psscr_val = NULL;
	u64 *psscr_mask = NULL;
	u32 *residency_ns = NULL;
	u64 max_residency_ns = 0;
	int rc = 0, i;

	psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL);
	psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL);
	residency_ns = kcalloc(dt_idle_states, sizeof(*residency_ns),
			       GFP_KERNEL);

	if (!psscr_val || !psscr_mask || !residency_ns) {
		rc = -1;
		goto out;
	}

	if (of_property_read_u64_array(np,
		"ibm,cpu-idle-state-psscr",
		psscr_val, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");
		rc = -1;
		goto out;
	}

	if (of_property_read_u64_array(np,
				       "ibm,cpu-idle-state-psscr-mask",
				       psscr_mask, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n");
		rc = -1;
		goto out;
	}

	if (of_property_read_u32_array(np,
				       "ibm,cpu-idle-state-residency-ns",
				       residency_ns, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
		rc = -1;
		goto out;
	}

	/*
	 * Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask},
	 * and the pnv_default_stop_{val,mask}.
	 *
	 * pnv_first_deep_stop_state should be set to the first stop
	 * level to cause hypervisor state loss.
	 *
	 * pnv_deepest_stop_{val,mask} should be set to values corresponding to
	 * the deepest stop state.
	 *
	 * pnv_default_stop_{val,mask} should be set to values corresponding to
	 * the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state.
	 */
	pnv_first_deep_stop_state = MAX_STOP_STATE;
	for (i = 0; i < dt_idle_states; i++) {
		int err;
		u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;

		if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
		     (pnv_first_deep_stop_state > psscr_rl))
			pnv_first_deep_stop_state = psscr_rl;

		err = validate_psscr_val_mask(&psscr_val[i], &psscr_mask[i],
					      flags[i]);
		if (err) {
			report_invalid_psscr_val(psscr_val[i], err);
			continue;
		}

		if (max_residency_ns < residency_ns[i]) {
			max_residency_ns = residency_ns[i];
			pnv_deepest_stop_psscr_val = psscr_val[i];
			pnv_deepest_stop_psscr_mask = psscr_mask[i];
			pnv_deepest_stop_flag = flags[i];
			deepest_stop_found = true;
		}

		if (!default_stop_found &&
		    (flags[i] & OPAL_PM_STOP_INST_FAST)) {
			pnv_default_stop_val = psscr_val[i];
			pnv_default_stop_mask = psscr_mask[i];
			default_stop_found = true;
		}
	}

	if (unlikely(!default_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
	} else {
		ppc_md.power_save = power9_idle;
		pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_default_stop_val, pnv_default_stop_mask);
	}

	if (unlikely(!deepest_stop_found)) {
		pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
	} else {
		pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
			pnv_deepest_stop_psscr_val,
			pnv_deepest_stop_psscr_mask);
	}

	pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
		pnv_first_deep_stop_state);
out:
	kfree(psscr_val);
	kfree(psscr_mask);
	kfree(residency_ns);
	return rc;
}

/*
 * Probe device tree for supported idle states
 */
static void __init pnv_probe_idle_states(void)
{
	struct device_node *np;
	int dt_idle_states;
	u32 *flags = NULL;
	int i;

	np = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!np) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}
	dt_idle_states = of_property_count_u32_elems(np,
			"ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);

	if (of_property_read_u32_array(np,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (pnv_power9_idle_init(np, flags, dt_idle_states))
			goto out;
	}

	for (i = 0; i < dt_idle_states; i++)
		supported_cpuidle_states |= flags[i];

out:
	kfree(flags);
}

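/*
 * Probe the device tree and wire up the platform idle entry points.
 * Runs as a machine subsys initcall on powernv.
 */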
static int __init pnv_init_idle_states(void)
{

	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;

	pnv_probe_idle_states();

	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_entry,
			PPC_INST_NOP);
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_exit,
			PPC_INST_NOP);
	} else {
		/*
		 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
		 * a workaround is needed to use fastsleep. Provide sysfs
		 * control to choose how this workaround has to be applied.
		 */
		device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
	}

	pnv_alloc_idle_core_states();

	/*
	 * For each CPU, record its PACA address in each of its
	 * sibling threads' PACAs at the slot corresponding to this
	 * CPU's index in the core.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		int cpu;

		pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
		for_each_present_cpu(cpu) {
			int base_cpu = cpu_first_thread_sibling(cpu);
			int idx = cpu_thread_in_core(cpu);
			int i;

			for (i = 0; i < threads_per_core; i++) {
				int j = base_cpu + i;

				paca_ptrs[j]->thread_sibling_pacas[idx] =
					paca_ptrs[cpu];
			}
		}
	}

	if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
		ppc_md.power_save = power7_idle;

out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);