xref: /linux/drivers/cpuidle/cpuidle-psci.c (revision d90d90a1978af6530c7d8b201c4ab117d0506b1a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * PSCI CPU idle driver.
4  *
5  * Copyright (C) 2019 ARM Ltd.
6  * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
7  */
8 
9 #define pr_fmt(fmt) "CPUidle PSCI: " fmt
10 
11 #include <linux/cpuhotplug.h>
12 #include <linux/cpu_cooling.h>
13 #include <linux/cpuidle.h>
14 #include <linux/cpumask.h>
15 #include <linux/cpu_pm.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/psci.h>
22 #include <linux/pm_domain.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/slab.h>
25 #include <linux/string.h>
26 #include <linux/syscore_ops.h>
27 
28 #include <asm/cpuidle.h>
29 
30 #include "cpuidle-psci.h"
31 #include "dt_idle_states.h"
32 
/*
 * Per-CPU idle bookkeeping: the PSCI suspend parameters parsed from the DT
 * idle-state nodes, plus the device attached to the CPU's PM domain when the
 * hierarchical topology is used (OSI mode only; see
 * psci_dt_cpu_init_topology()).
 */
struct psci_cpuidle_data {
	u32 *psci_states;	/* Suspend parameter per idle state, indexed by state. */
	struct device *dev;	/* Genpd-attached device, or NULL if no PM domain. */
};
37 
/* Per-CPU idle data; written at init, read in the idle path. */
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
/* Domain-state parameter to use at the next idle entry; 0 means none selected. */
static DEFINE_PER_CPU(u32, domain_state);
/* Set when the hierarchical topology is in use and cpuhp/syscore hooks are needed. */
static bool psci_cpuidle_use_cpuhp;
41 
/* Record the PSCI domain-state parameter to use at this CPU's next idle entry. */
void psci_set_domain_state(u32 state)
{
	__this_cpu_write(domain_state, state);
}
46 
/* Read this CPU's pending domain-state parameter (0 if none has been set). */
static inline u32 psci_get_domain_state(void)
{
	return __this_cpu_read(domain_state);
}
51 
/*
 * Enter an idle state that is managed through the hierarchical PM domain
 * topology: drop the CPU's PM-domain reference on the way in (which may
 * select a deeper, shared domain state) and reacquire it on the way out.
 *
 * @s2idle selects the suspend-to-idle genpd path instead of runtime PM.
 * Returns the entered state index, or -1 on failure.
 */
static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
						    struct cpuidle_driver *drv, int idx,
						    bool s2idle)
{
	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
	u32 *states = data->psci_states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	/*
	 * Use the domain state if one was selected (presumably by the genpd
	 * governor via psci_set_domain_state()); otherwise fall back to the
	 * per-CPU parameter for this state index.
	 */
	state = psci_get_domain_state();
	if (!state)
		state = states[idx];

	ret = psci_cpu_suspend_enter(state) ? -1 : idx;

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	psci_set_domain_state(0);
	return ret;
}
89 
/* cpuidle ->enter hook for the deepest state when a PM domain is attached. */
static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int idx)
{
	return __psci_enter_domain_idle_state(dev, drv, idx, false);
}
95 
/* cpuidle ->enter_s2idle hook for the deepest state when a PM domain is attached. */
static int psci_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					       struct cpuidle_driver *drv,
					       int idx)
{
	return __psci_enter_domain_idle_state(dev, drv, idx, true);
}
102 
103 static int psci_idle_cpuhp_up(unsigned int cpu)
104 {
105 	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
106 
107 	if (pd_dev)
108 		pm_runtime_get_sync(pd_dev);
109 
110 	return 0;
111 }
112 
113 static int psci_idle_cpuhp_down(unsigned int cpu)
114 {
115 	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
116 
117 	if (pd_dev) {
118 		pm_runtime_put_sync(pd_dev);
119 		/* Clear domain state to start fresh at next online. */
120 		psci_set_domain_state(0);
121 	}
122 
123 	return 0;
124 }
125 
126 static void psci_idle_syscore_switch(bool suspend)
127 {
128 	bool cleared = false;
129 	struct device *dev;
130 	int cpu;
131 
132 	for_each_possible_cpu(cpu) {
133 		dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev;
134 
135 		if (dev && suspend) {
136 			dev_pm_genpd_suspend(dev);
137 		} else if (dev) {
138 			dev_pm_genpd_resume(dev);
139 
140 			/* Account for userspace having offlined a CPU. */
141 			if (pm_runtime_status_suspended(dev))
142 				pm_runtime_set_active(dev);
143 
144 			/* Clear domain state to re-start fresh. */
145 			if (!cleared) {
146 				psci_set_domain_state(0);
147 				cleared = true;
148 			}
149 		}
150 	}
151 }
152 
/* Syscore suspend hook: suspend all attached CPU PM-domain devices. */
static int psci_idle_syscore_suspend(void)
{
	psci_idle_syscore_switch(true);
	return 0;
}
158 
/* Syscore resume hook: resume all attached CPU PM-domain devices. */
static void psci_idle_syscore_resume(void)
{
	psci_idle_syscore_switch(false);
}
163 
/* Syscore ops, registered only when the hierarchical topology is in use. */
static struct syscore_ops psci_idle_syscore_ops = {
	.suspend = psci_idle_syscore_suspend,
	.resume = psci_idle_syscore_resume,
};
168 
169 static void psci_idle_init_cpuhp(void)
170 {
171 	int err;
172 
173 	if (!psci_cpuidle_use_cpuhp)
174 		return;
175 
176 	register_syscore_ops(&psci_idle_syscore_ops);
177 
178 	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
179 					"cpuidle/psci:online",
180 					psci_idle_cpuhp_up,
181 					psci_idle_cpuhp_down);
182 	if (err)
183 		pr_warn("Failed %d while setup cpuhp state\n", err);
184 }
185 
/*
 * cpuidle ->enter hook for plain (non-domain) idle states: hand the
 * DT-parsed suspend parameter for @idx to psci_cpu_suspend_enter() through
 * the RCU-aware CPU PM idle-enter macro.
 */
static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
					   struct cpuidle_driver *drv, int idx)
{
	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}
193 
/* DT match table: every "arm,idle-state" node is entered via psci_enter_idle_state(). */
static const struct of_device_id psci_idle_state_match[] = {
	{ .compatible = "arm,idle-state",
	  .data = psci_enter_idle_state },
	{ },
};
199 
200 int psci_dt_parse_state_node(struct device_node *np, u32 *state)
201 {
202 	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
203 
204 	if (err) {
205 		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
206 		return err;
207 	}
208 
209 	if (!psci_power_state_is_valid(*state)) {
210 		pr_warn("Invalid PSCI power state %#x\n", *state);
211 		return -EINVAL;
212 	}
213 
214 	return 0;
215 }
216 
217 static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
218 				     struct psci_cpuidle_data *data,
219 				     unsigned int state_count, int cpu)
220 {
221 	/* Currently limit the hierarchical topology to be used in OSI mode. */
222 	if (!psci_has_osi_support())
223 		return 0;
224 
225 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
226 		return 0;
227 
228 	data->dev = psci_dt_attach_cpu(cpu);
229 	if (IS_ERR_OR_NULL(data->dev))
230 		return PTR_ERR_OR_ZERO(data->dev);
231 
232 	/*
233 	 * Using the deepest state for the CPU to trigger a potential selection
234 	 * of a shared state for the domain, assumes the domain states are all
235 	 * deeper states.
236 	 */
237 	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
238 	drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
239 	drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
240 	psci_cpuidle_use_cpuhp = true;
241 
242 	return 0;
243 }
244 
/*
 * Parse the DT idle-state nodes of @cpu_node and store their PSCI suspend
 * parameters in the per-cpu data. Index 0 is reserved for the architectural
 * WFI state (set up by psci_idle_init_cpu()), so DT states start at index 1.
 * The parameter array is devm-allocated against @dev. Returns 0 on success,
 * a negative errno on failure.
 */
static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
				 struct device_node *cpu_node,
				 unsigned int state_count, int cpu)
{
	int i, ret = 0;
	u32 *psci_states;
	struct device_node *state_node;
	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

	state_count++; /* Add WFI state too */
	psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
				   GFP_KERNEL);
	if (!psci_states)
		return -ENOMEM;

	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
	}

	/* Fewer DT state nodes than the caller reported: treat as failure. */
	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Idle states parsed correctly, store them in the per-cpu struct. */
	data->psci_states = psci_states;
	return 0;
}
286 
287 static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
288 			      unsigned int cpu, unsigned int state_count)
289 {
290 	struct device_node *cpu_node;
291 	int ret;
292 
293 	/*
294 	 * If the PSCI cpu_suspend function hook has not been initialized
295 	 * idle states must not be enabled, so bail out
296 	 */
297 	if (!psci_ops.cpu_suspend)
298 		return -EOPNOTSUPP;
299 
300 	cpu_node = of_cpu_device_node_get(cpu);
301 	if (!cpu_node)
302 		return -ENODEV;
303 
304 	ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);
305 
306 	of_node_put(cpu_node);
307 
308 	return ret;
309 }
310 
311 static void psci_cpu_deinit_idle(int cpu)
312 {
313 	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
314 
315 	psci_dt_detach_cpu(data->dev);
316 	psci_cpuidle_use_cpuhp = false;
317 }
318 
/*
 * Build and register a cpuidle driver for @cpu: state 0 is the architectural
 * WFI, the remaining states come from the DT idle-state nodes. Fails with
 * -ENODEV if the CPU's enable-method is not "psci" or no DT idle states
 * exist. Returns 0 on success, a negative errno on failure.
 */
static int psci_idle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	struct device_node *cpu_node;
	const char *enable_method;
	int ret = 0;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	/*
	 * Check whether the enable-method for the cpu is PSCI, fail
	 * if it is not.
	 */
	enable_method = of_get_property(cpu_node, "enable-method", NULL);
	if (!enable_method || (strcmp(enable_method, "psci")))
		ret = -ENODEV;

	of_node_put(cpu_node);
	if (ret)
		return ret;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "psci_idle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/*
	 * PSCI idle states rely on architectural WFI to be represented as
	 * state index 0.
	 */
	drv->states[0].enter = psci_enter_idle_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "ARM WFI");

	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
	if (ret <= 0)
		return ret ? : -ENODEV;

	/*
	 * Initialize PSCI idle states.
	 */
	ret = psci_cpu_init_idle(dev, drv, cpu, ret);
	if (ret) {
		pr_err("CPU %d failed to PSCI idle\n", cpu);
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	psci_cpu_deinit_idle(cpu);
	return ret;
}
392 
/*
 * psci_cpuidle_probe - Initializes PSCI cpuidle driver
 *
 * Initializes the PSCI cpuidle driver for all CPUs; if any CPU fails
 * to register its cpuidle driver, roll back and cancel the registration
 * of all previously registered CPUs.
 */
static int psci_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;

	for_each_possible_cpu(cpu) {
		ret = psci_idle_init_cpu(&pdev->dev, cpu);
		if (ret)
			goto out_fail;
	}

	/* All CPUs registered: enable the hotplug/syscore hooks if needed. */
	psci_idle_init_cpuhp();
	return 0;

out_fail:
	/* Unwind CPUs [0, cpu) that were already registered, in reverse order. */
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		psci_cpu_deinit_idle(cpu);
	}

	return ret;
}
425 
/* Platform driver, bound to the device created in psci_idle_init(). */
static struct platform_driver psci_cpuidle_driver = {
	.probe = psci_cpuidle_probe,
	.driver = {
		.name = "psci-cpuidle",
	},
};
432 
433 static int __init psci_idle_init(void)
434 {
435 	struct platform_device *pdev;
436 	int ret;
437 
438 	ret = platform_driver_register(&psci_cpuidle_driver);
439 	if (ret)
440 		return ret;
441 
442 	pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
443 	if (IS_ERR(pdev)) {
444 		platform_driver_unregister(&psci_cpuidle_driver);
445 		return PTR_ERR(pdev);
446 	}
447 
448 	return 0;
449 }
450 device_initcall(psci_idle_init);
451