--- cpuidle-psci.c (9b031c86506cef9acae45e61339fcf9deaabb793)
+++ cpuidle-psci.c (166bf83529c47ddcd4854ff2047f4f980dc492c8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PSCI CPU idle driver.
 *
 * Copyright (C) 2019 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpuhotplug.h>
+#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/psci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string.h>

#include <asm/cpuidle.h>

#include "cpuidle-psci.h"
#include "dt_idle_states.h"

struct psci_cpuidle_data {
        u32 *psci_states;
        struct device *dev;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(u32, domain_state);
-static bool psci_cpuidle_use_cpuhp __initdata;
+static bool psci_cpuidle_use_cpuhp;

void psci_set_domain_state(u32 state)
{
        __this_cpu_write(domain_state, state);
}

static inline u32 psci_get_domain_state(void)
{

[... 9 unchanged lines hidden ...]

                                 struct cpuidle_driver *drv, int idx)
{
        struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
        u32 *states = data->psci_states;
        struct device *pd_dev = data->dev;
        u32 state;
        int ret;

+	ret = cpu_pm_enter();
+	if (ret)
+		return -1;
+
        /* Do runtime PM to manage a hierarchical CPU toplogy. */
        pm_runtime_put_sync_suspend(pd_dev);

        state = psci_get_domain_state();
        if (!state)
                state = states[idx];

-	ret = psci_enter_state(idx, state);
+	ret = psci_cpu_suspend_enter(state) ? -1 : idx;

        pm_runtime_get_sync(pd_dev);

+	cpu_pm_exit();
+
        /* Clear the domain state to start fresh when back from idle. */
        psci_set_domain_state(0);
        return ret;
}

static int psci_idle_cpuhp_up(unsigned int cpu)
{
        struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);

[... 12 unchanged lines hidden ...]

                pm_runtime_put_sync(pd_dev);
                /* Clear domain state to start fresh at next online. */
                psci_set_domain_state(0);
        }

        return 0;
}

-static void __init psci_idle_init_cpuhp(void)
+static void psci_idle_init_cpuhp(void)
{
        int err;

        if (!psci_cpuidle_use_cpuhp)
                return;

        err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
                                        "cpuidle/psci:online",

[... 6 unchanged lines hidden ...]

static int psci_enter_idle_state(struct cpuidle_device *dev,
                                 struct cpuidle_driver *drv, int idx)
{
        u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

        return psci_enter_state(idx, state[idx]);
}

-static struct cpuidle_driver psci_idle_driver __initdata = {
-	.name = "psci_idle",
-	.owner = THIS_MODULE,
-	/*
-	 * PSCI idle states relies on architectural WFI to
-	 * be represented as state index 0.
-	 */
-	.states[0] = {
-		.enter                  = psci_enter_idle_state,
-		.exit_latency           = 1,
-		.target_residency       = 1,
-		.power_usage            = UINT_MAX,
-		.name                   = "WFI",
-		.desc                   = "ARM WFI",
-	}
-};
-
-static const struct of_device_id psci_idle_state_match[] __initconst = {
+static const struct of_device_id psci_idle_state_match[] = {
        { .compatible = "arm,idle-state",
          .data = psci_enter_idle_state },
        { },
};

-int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
        int err = of_property_read_u32(np, "arm,psci-suspend-param", state);

        if (err) {
                pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
                return err;
        }

        if (!psci_power_state_is_valid(*state)) {
                pr_warn("Invalid PSCI power state %#x\n", *state);
                return -EINVAL;
        }

        return 0;
}

-static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
-					    struct psci_cpuidle_data *data,
-					    unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+				     struct psci_cpuidle_data *data,
+				     unsigned int state_count, int cpu)
{
        /* Currently limit the hierarchical topology to be used in OSI mode. */
        if (!psci_has_osi_support())
                return 0;

        data->dev = psci_dt_attach_cpu(cpu);
        if (IS_ERR_OR_NULL(data->dev))
                return PTR_ERR_OR_ZERO(data->dev);

[... 4 unchanged lines hidden ...]

         * deeper states.
         */
        drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
        psci_cpuidle_use_cpuhp = true;

        return 0;
}

-static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
-					struct device_node *cpu_node,
-					unsigned int state_count, int cpu)
+static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+				 struct device_node *cpu_node,
+				 unsigned int state_count, int cpu)
{
        int i, ret = 0;
        u32 *psci_states;
        struct device_node *state_node;
        struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

        state_count++; /* Add WFI state too */
-	psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
+	psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
+				   GFP_KERNEL);
        if (!psci_states)
                return -ENOMEM;

        for (i = 1; i < state_count; i++) {
                state_node = of_get_cpu_state_node(cpu_node, i - 1);
                if (!state_node)
                        break;

                ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
                of_node_put(state_node);

                if (ret)
-			goto free_mem;
+			return ret;

                pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
        }

-	if (i != state_count) {
-		ret = -ENODEV;
-		goto free_mem;
-	}
+	if (i != state_count)
+		return -ENODEV;

        /* Initialize optional data, used for the hierarchical topology. */
        ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
        if (ret < 0)
-		goto free_mem;
+		return ret;

        /* Idle states parsed correctly, store them in the per-cpu struct. */
        data->psci_states = psci_states;
        return 0;
-
-free_mem:
-	kfree(psci_states);
-	return ret;
}

-static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
-				     unsigned int cpu, unsigned int state_count)
+static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
+			      unsigned int cpu, unsigned int state_count)
{
        struct device_node *cpu_node;
        int ret;

        /*
         * If the PSCI cpu_suspend function hook has not been initialized
         * idle states must not be enabled, so bail out
         */
        if (!psci_ops.cpu_suspend)
                return -EOPNOTSUPP;

        cpu_node = of_cpu_device_node_get(cpu);
        if (!cpu_node)
                return -ENODEV;

-	ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
+	ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);

        of_node_put(cpu_node);

        return ret;
}

-static int __init psci_idle_init_cpu(int cpu)
+static void psci_cpu_deinit_idle(int cpu)
{
+	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
+
+	psci_dt_detach_cpu(data->dev);
+	psci_cpuidle_use_cpuhp = false;
+}
+
+static int psci_idle_init_cpu(struct device *dev, int cpu)
+{
        struct cpuidle_driver *drv;
        struct device_node *cpu_node;
        const char *enable_method;
        int ret = 0;

        cpu_node = of_cpu_device_node_get(cpu);
        if (!cpu_node)
                return -ENODEV;

[... 5 unchanged lines hidden ...]

        enable_method = of_get_property(cpu_node, "enable-method", NULL);
        if (!enable_method || (strcmp(enable_method, "psci")))
                ret = -ENODEV;

        of_node_put(cpu_node);
        if (ret)
                return ret;

-	drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

+	drv->name = "psci_idle";
+	drv->owner = THIS_MODULE;
        drv->cpumask = (struct cpumask *)cpumask_of(cpu);

        /*
-	 * Initialize idle states data, starting at index 1, since
-	 * by default idle state 0 is the quiescent state reached
-	 * by the cpu by executing the wfi instruction.
-	 *
+	 * PSCI idle states relies on architectural WFI to be represented as
+	 * state index 0.
+	 */
+	drv->states[0].enter = psci_enter_idle_state;
+	drv->states[0].exit_latency = 1;
+	drv->states[0].target_residency = 1;
+	drv->states[0].power_usage = UINT_MAX;
+	strcpy(drv->states[0].name, "WFI");
+	strcpy(drv->states[0].desc, "ARM WFI");
+
+	/*
         * If no DT idle states are detected (ret == 0) let the driver
         * initialization fail accordingly since there is no reason to
         * initialize the idle driver if only wfi is supported, the
         * default archictectural back-end already executes wfi
         * on idle entry.
         */
        ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
-	if (ret <= 0) {
-		ret = ret ? : -ENODEV;
-		goto out_kfree_drv;
-	}
+	if (ret <= 0)
+		return ret ? : -ENODEV;

        /*
         * Initialize PSCI idle states.
         */
-	ret = psci_cpu_init_idle(drv, cpu, ret);
+	ret = psci_cpu_init_idle(dev, drv, cpu, ret);
        if (ret) {
                pr_err("CPU %d failed to PSCI idle\n", cpu);
-		goto out_kfree_drv;
+		return ret;
        }

        ret = cpuidle_register(drv, NULL);
        if (ret)
-		goto out_kfree_drv;
+		goto deinit;

-	return 0;
-
-out_kfree_drv:
-	kfree(drv);
+	cpuidle_cooling_register(drv);
+
+	return 0;
+deinit:
+	psci_cpu_deinit_idle(cpu);
        return ret;
}

/*
- * psci_idle_init - Initializes PSCI cpuidle driver
+ * psci_idle_probe - Initializes PSCI cpuidle driver
 *
 * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
 * to register cpuidle driver then rollback to cancel all CPUs
 * registration.
 */
-static int __init psci_idle_init(void)
+static int psci_cpuidle_probe(struct platform_device *pdev)
{
        int cpu, ret;
        struct cpuidle_driver *drv;
        struct cpuidle_device *dev;

        for_each_possible_cpu(cpu) {
-		ret = psci_idle_init_cpu(cpu);
+		ret = psci_idle_init_cpu(&pdev->dev, cpu);
                if (ret)
                        goto out_fail;
        }

        psci_idle_init_cpuhp();
        return 0;

out_fail:
        while (--cpu >= 0) {
                dev = per_cpu(cpuidle_devices, cpu);
                drv = cpuidle_get_cpu_driver(dev);
                cpuidle_unregister(drv);
-		kfree(drv);
+		psci_cpu_deinit_idle(cpu);
        }

        return ret;
}
+
+static struct platform_driver psci_cpuidle_driver = {
+	.probe = psci_cpuidle_probe,
+	.driver = {
+		.name = "psci-cpuidle",
+	},
+};
+
+static int __init psci_idle_init(void)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	ret = platform_driver_register(&psci_cpuidle_driver);
+	if (ret)
+		return ret;
+
+	pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		platform_driver_unregister(&psci_cpuidle_driver);
+		return PTR_ERR(pdev);
+	}
+
+	return 0;
+}
device_initcall(psci_idle_init);