// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;

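/*
 * The per-CPU domain_state records an idle state selected by the genpd
 * power_off callback (sbi_cpuidle_pd_power_off() below), so that the
 * idle-entry path can use it in place of the per-CPU state.
 */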
static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}

static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}

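/*
 * Issue the SBI HSM HART_SUSPEND ecall. For non-retentive suspend this
 * runs as the cpu_suspend() finisher, with resume_addr supplied by the
 * architecture code as the physical resume entry point.
 */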
static int sbi_suspend_finisher(unsigned long suspend_type,
				unsigned long resume_addr,
				unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
			suspend_type, resume_addr, opaque, 0, 0, 0);

	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

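/*
 * Per the SBI HSM extension, bit 31 of the suspend type marks a
 * non-retentive suspend, where CPU context is lost and must be
 * saved/restored via cpu_suspend(); retentive suspends can issue
 * the ecall directly.
 */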
static int sbi_suspend(u32 state)
{
	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(state, sbi_suspend_finisher);
	else
		return sbi_suspend_finisher(state, 0, 0);
}

static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
}

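/*
 * Idle entry for states managed through the hierarchical PM domain
 * topology: dropping the runtime PM reference lets genpd power off the
 * domain once its last CPU idles, which may select a deeper shared
 * state via sbi_cpuidle_pd_power_off().
 */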
static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
					 struct cpuidle_driver *drv, int idx,
					 bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	rcu_irq_enter_irqson();
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);
	rcu_irq_exit_irqson();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = sbi_suspend(state) ? -1 : idx;

	rcu_irq_enter_irqson();
	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);
	rcu_irq_exit_irqson();

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();
	return ret;
}

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

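/*
 * The CPU hotplug callbacks keep the runtime PM reference of the
 * per-CPU genpd device balanced, so that a CPU going offline allows
 * its PM domain to be powered off.
 */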
static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}

static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setting up cpuhp state\n", err);
}

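/*
 * dt_init_idle_driver() matches idle-state DT nodes against this table
 * and installs .data as the state's enter callback.
 */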
static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};

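/*
 * Per the SBI specification, suspend-type values between the default
 * and the first platform-specific value are reserved, in both the
 * retentive and the non-retentive ranges; reject those.
 */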
static bool sbi_suspend_state_is_valid(u32 state)
{
	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
		return false;
	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return false;
	return true;
}

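/*
 * Illustrative idle-state node as parsed below (the values are examples
 * only; 0x80000000 is the SBI default non-retentive suspend type):
 *
 *	cpu-sleep {
 *		compatible = "riscv,idle-state";
 *		riscv,sbi-suspend-param = <0x80000000>;
 *	};
 */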
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

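/*
 * dt_idle_attach_cpu() attaches the CPU to the PM domain named "sbi"
 * and returns the virtual device used for runtime PM; a NULL return
 * (no such domain described in DT) is treated as success below.
 */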
static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				     struct sbi_cpuidle_data *data,
				     unsigned int state_count, int cpu)
{
	/* For now, the hierarchical topology is only used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Use the deepest CPU state to trigger the potential selection of
	 * a shared state for the domain; this assumes the domain states
	 * are all deeper than the CPU states.
	 */
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
					sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}

static int sbi_cpuidle_dt_init_states(struct device *dev,
					struct cpuidle_driver *drv,
					unsigned int cpu,
					unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	struct device_node *cpu_node;
	u32 *states;
	int i, ret;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			goto fail;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count) {
		ret = -ENODEV;
		goto fail;
	}

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		goto fail;

	/* Store states in the per-cpu struct. */
	data->states = states;

fail:
	of_node_put(cpu_node);

	return ret;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}

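/*
 * Register a cpuidle driver for one CPU: state index 0 is always the
 * architectural WFI, and the DT-described SBI states follow from
 * index 1.
 */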
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0), let the driver
	 * initialization fail accordingly, since there is no reason to
	 * initialize the idle driver if only WFI is supported: the
	 * default architectural back-end already executes WFI
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}

#ifdef CONFIG_DT_IDLE_GENPD

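/*
 * genpd power_off callback, invoked when the last CPU in the domain
 * enters idle. It only records the selected domain state; the actual
 * suspend is performed by that CPU through sbi_suspend().
 */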
static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}

struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use the CPU governor if the domain has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int sbi_genpd_probe(struct device_node *np)
{
	struct device_node *node;
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node(np, node) {
		if (!of_find_property(node, "#power-domain-cells", NULL))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto put_node;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

put_node:
	of_node_put(node);
remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif

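/*
 * Probe flow: detect OSI support (every CPU must reference a named
 * power domain), populate genpds from /cpus/power-domains, then
 * register a cpuidle driver per CPU.
 */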
static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (!np ||
		    !of_find_property(np, "power-domains", NULL) ||
		    !of_find_property(np, "power-domain-names", NULL)) {
			sbi_cpuidle_use_osi = false;
			of_node_put(np);
			break;
		}
		of_node_put(np);
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Set up CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};

static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	/*
	 * The SBI HSM suspend function is only available when:
	 * 1) SBI version is 0.3 or higher
	 * 2) SBI HSM extension is available
	 */
	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
	    sbi_probe_extension(SBI_EXT_HSM) <= 0) {
		pr_info("HSM suspend not available\n");
		return 0;
	}

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
						-1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(sbi_cpuidle_init);