// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V SBI CPU idle driver.
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt

#include <linux/cleanup.h>
#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/cpu_cooling.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/suspend.h>

#include "cpuidle.h"
#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct sbi_cpuidle_data {
	u32 *states;
	struct device *dev;
};

struct sbi_domain_state {
	bool available;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
static bool sbi_cpuidle_pd_allow_domain_state;

static inline void sbi_set_domain_state(u32 state)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = true;
	data->state = state;
}

static inline u32 sbi_get_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->state;
}

static inline void sbi_clear_domain_state(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	data->available = false;
}

static inline bool sbi_is_domain_state_available(void)
{
	struct sbi_domain_state *data = this_cpu_ptr(&domain_state);

	return data->available;
}

static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
					     struct cpuidle_driver *drv, int idx)
{
	u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
	u32 state = states[idx];

	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
	else
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
							     idx, state);
}

static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
						   struct cpuidle_driver *drv, int idx,
						   bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = riscv_sbi_hart_suspend(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();
	return ret;
}
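
/*
 * The two wrappers below differ only in how the CPU's PM domain is
 * quiesced: the normal idle path manages the domain through runtime PM,
 * while the s2idle path calls dev_pm_genpd_suspend()/dev_pm_genpd_resume()
 * directly (see the s2idle handling in __sbi_enter_domain_idle_state()
 * above).
 */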

static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}

static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}

static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		sbi_clear_domain_state();
	}

	return 0;
}

static void sbi_idle_init_cpuhp(void)
{
	int err;

	if (!sbi_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/sbi:online",
					sbi_cpuidle_cpuhp_up,
					sbi_cpuidle_cpuhp_down);
	if (err)
		pr_warn("Failed to set up cpuhp state: %d\n", err);
}

static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};

static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
		return err;
	}

	if (!riscv_sbi_suspend_state_is_valid(*state)) {
		pr_warn("Invalid SBI suspend state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}

static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently the hierarchical topology is only used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Use the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain; this assumes the domain states
	 * are all deeper than the CPU states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}
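
/*
 * Illustrative sketch (not taken from any real platform) of the kind of
 * DT idle-state node that sbi_dt_parse_state_node() consumes, typically
 * referenced from a CPU node via "cpu-idle-states"; the node name, label
 * and property values below are hypothetical placeholders:
 *
 *	cpu_sleep: cpu-sleep {
 *		compatible = "riscv,idle-state";
 *		riscv,sbi-suspend-param = <...>;
 *		entry-latency-us = <...>;
 *		exit-latency-us = <...>;
 *		min-residency-us = <...>;
 *	};
 */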

static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	u32 *states;
	int i, ret;

	struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Store states in the per-cpu struct. */
	data->states = states;

	return 0;
}

static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	sbi_cpuidle_use_cpuhp = false;
}

static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0), let the driver
	 * initialization fail accordingly, since there is no reason to
	 * initialize the idle driver if only WFI is supported; the
	 * default architectural back-end already executes WFI
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	if (cpuidle_disabled())
		return 0;

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}

static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}
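
/*
 * The following CONFIG_DT_IDLE_GENPD section implements the optional
 * generic PM domain (genpd) backend used for the hierarchical CPU
 * topology: each child of the /cpus/power-domains DT node becomes a genpd
 * provider, and its power_off callback (OSI mode only) records the chosen
 * domain idle state so that __sbi_enter_domain_idle_state() can pass it
 * to the SBI HSM suspend call.
 */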

#ifdef CONFIG_DT_IDLE_GENPD

static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
{
	struct genpd_power_state *state = &pd->states[pd->state_idx];
	u32 *pd_state;

	if (!state->data)
		return 0;

	if (!sbi_cpuidle_pd_allow_domain_state)
		return -EBUSY;

	/* OSI mode is enabled, set the corresponding domain state. */
	pd_state = state->data;
	sbi_set_domain_state(*pd_state);

	return 0;
}

struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;
};

static LIST_HEAD(sbi_pd_providers);

static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}

static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}

static int sbi_genpd_probe(struct device_node *np)
{
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node_scoped(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto remove_pd;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

remove_pd:
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}

#else

static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}

#endif
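
/*
 * Illustrative sketch of the DT layout that enables the hierarchical
 * (OSI) topology handled by sbi_cpuidle_probe() below; all node names,
 * labels, phandles and cell values are hypothetical placeholders:
 *
 *	cpus {
 *		cpu@0 {
 *			...
 *			power-domains = <&CPU_PD0>;
 *			power-domain-names = "sbi";
 *		};
 *
 *		power-domains {
 *			CPU_PD0: power-domain-cpu0 {
 *				#power-domain-cells = <0>;
 *				power-domains = <&CLUSTER_PD>;
 *				domain-idle-states = <...>;
 *			};
 *			CLUSTER_PD: power-domain-cluster {
 *				#power-domain-cells = <0>;
 *				domain-idle-states = <...>;
 *			};
 *		};
 *	};
 *
 * OSI mode is assumed only when every CPU node carries both
 * "power-domains" and "power-domain-names"; the per-CPU attach in
 * sbi_dt_cpu_init_topology() looks up the domain by the name "sbi".
 */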

static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *np, *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		if (np &&
		    of_property_present(np, "power-domains") &&
		    of_property_present(np, "power-domain-names")) {
			continue;
		} else {
			sbi_cpuidle_use_osi = false;
			break;
		}
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Set up CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	if (cpuidle_disabled())
		pr_info("cpuidle is disabled\n");
	else
		pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}

static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};

static int __init sbi_cpuidle_init(void)
{
	int ret;
	struct platform_device *pdev;

	if (!riscv_sbi_hsm_is_supported())
		return 0;

	ret = platform_driver_register(&sbi_cpuidle_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple("sbi-cpuidle",
					       -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&sbi_cpuidle_driver);
		return PTR_ERR(pdev);
	}

	return 0;
}
arch_initcall(sbi_cpuidle_init);