// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = remove_cpu(cpu);

		/*
		 * cpu_down() checks the number of online CPUs before the TOS
		 * resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = add_cpu(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
	int i;
	cpumask_var_t *cpu_groups = *pcpu_groups;

	for (i = 0; i < num; ++i)
		free_cpumask_var(cpu_groups[i]);
	kfree(cpu_groups);
}

static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int num_groups = 0;
	cpumask_var_t tmp, *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups) {
		free_cpumask_var(tmp);
		return -ENOMEM;
	}

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpumask_var(tmp);
			free_cpu_groups(num_groups, &cpu_groups);
			return -ENOMEM;
		}
		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	*pcpu_groups = cpu_groups;

	return num_groups;
}

static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus, *cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
	if (nb_cpu_group < 0)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	/*
	 * Of course the last CPU cannot be powered down and cpu_down() should
	 * refuse doing that.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err = down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}

static void dummy_callback(struct timer_list *unused) {}

static int suspend_cpu(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];
	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need to enter tick
		 * broadcast.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			cpu_do_idle();
			ret = 0;
			goto out_arch_exit;
		}
	}

	ret = state->enter(dev, drv, index);

	if (broadcast)
		tick_broadcast_exit();

out_arch_exit:
	arch_cpu_idle_exit();

	return ret;
}

static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for an actual callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	sched_set_fifo(current);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI
		 * and doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			int ret;
			struct cpuidle_state *state = &drv->states[index];

			/*
			 * Set the timer to wake this CPU up in some time
			 * (which should be largely sufficient for entering
			 * suspend). If the local tick is disabled when
			 * entering suspend, suspend_cpu() takes care of
			 * switching to a broadcast tick, so the timer will
			 * still wake us up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(dev, drv, index);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the expected state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure that the timer will not trigger
	 * later.
	 */
	timer_delete(&wakeup_timer);
	timer_destroy_on_stack(&wakeup_timer);

	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			break;
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	kthread_parkme();

	return nb_err;
}

static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status).
	 * Take the idle lock so that the cpuidle driver and device look-up
	 * can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread))
			pr_err("Failed to create kthread on CPU %d\n", cpu);
		else
			threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being
	 * preempted before all the threads have been unparked, the suspend
	 * threads will wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i) {
		err += kthread_park(threads[i]);
		err += kthread_stop(threads[i]);
	}
out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can
	 * be onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using
	 * it, otherwise the results will be unpredictable. However, since
	 * there is no userspace yet in initcalls, that should be fine, as
	 * long as no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);