xref: /linux/drivers/firmware/psci/psci_checker.c (revision a7f7f6248d9740d710fd6bd190293fe5e16410ac)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2016 ARM Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>

#include <asm/cpuidle.h>

#include <uapi/linux/psci.h>

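/*
 * Number of times each suspend test thread cycles through all of its CPU's
 * non-WFI idle states.
 */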
#define NUM_SUSPEND_CYCLE (10)

static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;

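/*
 * Synchronisation with the suspend test threads: the main thread fires
 * suspend_threads_started once every thread has been woken up, and the last
 * thread to finish its suspend cycles completes suspend_threads_done.
 */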
static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
	COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
	COMPLETION_INITIALIZER(suspend_threads_done);

/*
 * We assume that PSCI operations are used if they are available. This is not
 * necessarily true on arm64, since the decision is based on the
 * "enable-method" property of each CPU in the DT, but given that there is no
 * arch-specific way to check this, we assume that the DT is sensible.
 */
static int psci_ops_check(void)
{
	int migrate_type = -1;
	int cpu;

	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
		pr_warn("Missing PSCI operations, aborting tests\n");
		return -EOPNOTSUPP;
	}

	if (psci_ops.migrate_info_type)
		migrate_type = psci_ops.migrate_info_type();

	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
		/* There is a UP Trusted OS, find on which core it resides. */
		for_each_online_cpu(cpu)
			if (psci_tos_resident_on(cpu)) {
				tos_resident_cpu = cpu;
				break;
			}
		if (tos_resident_cpu == -1)
			pr_warn("UP Trusted OS resides on no online CPU\n");
	}

	return 0;
}

/*
 * offlined_cpus is a temporary array but passing it as an argument avoids
 * multiple allocations.
 */
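/*
 * Returns the number of unexpected hotplug results; 0 means every offline and
 * online transition behaved as expected.
 */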
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
				     struct cpumask *offlined_cpus)
{
	int cpu;
	int err = 0;

	cpumask_clear(offlined_cpus);

	/* Try to power down all CPUs in the mask. */
	for_each_cpu(cpu, cpus) {
		int ret = remove_cpu(cpu);

		/*
		 * cpu_down() checks the number of online CPUs before the TOS
		 * resident CPU.
		 */
		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
			if (ret != -EBUSY) {
				pr_err("Unexpected return code %d while trying "
				       "to power down last online CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (cpu == tos_resident_cpu) {
			if (ret != -EPERM) {
				pr_err("Unexpected return code %d while trying "
				       "to power down TOS resident CPU %d\n",
				       ret, cpu);
				++err;
			}
		} else if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power down CPU %d\n", ret, cpu);
			++err;
		}

		if (ret == 0)
			cpumask_set_cpu(cpu, offlined_cpus);
	}

	/* Try to power up all the CPUs that have been offlined. */
	for_each_cpu(cpu, offlined_cpus) {
		int ret = add_cpu(cpu);

		if (ret != 0) {
			pr_err("Error occurred (%d) while trying "
			       "to power up CPU %d\n", ret, cpu);
			++err;
		} else {
			cpumask_clear_cpu(cpu, offlined_cpus);
		}
	}

	/*
	 * Something went bad at some point and some CPUs could not be turned
	 * back on.
	 */
	WARN_ON(!cpumask_empty(offlined_cpus) ||
		num_online_cpus() != nb_available_cpus);

	return err;
}

static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
	int i;
	cpumask_var_t *cpu_groups = *pcpu_groups;

	for (i = 0; i < num; ++i)
		free_cpumask_var(cpu_groups[i]);
	kfree(cpu_groups);
}

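/*
 * Partition the online CPUs into groups that share a topology_core_cpumask().
 * Returns the number of groups created and fills *pcpu_groups (to be released
 * with free_cpu_groups()), or -ENOMEM on allocation failure.
 */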
static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
	int num_groups = 0;
	cpumask_var_t tmp, *cpu_groups;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
			     GFP_KERNEL);
	if (!cpu_groups) {
		free_cpumask_var(tmp);
		return -ENOMEM;
	}

	cpumask_copy(tmp, cpu_online_mask);

	while (!cpumask_empty(tmp)) {
		const struct cpumask *cpu_group =
			topology_core_cpumask(cpumask_any(tmp));

		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
			free_cpumask_var(tmp);
			free_cpu_groups(num_groups, &cpu_groups);
			return -ENOMEM;
		}
		cpumask_copy(cpu_groups[num_groups++], cpu_group);
		cpumask_andnot(tmp, tmp, cpu_group);
	}

	free_cpumask_var(tmp);
	*pcpu_groups = cpu_groups;

	return num_groups;
}

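/*
 * Offline and re-online every CPU: first all of them at once, then one
 * topology group at a time. Returns the accumulated error count, or -ENOMEM
 * if an allocation failed.
 */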
static int hotplug_tests(void)
{
	int i, nb_cpu_group, err = -ENOMEM;
	cpumask_var_t offlined_cpus, *cpu_groups;
	char *page_buf;

	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
		return err;

	nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
	if (nb_cpu_group < 0)
		goto out_free_cpus;
	page_buf = (char *)__get_free_page(GFP_KERNEL);
	if (!page_buf)
		goto out_free_cpu_groups;

	err = 0;
	/*
	 * Of course the last CPU cannot be powered down; cpu_down() should
	 * refuse to do so.
	 */
	pr_info("Trying to turn off and on again all CPUs\n");
	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);

	/*
	 * Take down CPUs by cpu group this time. When the last CPU is turned
	 * off, the cpu group itself should shut down.
	 */
	for (i = 0; i < nb_cpu_group; ++i) {
		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
						      cpu_groups[i]);
		/* Remove trailing newline. */
		page_buf[len - 1] = '\0';
		pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
			i, page_buf);
		err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
	}

	free_page((unsigned long)page_buf);
out_free_cpu_groups:
	free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
	free_cpumask_var(offlined_cpus);
	return err;
}

static void dummy_callback(struct timer_list *unused) {}

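/*
 * Enter the requested idle state on this CPU, doing the bookkeeping the
 * cpuidle core would normally do: switch to the broadcast tick when the
 * state stops the local timer, and fall back to a plain WFI if broadcast
 * entry fails.
 */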
static int suspend_cpu(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *state = &drv->states[index];
	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
	int ret;

	arch_cpu_idle_enter();

	if (broadcast) {
		/*
		 * The local timer will be shut down, we need to enter tick
		 * broadcast.
		 */
		ret = tick_broadcast_enter();
		if (ret) {
			/*
			 * In the absence of hardware broadcast mechanism,
			 * this CPU might be used to broadcast wakeups, which
			 * may be why entering tick broadcast has failed.
			 * There is little the kernel can do to work around
			 * that, so enter WFI instead (idle state 0).
			 */
			cpu_do_idle();
			ret = 0;
			goto out_arch_exit;
		}
	}

	ret = state->enter(dev, drv, index);

	if (broadcast)
		tick_broadcast_exit();

out_arch_exit:
	arch_cpu_idle_exit();

	return ret;
}

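/*
 * Per-CPU test thread: repeatedly enter every non-WFI idle state of this
 * CPU, counting successful deep sleeps, shallow sleeps and errors, then park
 * until the main thread collects the result.
 */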
static int suspend_test_thread(void *arg)
{
	int cpu = (long)arg;
	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv;
	/* No need for an actual callback, we just want to wake up the CPU. */
	struct timer_list wakeup_timer;

	/* Wait for the main thread to give the start signal. */
	wait_for_completion(&suspend_threads_started);

	/* Set maximum priority to preempt all other threads on this CPU. */
	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);

	dev = this_cpu_read(cpuidle_devices);
	drv = cpuidle_get_cpu_driver(dev);

	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
		cpu, drv->state_count - 1);

	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
		int index;
		/*
		 * Test all possible states, except 0 (which is usually WFI and
		 * doesn't use PSCI).
		 */
		for (index = 1; index < drv->state_count; ++index) {
			int ret;
			struct cpuidle_state *state = &drv->states[index];

			/*
			 * Set the timer to wake this CPU up in some time (which
			 * should be largely sufficient for entering suspend).
			 * If the local tick is disabled when entering suspend,
			 * suspend_cpu() takes care of switching to a broadcast
			 * tick, so the timer will still wake us up.
			 */
			mod_timer(&wakeup_timer, jiffies +
				  usecs_to_jiffies(state->target_residency));

			/* IRQs must be disabled during suspend operations. */
			local_irq_disable();

			ret = suspend_cpu(dev, drv, index);

			/*
			 * We have woken up. Re-enable IRQs to handle any
			 * pending interrupt, do not wait until the end of the
			 * loop.
			 */
			local_irq_enable();

			if (ret == index) {
				++nb_suspend;
			} else if (ret >= 0) {
				/* We did not enter the expected state. */
				++nb_shallow_sleep;
			} else {
				pr_err("Failed to suspend CPU %d: error %d "
				       "(requested state %d, cycle %d)\n",
				       cpu, ret, index, i);
				++nb_err;
			}
		}
	}

	/*
	 * Disable the timer to make sure it will not trigger later.
	 */
	del_timer(&wakeup_timer);
	destroy_timer_on_stack(&wakeup_timer);

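	/* The last thread to finish its cycles wakes up the main thread. */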
	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
		complete(&suspend_threads_done);

	/* Give up on RT scheduling and wait for termination. */
	sched_priority.sched_priority = 0;
	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
			cpu);
	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park())
			break;
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	kthread_parkme();

	return nb_err;
}

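/*
 * Spawn one suspend test thread per CPU that has a registered cpuidle
 * device, let them all run their suspend cycles and sum up their error
 * counts.
 */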
static int suspend_tests(void)
{
	int i, cpu, err = 0;
	struct task_struct **threads;
	int nb_threads = 0;

	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
				GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/*
	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
	 * mode, as it might interfere with the suspend threads on other CPUs.
	 * This does not prevent the suspend threads from using cpuidle (only
	 * the idle tasks check this status). Take the idle lock so that
	 * the cpuidle driver and device look-up can be carried out safely.
	 */
	cpuidle_pause_and_lock();

	for_each_online_cpu(cpu) {
		struct task_struct *thread;
		/* Check that cpuidle is available on that CPU. */
		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

		if (!dev || !drv) {
			pr_warn("cpuidle not available on CPU %d, ignoring\n",
				cpu);
			continue;
		}

		thread = kthread_create_on_cpu(suspend_test_thread,
					       (void *)(long)cpu, cpu,
					       "psci_suspend_test");
		if (IS_ERR(thread))
			pr_err("Failed to create kthread on CPU %d\n", cpu);
		else
			threads[nb_threads++] = thread;
	}

	if (nb_threads < 1) {
		err = -ENODEV;
		goto out;
	}

	atomic_set(&nb_active_threads, nb_threads);

	/*
	 * Wake up the suspend threads. To avoid the main thread being preempted
	 * before all the threads have been unparked, the suspend threads will
	 * wait for the completion of suspend_threads_started.
	 */
	for (i = 0; i < nb_threads; ++i)
		wake_up_process(threads[i]);
	complete_all(&suspend_threads_started);

	wait_for_completion(&suspend_threads_done);

	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i) {
		err += kthread_park(threads[i]);
		err += kthread_stop(threads[i]);
	}
 out:
	cpuidle_resume_and_unlock();
	kfree(threads);
	return err;
}

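/*
 * Late initcall entry point: check that the PSCI operations are present,
 * then run the hotplug and suspend test suites and log their results.
 */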
static int __init psci_checker(void)
{
	int ret;

	/*
	 * Since we're in an initcall, we assume that all the CPUs that can be
	 * onlined have been onlined.
	 *
	 * The tests assume that hotplug is enabled but nobody else is using it,
	 * otherwise the results will be unpredictable. However, since there
	 * is no userspace yet in initcalls, that should be fine, as long as
	 * no torture test is running at the same time (see Kconfig).
	 */
	nb_available_cpus = num_online_cpus();

	/* Check PSCI operations are set up and working. */
	ret = psci_ops_check();
	if (ret)
		return ret;

	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);

	pr_info("Starting hotplug tests\n");
	ret = hotplug_tests();
	if (ret == 0)
		pr_info("Hotplug tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in hotplug tests\n", ret);
	else {
		pr_err("Out of memory\n");
		return ret;
	}

	pr_info("Starting suspend tests (%d cycles per state)\n",
		NUM_SUSPEND_CYCLE);
	ret = suspend_tests();
	if (ret == 0)
		pr_info("Suspend tests passed OK\n");
	else if (ret > 0)
		pr_err("%d error(s) encountered in suspend tests\n", ret);
	else {
		switch (ret) {
		case -ENOMEM:
			pr_err("Out of memory\n");
			break;
		case -ENODEV:
			pr_warn("Could not start suspend tests on any CPU\n");
			break;
		}
	}

	pr_info("PSCI checker completed\n");
	return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);