// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch_timer_edge_cases.c - Tests the aarch64 timer IRQ functionality.
 *
 * The test validates some edge cases related to the arch-timer:
 * - timers above the max TVAL value.
 * - timers in the past.
 * - moving counters ahead and behind pending timers.
 * - reprogramming timers.
 * - timers fired multiple times.
 * - masking/unmasking using the timer control mask.
 *
 * Copyright (c) 2021, Google LLC.
 */

#define _GNU_SOURCE

#include <pthread.h>
#include <sys/sysinfo.h>

#include "arch_timer.h"
#include "gic.h"
#include "vgic.h"

static const uint64_t CVAL_MAX = ~0ULL;
/* tval is a signed 32-bit int. */
static const int32_t TVAL_MAX = INT32_MAX;
static const int32_t TVAL_MIN = INT32_MIN;

/* How long to wait before deciding that no IRQ is coming (usecs). */
static const uint32_t TIMEOUT_NO_IRQ_US = 50000;

/* A nice counter value to use as the starting one for most tests. */
static const uint64_t DEF_CNT = (CVAL_MAX / 2);

/* Number of runs. */
static const uint32_t NR_TEST_ITERS_DEF = 5;

/* Default wait test time in ms. */
static const uint32_t WAIT_TEST_MS = 10;

/* Default "long" wait test time in ms. */
static const uint32_t LONG_WAIT_TEST_MS = 100;

/* Shared with IRQ handler. */
struct test_vcpu_shared_data {
	atomic_t handled;
	atomic_t spurious;
} shared_data;

struct test_args {
	/* Virtual or physical timer and counter tests. */
	enum arch_timer timer;
	/* Delay used for most timer tests. */
	uint64_t wait_ms;
	/* Delay used in the test_long_timer_delays test. */
	uint64_t long_wait_ms;
	/* Number of iterations. */
	int iterations;
	/* Whether to test the physical timer. */
	bool test_physical;
	/* Whether to test the virtual timer. */
	bool test_virtual;
};

struct test_args test_args = {
	.wait_ms = WAIT_TEST_MS,
	.long_wait_ms = LONG_WAIT_TEST_MS,
	.iterations = NR_TEST_ITERS_DEF,
	.test_physical = true,
	.test_virtual = true,
};

static int vtimer_irq, ptimer_irq;

enum sync_cmd {
	SET_COUNTER_VALUE,
	USERSPACE_USLEEP,
	USERSPACE_SCHED_YIELD,
	USERSPACE_MIGRATE_SELF,
	NO_USERSPACE_CMD,
};
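
/*
 * A sync_cmd is passed to userspace via GUEST_SYNC_ARGS(cmd, ...), with up
 * to two payload values, and is serviced in handle_sync() below.
 */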

typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec);

static void sleep_poll(enum arch_timer timer, uint64_t usec);
static void sleep_sched_poll(enum arch_timer timer, uint64_t usec);
static void sleep_in_userspace(enum arch_timer timer, uint64_t usec);
static void sleep_migrate(enum arch_timer timer, uint64_t usec);

sleep_method_t sleep_method[] = {
	sleep_poll,
	sleep_sched_poll,
	sleep_migrate,
	sleep_in_userspace,
};

typedef void (*irq_wait_method_t)(void);

static void wait_for_non_spurious_irq(void);
static void wait_poll_for_irq(void);
static void wait_sched_poll_for_irq(void);
static void wait_migrate_poll_for_irq(void);

irq_wait_method_t irq_wait_method[] = {
	wait_for_non_spurious_irq,
	wait_poll_for_irq,
	wait_sched_poll_for_irq,
	wait_migrate_poll_for_irq,
};
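
/*
 * Most tests are run once per entry in sleep_method[] or irq_wait_method[],
 * varying where the vCPU blocks (WFI, in-guest polling, or a trip through
 * userspace), so the same timer state is exercised across reschedules and
 * vcpu save/restore.
 */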

enum timer_view {
	TIMER_CVAL,
	TIMER_TVAL,
};

static void assert_irqs_handled(uint32_t n)
{
	int h = atomic_read(&shared_data.handled);

	__GUEST_ASSERT(h == n, "Handled %d IRQs but expected %d", h, n);
}

static void userspace_cmd(uint64_t cmd)
{
	GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0);
}

static void userspace_migrate_vcpu(void)
{
	userspace_cmd(USERSPACE_MIGRATE_SELF);
}

static void userspace_sleep(uint64_t usecs)
{
	GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0);
}

static void set_counter(enum arch_timer timer, uint64_t counter)
{
	GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0);
}

static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int intid = gic_get_and_ack_irq();
	enum arch_timer timer;
	uint64_t cnt, cval;
	uint32_t ctl;
	bool timer_condition, istatus;

	if (intid == IAR_SPURIOUS) {
		atomic_inc(&shared_data.spurious);
		goto out;
	}

	if (intid == ptimer_irq)
		timer = PHYSICAL;
	else if (intid == vtimer_irq)
		timer = VIRTUAL;
	else
		goto out;

	ctl = timer_get_ctl(timer);
	cval = timer_get_cval(timer);
	cnt = timer_get_cntct(timer);
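	/*
	 * The architectural timer condition is met once the counter reaches
	 * cval; ISTATUS reflects that condition whenever the timer is
	 * enabled, regardless of IMASK.
	 */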
	timer_condition = cnt >= cval;
	istatus = (ctl & CTL_ISTATUS) && (ctl & CTL_ENABLE);
	GUEST_ASSERT_EQ(timer_condition, istatus);

	/* Disable and mask the timer. */
	timer_set_ctl(timer, CTL_IMASK);

	atomic_inc(&shared_data.handled);

out:
	gic_set_eoi(intid);
}

static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles,
			 uint32_t ctl)
{
	atomic_set(&shared_data.handled, 0);
	atomic_set(&shared_data.spurious, 0);
	timer_set_cval(timer, cval_cycles);
	timer_set_ctl(timer, ctl);
}

static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
			 uint32_t ctl)
{
	atomic_set(&shared_data.handled, 0);
	atomic_set(&shared_data.spurious, 0);
	timer_set_ctl(timer, ctl);
	timer_set_tval(timer, tval_cycles);
}

static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
			 enum timer_view tv)
{
	switch (tv) {
	case TIMER_CVAL:
		set_cval_irq(timer, xval, ctl);
		break;
	case TIMER_TVAL:
		set_tval_irq(timer, xval, ctl);
		break;
	default:
		GUEST_FAIL("Unexpected timer view %d", tv);
	}
}

/*
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void wait_for_non_spurious_irq(void)
{
	int h;

	local_irq_disable();

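	/*
	 * WFI completes once an interrupt becomes pending, even while
	 * PSTATE.I is masked; briefly re-enabling IRQs afterwards lets the
	 * handler run and bump shared_data.handled, ending the loop.
	 */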
	for (h = atomic_read(&shared_data.handled); h == atomic_read(&shared_data.handled);) {
		wfi();
		local_irq_enable();
		isb(); /* handle IRQ */
		local_irq_disable();
	}
}

/*
 * Wait for a non-spurious IRQ by polling in the guest or in
 * userspace (e.g. usp_cmd=USERSPACE_SCHED_YIELD).
 *
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void poll_for_non_spurious_irq(enum sync_cmd usp_cmd)
{
	int h;

	local_irq_disable();

	h = atomic_read(&shared_data.handled);

	local_irq_enable();
	while (h == atomic_read(&shared_data.handled)) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
	local_irq_disable();
}

static void wait_poll_for_irq(void)
{
	poll_for_non_spurious_irq(NO_USERSPACE_CMD);
}

static void wait_sched_poll_for_irq(void)
{
	poll_for_non_spurious_irq(USERSPACE_SCHED_YIELD);
}

static void wait_migrate_poll_for_irq(void)
{
	poll_for_non_spurious_irq(USERSPACE_MIGRATE_SELF);
}

/*
 * Sleep for usec microseconds by polling in the guest or in
 * userspace (e.g. usp_cmd=USERSPACE_SCHED_YIELD).
 */
static void guest_poll(enum arch_timer test_timer, uint64_t usec,
		       enum sync_cmd usp_cmd)
{
	uint64_t cycles = usec_to_cycles(usec);
	/* Whichever timer we are testing with, sleep with the other. */
	enum arch_timer sleep_timer = 1 - test_timer;
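	/*
	 * (The tests move the counter of the timer under test via
	 * set_counter(), so that counter can't be trusted for measuring
	 * the delay.)
	 */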
	uint64_t start = timer_get_cntct(sleep_timer);

	while ((timer_get_cntct(sleep_timer) - start) < cycles) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
}

static void sleep_poll(enum arch_timer timer, uint64_t usec)
{
	guest_poll(timer, usec, NO_USERSPACE_CMD);
}

static void sleep_sched_poll(enum arch_timer timer, uint64_t usec)
{
	guest_poll(timer, usec, USERSPACE_SCHED_YIELD);
}

static void sleep_migrate(enum arch_timer timer, uint64_t usec)
{
	guest_poll(timer, usec, USERSPACE_MIGRATE_SELF);
}

static void sleep_in_userspace(enum arch_timer timer, uint64_t usec)
{
	userspace_sleep(usec);
}

/*
 * Reset the timer state to some nice values: a counter that is not close
 * to any edge, and the control register masked and disabled.
 */
static void reset_timer_state(enum arch_timer timer, uint64_t cnt)
{
	set_counter(timer, cnt);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_timer_xval(enum arch_timer timer, uint64_t xval,
			    enum timer_view tv, irq_wait_method_t wm, bool reset_state,
			    uint64_t reset_cnt)
{
	local_irq_disable();

	if (reset_state)
		reset_timer_state(timer, reset_cnt);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/*
 * The test_timer_* functions will program the timer, wait for it, and assert
 * the firing of the correct IRQ.
 *
 * These functions don't have a timeout and return as soon as they receive an
 * IRQ. They can hang (forever), so we rely on having a timeout mechanism in
 * the "runner", like: tools/testing/selftests/kselftest/runner.sh.
 */

static void test_timer_cval(enum arch_timer timer, uint64_t cval,
			    irq_wait_method_t wm, bool reset_state,
			    uint64_t reset_cnt)
{
	test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt);
}

static void test_timer_tval(enum arch_timer timer, int32_t tval,
			    irq_wait_method_t wm, bool reset_state,
			    uint64_t reset_cnt)
{
	test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state,
			reset_cnt);
}

static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval,
				   uint64_t usec, enum timer_view timer_view,
				   sleep_method_t guest_sleep)
{
	local_irq_disable();

	set_xval_irq(timer, xval, CTL_ENABLE | CTL_IMASK, timer_view);
	guest_sleep(timer, usec);

	local_irq_enable();
	isb();

	/* Assume success (no IRQ) after waiting usec microseconds */
	assert_irqs_handled(0);
}

static void test_cval_no_irq(enum arch_timer timer, uint64_t cval,
			     uint64_t usec, sleep_method_t wm)
{
	test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm);
}

static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec,
			     sleep_method_t wm)
{
	/*
	 * tval is sign-extended to 64 bits here; programming TVAL
	 * truncates it back to an int32_t.
	 */
	test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm);
}

/* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */
static void test_timer_control_mask_then_unmask(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);
	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Unmask the timer, and then get an IRQ. */
	local_irq_disable();
	timer_set_ctl(timer, CTL_ENABLE);
	/* This method re-enables IRQs to handle the one we're looking for. */
	wait_for_non_spurious_irq();

	assert_irqs_handled(1);
	local_irq_enable();
}

/* Check that timer control masks actually mask a timer being fired. */
static void test_timer_control_masks(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);

	/* Local IRQs are not masked at this point. */

	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
	sleep_poll(timer, TIMEOUT_NO_IRQ_US);

	assert_irqs_handled(0);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_fire_a_timer_multiple_times(enum arch_timer timer,
					     irq_wait_method_t wm, int num)
{
	int i;

	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	set_tval_irq(timer, 0, CTL_ENABLE);

	for (i = 1; i <= num; i++) {
		/* This method re-enables IRQs to handle the one we're looking for. */
		wm();

		/*
		 * The IRQ handler masked and disabled the timer.
		 * Enable and unmask it again.
		 */
		timer_set_ctl(timer, CTL_ENABLE);

		assert_irqs_handled(i);
	}

	local_irq_enable();
}

static void test_timers_fired_multiple_times(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++)
		test_fire_a_timer_multiple_times(timer, irq_wait_method[i], 10);
}

/*
 * Set a timer for tval=delta_1_ms then reprogram it to tval=delta_2_ms.
 * Check that the timer fires. There is no timeout for the wait: we rely
 * on the runner's timeout mechanism.
 */
static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm,
				     int32_t delta_1_ms, int32_t delta_2_ms)
{
	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	/* Program the timer to DEF_CNT + delta_1_ms. */
	set_tval_irq(timer, msec_to_cycles(delta_1_ms), CTL_ENABLE);

	/* Reprogram the timer to DEF_CNT + delta_2_ms. */
	timer_set_tval(timer, msec_to_cycles(delta_2_ms));

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	/* The IRQ should arrive at DEF_CNT + delta_2_ms (or after). */
	GUEST_ASSERT(timer_get_cntct(timer) >=
		     DEF_CNT + msec_to_cycles(delta_2_ms));

	local_irq_enable();
	assert_irqs_handled(1);
}

static void test_reprogram_timers(enum arch_timer timer)
{
	int i;
	uint64_t base_wait = test_args.wait_ms;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/*
		 * Ensure reprogramming works whether going from a
		 * longer time to a shorter or vice versa.
		 */
		test_reprogramming_timer(timer, irq_wait_method[i], 2 * base_wait,
					 base_wait);
		test_reprogramming_timer(timer, irq_wait_method[i], base_wait,
					 2 * base_wait);
	}
}

static void test_basic_functionality(enum arch_timer timer)
{
	int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms);
	uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);
	}
}

/*
 * This test checks basic timer behavior without actually firing timers, e.g.
 * the relationship between cval and tval, and tval down-counting.
 */
static void timers_sanity_checks(enum arch_timer timer, bool use_sched)
{
	reset_timer_state(timer, DEF_CNT);

	local_irq_disable();

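	/*
	 * Architecturally, tval reads as the signed 32-bit result of
	 * (cval - cnt), and writing tval sets cval = cnt + sign-extended
	 * tval. The checks below poke at the edges of that relationship.
	 */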
	/* cval in the past */
	timer_set_cval(timer,
		       timer_get_cntct(timer) -
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	/* tval in the past */
	timer_set_tval(timer, -1);
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) < timer_get_cntct(timer));

	/*
	 * tval larger than TVAL_MAX. This requires programming with
	 * timer_set_cval instead so the value is expressible.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <= 0);

	/*
	 * tval larger than 2 * TVAL_MAX.
	 * 2 * TVAL_MAX wraps the 32-bit tval all the way around.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + 2ULL * TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <=
		       msec_to_cycles(test_args.wait_ms));

	/* A negative tval whose cval wraps below 0. */
	set_counter(timer, msec_to_cycles(1));
	timer_set_tval(timer, -1 * msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) >= (CVAL_MAX - msec_to_cycles(test_args.wait_ms)));

	/* tval should keep down-counting from 0 to -1. */
	timer_set_tval(timer, 0);
	sleep_poll(timer, 1);
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	local_irq_enable();

	/* Mask and disable any pending timer. */
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_timers_sanity_checks(enum arch_timer timer)
{
	timers_sanity_checks(timer, false);
	/* Check how KVM saves/restores these edge-case values. */
	timers_sanity_checks(timer, true);
}

static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t wm)
{
	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	set_cval_irq(timer,
		     (uint64_t) TVAL_MAX +
		     msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE);

	set_counter(timer, TVAL_MAX);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */
static void test_timers_above_tval_max(enum arch_timer timer)
{
	uint64_t cval;
	int i;

	/*
	 * Test that the system is not implementing cval in terms of
	 * tval.  If that were the case, setting a cval to "cval = now
	 * + TVAL_MAX + wait_ms" would wrap to "cval = now +
	 * wait_ms", and the timer would fire immediately. Test that it
	 * doesn't.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		reset_timer_state(timer, DEF_CNT);
		cval = timer_get_cntct(timer) + TVAL_MAX +
			msec_to_cycles(test_args.wait_ms);
		test_cval_no_irq(timer, cval,
				 msecs_to_usecs(test_args.wait_ms) +
				 TIMEOUT_NO_IRQ_US, sleep_method[i]);
	}

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/* Get the IRQ by moving the counter forward. */
		test_set_cnt_after_tval_max(timer, irq_wait_method[i]);
	}
}

/*
 * Template function to be used by the test_move_counter_ahead_* tests.  It
 * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and
 * then waits for an IRQ.
 */
static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1,
				    uint64_t xval, uint64_t cnt_2,
				    irq_wait_method_t wm, enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);
	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/*
 * Template function used by the tests that check that no IRQ fires. It
 * sets the counter to cnt_1, programs the [c|t]val, sets the counter to
 * cnt_2, and then sleeps, checking that no IRQ arrives.
 */
static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
					   uint64_t cnt_1, uint64_t xval,
					   uint64_t cnt_2,
					   sleep_method_t guest_sleep,
					   enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);
	guest_sleep(timer, TIMEOUT_NO_IRQ_US);

	local_irq_enable();
	isb();

	/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
	assert_irqs_handled(0);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1,
				    int32_t tval, uint64_t cnt_2,
				    irq_wait_method_t wm)
{
	test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL);
}

static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1,
				    uint64_t cval, uint64_t cnt_2,
				    irq_wait_method_t wm)
{
	test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL);
}

static void test_set_cnt_after_tval_no_irq(enum arch_timer timer,
					   uint64_t cnt_1, int32_t tval,
					   uint64_t cnt_2, sleep_method_t wm)
{
	test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm,
				       TIMER_TVAL);
}

static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
					   uint64_t cnt_1, uint64_t cval,
					   uint64_t cnt_2, sleep_method_t wm)
{
	test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm,
				       TIMER_CVAL);
}

/* Set a timer and then move the counter ahead of it. */
static void test_move_counters_ahead_of_timers(enum arch_timer timer)
{
	int i;
	int32_t tval;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_set_cnt_after_cval(timer, 0, DEF_CNT, DEF_CNT + 1, wm);
		test_set_cnt_after_cval(timer, CVAL_MAX, 1, 2, wm);

		/* Move counter ahead of negative tval. */
		test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm);
		test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm);
		tval = TVAL_MAX;
		test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
					wm);
	}

	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		test_set_cnt_after_cval_no_irq(timer, 0, DEF_CNT, CVAL_MAX, sm);
	}
}

/*
 * Program a timer, mask it, and then change the tval or counter to cancel it.
 * Unmask it and check that nothing fires.
 */
static void test_move_counters_behind_timers(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		test_set_cnt_after_cval_no_irq(timer, DEF_CNT, DEF_CNT - 1, 0,
					       sm);
		test_set_cnt_after_tval_no_irq(timer, DEF_CNT, -1, 0, sm);
	}
}

static void test_timers_in_the_past(enum arch_timer timer)
{
	int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms);
	uint64_t cval;
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		/* Set a timer wait_ms in the past. */
		cval = DEF_CNT - msec_to_cycles(test_args.wait_ms);
		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);

		/* Set a timer to counter=0 (in the past). */
		test_timer_cval(timer, 0, wm, true, DEF_CNT);

		/* Set a timer for tval=0 (now). */
		test_timer_tval(timer, 0, wm, true, DEF_CNT);

		/* Set a timer to as far in the past as possible. */
		test_timer_tval(timer, TVAL_MIN, wm, true, DEF_CNT);
	}

	/*
	 * Set the counter to wait_ms, and a tval to -wait_ms. There should be no
	 * IRQ as that tval means cval=CVAL_MAX-wait_ms.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		set_counter(timer, msec_to_cycles(test_args.wait_ms));
		test_tval_no_irq(timer, tval, TIMEOUT_NO_IRQ_US, sm);
	}
}

static void test_long_timer_delays(enum arch_timer timer)
{
	int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms);
	uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);
	}
}

static void guest_run_iteration(enum arch_timer timer)
{
	test_basic_functionality(timer);
	test_timers_sanity_checks(timer);

	test_timers_above_tval_max(timer);
	test_timers_in_the_past(timer);

	test_move_counters_ahead_of_timers(timer);
	test_move_counters_behind_timers(timer);
	test_reprogram_timers(timer);

	test_timers_fired_multiple_times(timer);

	test_timer_control_mask_then_unmask(timer);
	test_timer_control_masks(timer);
}

static void guest_code(enum arch_timer timer)
{
	int i;

	local_irq_disable();

	gic_init(GIC_V3, 1);

	timer_set_ctl(VIRTUAL, CTL_IMASK);
	timer_set_ctl(PHYSICAL, CTL_IMASK);

	gic_irq_enable(vtimer_irq);
	gic_irq_enable(ptimer_irq);
	local_irq_enable();

	for (i = 0; i < test_args.iterations; i++) {
		GUEST_SYNC(i);
		guest_run_iteration(timer);
	}

	test_long_timer_delays(timer);
	GUEST_DONE();
}

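/*
 * Pick the next physical CPU in this task's affinity mask, wrapping
 * around CPU_SETSIZE, as the migration target.
 */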
static uint32_t next_pcpu(void)
{
	uint32_t max = get_nprocs();
	uint32_t cur = sched_getcpu();
	uint32_t next = cur;
	cpu_set_t cpuset;

	TEST_ASSERT(max > 1, "Need at least two physical cpus");

	sched_getaffinity(0, sizeof(cpuset), &cpuset);

	do {
		next = (next + 1) % CPU_SETSIZE;
	} while (!CPU_ISSET(next, &cpuset));

	return next;
}

static void migrate_self(uint32_t new_pcpu)
{
	int ret;
	cpu_set_t cpuset;
	pthread_t thread;

	thread = pthread_self();

	CPU_ZERO(&cpuset);
	CPU_SET(new_pcpu, &cpuset);

	pr_debug("Migrating from %u to %u\n", sched_getcpu(), new_pcpu);

	ret = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);

	TEST_ASSERT(ret == 0, "Failed to migrate to pCPU: %u; ret: %d\n",
		    new_pcpu, ret);
}

static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt,
			   enum arch_timer timer)
{
	if (timer == PHYSICAL)
		vcpu_set_reg(vcpu, KVM_REG_ARM_PTIMER_CNT, cnt);
	else
		vcpu_set_reg(vcpu, KVM_REG_ARM_TIMER_CNT, cnt);
}

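/*
 * Service a guest sync request. The layout matches the guest's
 * GUEST_SYNC_ARGS(cmd, val, timer, ...) calls: args[1] holds the command
 * and args[2]/args[3] its payload.
 */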
static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	enum sync_cmd cmd = uc->args[1];
	uint64_t val = uc->args[2];
	enum arch_timer timer = uc->args[3];

	switch (cmd) {
	case SET_COUNTER_VALUE:
		kvm_set_cntxct(vcpu, val, timer);
		break;
	case USERSPACE_USLEEP:
		usleep(val);
		break;
	case USERSPACE_SCHED_YIELD:
		sched_yield();
		break;
	case USERSPACE_MIGRATE_SELF:
		migrate_self(next_pcpu());
		break;
	default:
		break;
	}
}

static void test_run(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	/* Start on CPU 0 */
	migrate_self(0);

	while (true) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			handle_sync(vcpu, &uc);
			break;
		case UCALL_DONE:
			goto out;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			goto out;
		default:
			TEST_FAIL("Unexpected guest exit\n");
		}
	}

out:
	return;
}

static void test_init_timer_irq(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
	vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);

	sync_global_to_guest(vm, ptimer_irq);
	sync_global_to_guest(vm, vtimer_irq);

	pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}

static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu,
			   enum arch_timer timer)
{
	*vm = vm_create_with_one_vcpu(vcpu, guest_code);
	TEST_ASSERT(*vm, "Failed to create the test VM\n");

	vm_init_descriptor_tables(*vm);
	vm_install_exception_handler(*vm, VECTOR_IRQ_CURRENT,
				     guest_irq_handler);

	vcpu_init_descriptor_tables(*vcpu);
	vcpu_args_set(*vcpu, 1, timer);

	test_init_timer_irq(*vm, *vcpu);
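	/* GICv3 for the single vCPU, with room for 64 IRQs. */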
	vgic_v3_setup(*vm, 1, 64);
	sync_global_to_guest(*vm, test_args);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-b] [-i iterations] [-l long_wait_ms] [-p] [-v] [-w wait_ms]\n",
		name);
	pr_info("\t-i: Number of iterations (default: %u)\n",
		NR_TEST_ITERS_DEF);
	pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
	pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
		LONG_WAIT_TEST_MS);
	pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
		WAIT_TEST_MS);
	pr_info("\t-p: Test physical timer (default: true)\n");
	pr_info("\t-v: Test virtual timer (default: true)\n");
	pr_info("\t-h: Print this help message\n");
}

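/*
 * Example invocation (path assuming the test was built under
 * tools/testing/selftests/kvm):
 *
 *   ./aarch64/arch_timer_edge_cases -i 10 -w 20 -l 200 -v
 *
 * runs 10 iterations of the virtual-timer tests with a 20ms base wait
 * and a 200ms long wait.
 */
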
static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "bhi:l:pvw:")) != -1) {
		switch (opt) {
		case 'b':
			test_args.test_physical = true;
			test_args.test_virtual = true;
			break;
		case 'i':
			test_args.iterations =
			    atoi_positive("Number of iterations", optarg);
			break;
		case 'l':
			test_args.long_wait_ms =
			    atoi_positive("Long wait time", optarg);
			break;
		case 'p':
			test_args.test_physical = true;
			test_args.test_virtual = false;
			break;
		case 'v':
			test_args.test_virtual = true;
			test_args.test_physical = false;
			break;
		case 'w':
			test_args.wait_ms = atoi_positive("Wait time", optarg);
			break;
		case 'h':
		default:
			goto err;
		}
	}

	return true;

err:
	test_print_help(argv[0]);
	return false;
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	if (test_args.test_virtual) {
		test_vm_create(&vm, &vcpu, VIRTUAL);
		test_run(vm, vcpu);
		kvm_vm_free(vm);
	}

	if (test_args.test_physical) {
		test_vm_create(&vm, &vcpu, PHYSICAL);
		test_run(vm, vcpu);
		kvm_vm_free(vm);
	}

	return 0;
}
1063