// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch_timer_edge_cases.c - Tests the aarch64 timer IRQ functionality.
 *
 * The test validates some edge cases related to the arch-timer:
 * - timers above the max TVAL value.
 * - timers in the past.
 * - moving counters ahead and behind pending timers.
 * - reprogramming timers.
 * - timers firing multiple times.
 * - masking/unmasking using the timer control mask.
 *
 * Copyright (c) 2021, Google LLC.
 */
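
/*
 * Example invocation (illustrative values; see parse_args() below for
 * the full set of flags):
 *
 *	./arch_timer_edge_cases -i 10 -w 20 -l 200
 */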

#define _GNU_SOURCE

#include <pthread.h>
#include <sys/sysinfo.h>

#include "arch_timer.h"
#include "gic.h"
#include "vgic.h"

/* Depends on counter width. */
static u64 CVAL_MAX;
/* tval is a signed 32-bit int. */
static const s32 TVAL_MAX = INT32_MAX;
static const s32 TVAL_MIN = INT32_MIN;
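
/*
 * Note on tval vs. cval (per the Arm ARM): writing TVAL programs
 * CVAL = CNT + sign_extend(TVAL), and reading TVAL returns
 * (s32)(CVAL - CNT). TVAL therefore down-counts and goes negative once
 * the timer condition (CNT >= CVAL) is met.
 */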

/* How long to wait (in microseconds) before deciding no IRQ will arrive. */
static const u32 TIMEOUT_NO_IRQ_US = 50000;

/* Counter value to use as the starting one for most tests. Set to CVAL_MAX / 2. */
static u64 DEF_CNT;

/* Number of runs. */
static const u32 NR_TEST_ITERS_DEF = 5;

/* Default wait test time in ms. */
static const u32 WAIT_TEST_MS = 10;

/* Default "long" wait test time in ms. */
static const u32 LONG_WAIT_TEST_MS = 100;

/* Shared with IRQ handler. */
struct test_vcpu_shared_data {
	atomic_t handled;
	atomic_t spurious;
} shared_data;

struct test_args {
	/* Virtual or physical timer and counter tests. */
	enum arch_timer timer;
	/* Delay used for most timer tests. */
	u64 wait_ms;
	/* Delay used in the test_long_timer_delays test. */
	u64 long_wait_ms;
	/* Number of iterations. */
	int iterations;
	/* Whether to test the physical timer. */
	bool test_physical;
	/* Whether to test the virtual timer. */
	bool test_virtual;
};

struct test_args test_args = {
	.wait_ms = WAIT_TEST_MS,
	.long_wait_ms = LONG_WAIT_TEST_MS,
	.iterations = NR_TEST_ITERS_DEF,
	.test_physical = true,
	.test_virtual = true,
};

static int vtimer_irq, ptimer_irq;

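/*
 * Commands the guest sends to the userspace runner via GUEST_SYNC_ARGS;
 * they are serviced in handle_sync() below.
 */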
enum sync_cmd {
	SET_COUNTER_VALUE,
	USERSPACE_USLEEP,
	USERSPACE_SCHED_YIELD,
	USERSPACE_MIGRATE_SELF,
	NO_USERSPACE_CMD,
};

typedef void (*sleep_method_t)(enum arch_timer timer, u64 usec);

static void sleep_poll(enum arch_timer timer, u64 usec);
static void sleep_sched_poll(enum arch_timer timer, u64 usec);
static void sleep_in_userspace(enum arch_timer timer, u64 usec);
static void sleep_migrate(enum arch_timer timer, u64 usec);

sleep_method_t sleep_method[] = {
	sleep_poll,
	sleep_sched_poll,
	sleep_migrate,
	sleep_in_userspace,
};

typedef void (*irq_wait_method_t)(void);

static void wait_for_non_spurious_irq(void);
static void wait_poll_for_irq(void);
static void wait_sched_poll_for_irq(void);
static void wait_migrate_poll_for_irq(void);

irq_wait_method_t irq_wait_method[] = {
	wait_for_non_spurious_irq,
	wait_poll_for_irq,
	wait_sched_poll_for_irq,
	wait_migrate_poll_for_irq,
};

enum timer_view {
	TIMER_CVAL,
	TIMER_TVAL,
};

static void assert_irqs_handled(u32 n)
{
	int h = atomic_read(&shared_data.handled);

	__GUEST_ASSERT(h == n, "Handled %d IRQs but expected %d", h, n);
}

static void userspace_cmd(u64 cmd)
{
	GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0);
}

static void userspace_migrate_vcpu(void)
{
	userspace_cmd(USERSPACE_MIGRATE_SELF);
}

static void userspace_sleep(u64 usecs)
{
	GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0);
}

static void set_counter(enum arch_timer timer, u64 counter)
{
	GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0);
}

static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int intid = gic_get_and_ack_irq();
	enum arch_timer timer;
	u64 cnt, cval;
	u32 ctl;
	bool timer_condition, istatus;

	if (intid == IAR_SPURIOUS) {
		atomic_inc(&shared_data.spurious);
		goto out;
	}

	if (intid == ptimer_irq)
		timer = PHYSICAL;
	else if (intid == vtimer_irq)
		timer = VIRTUAL;
	else
		goto out;

	ctl = timer_get_ctl(timer);
	cval = timer_get_cval(timer);
	cnt = timer_get_cntct(timer);
	timer_condition = cnt >= cval;
	istatus = (ctl & CTL_ISTATUS) && (ctl & CTL_ENABLE);
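	/*
	 * Per the Arm ARM, ISTATUS is set iff the timer condition
	 * (CNT >= CVAL) is met while the timer is enabled, so KVM's
	 * emulated view must agree with the condition computed above.
	 */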
	GUEST_ASSERT_EQ(timer_condition, istatus);

	/* Disable and mask the timer. */
	timer_set_ctl(timer, CTL_IMASK);

	atomic_inc(&shared_data.handled);

out:
	gic_set_eoi(intid);
}

static void set_cval_irq(enum arch_timer timer, u64 cval_cycles,
			 u32 ctl)
{
	atomic_set(&shared_data.handled, 0);
	atomic_set(&shared_data.spurious, 0);
	timer_set_cval(timer, cval_cycles);
	timer_set_ctl(timer, ctl);
}

static void set_tval_irq(enum arch_timer timer, u64 tval_cycles,
			 u32 ctl)
{
	atomic_set(&shared_data.handled, 0);
	atomic_set(&shared_data.spurious, 0);
	timer_set_tval(timer, tval_cycles);
	timer_set_ctl(timer, ctl);
}

static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl,
			 enum timer_view tv)
{
	switch (tv) {
	case TIMER_CVAL:
		set_cval_irq(timer, xval, ctl);
		break;
	case TIMER_TVAL:
		set_tval_irq(timer, xval, ctl);
		break;
	default:
		GUEST_FAIL("Unexpected timer view: %d", tv);
	}
}

/*
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void wait_for_non_spurious_irq(void)
{
	int h;

	local_irq_disable();

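	/*
	 * WFI wakes up on a pending interrupt even with PSTATE.I masked;
	 * the enable/isb window below is what actually lets the handler
	 * run before IRQs are masked again.
	 */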
	for (h = atomic_read(&shared_data.handled); h == atomic_read(&shared_data.handled);) {
		wfi();
		local_irq_enable();
		isb(); /* handle IRQ */
		local_irq_disable();
	}
}

/*
 * Wait for a non-spurious IRQ by polling in the guest or in
 * userspace (e.g. usp_cmd=USERSPACE_SCHED_YIELD).
 *
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void poll_for_non_spurious_irq(enum sync_cmd usp_cmd)
{
	int h;

	local_irq_disable();

	h = atomic_read(&shared_data.handled);

	local_irq_enable();
	while (h == atomic_read(&shared_data.handled)) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
	local_irq_disable();
}

static void wait_poll_for_irq(void)
{
	poll_for_non_spurious_irq(NO_USERSPACE_CMD);
}

static void wait_sched_poll_for_irq(void)
{
	poll_for_non_spurious_irq(USERSPACE_SCHED_YIELD);
}

static void wait_migrate_poll_for_irq(void)
{
	poll_for_non_spurious_irq(USERSPACE_MIGRATE_SELF);
}

/*
 * Sleep for usec microseconds by polling in the guest or in
 * userspace (e.g. usp_cmd=USERSPACE_SCHED_YIELD).
 */
static void guest_poll(enum arch_timer test_timer, u64 usec,
		       enum sync_cmd usp_cmd)
{
	u64 cycles = usec_to_cycles(usec);
	/* Whichever timer we are testing with, sleep with the other. */
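	/* Note: this assumes enum arch_timer is {VIRTUAL = 0, PHYSICAL = 1}. */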
	enum arch_timer sleep_timer = 1 - test_timer;
	u64 start = timer_get_cntct(sleep_timer);

	while ((timer_get_cntct(sleep_timer) - start) < cycles) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
}

static void sleep_poll(enum arch_timer timer, u64 usec)
{
	guest_poll(timer, usec, NO_USERSPACE_CMD);
}

static void sleep_sched_poll(enum arch_timer timer, u64 usec)
{
	guest_poll(timer, usec, USERSPACE_SCHED_YIELD);
}

static void sleep_migrate(enum arch_timer timer, u64 usec)
{
	guest_poll(timer, usec, USERSPACE_MIGRATE_SELF);
}

static void sleep_in_userspace(enum arch_timer timer, u64 usec)
{
	userspace_sleep(usec);
}

/*
 * Reset the timer state to sane values: a counter well away from the
 * rollover edge, and the control register masked and disabled.
 */
static void reset_timer_state(enum arch_timer timer, u64 cnt)
{
	set_counter(timer, cnt);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_timer_xval(enum arch_timer timer, u64 xval,
			    enum timer_view tv, irq_wait_method_t wm, bool reset_state,
			    u64 reset_cnt)
{
	local_irq_disable();

	if (reset_state)
		reset_timer_state(timer, reset_cnt);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/*
 * The test_timer_* functions will program the timer, wait for it, and assert
 * the firing of the correct IRQ.
 *
 * These functions don't have a timeout and return as soon as they receive an
 * IRQ. They can hang (forever), so we rely on having a timeout mechanism in
 * the "runner", like: tools/testing/selftests/kselftest/runner.sh.
 */

static void test_timer_cval(enum arch_timer timer, u64 cval,
			    irq_wait_method_t wm, bool reset_state,
			    u64 reset_cnt)
{
	test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt);
}

static void test_timer_tval(enum arch_timer timer, s32 tval,
			    irq_wait_method_t wm, bool reset_state,
			    u64 reset_cnt)
{
	test_timer_xval(timer, (u64)tval, TIMER_TVAL, wm, reset_state,
			reset_cnt);
}

static void test_xval_check_no_irq(enum arch_timer timer, u64 xval,
				   u64 usec, enum timer_view timer_view,
				   sleep_method_t guest_sleep)
{
	local_irq_disable();

	set_xval_irq(timer, xval, CTL_ENABLE | CTL_IMASK, timer_view);
	guest_sleep(timer, usec);

	local_irq_enable();
	isb();

	/* Assume success (no IRQ) after waiting usec microseconds */
	assert_irqs_handled(0);
}

static void test_cval_no_irq(enum arch_timer timer, u64 cval,
			     u64 usec, sleep_method_t wm)
{
	test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm);
}

static void test_tval_no_irq(enum arch_timer timer, s32 tval, u64 usec,
			     sleep_method_t wm)
{
	/* tval will be cast to an s32 in test_xval_check_no_irq */
	test_xval_check_no_irq(timer, (u64)tval, usec, TIMER_TVAL, wm);
}

/* Test masking/unmasking a timer using the timer control mask (not the IRQ mask). */
static void test_timer_control_mask_then_unmask(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);
	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Unmask the timer, and then get an IRQ. */
	local_irq_disable();
	timer_set_ctl(timer, CTL_ENABLE);
	/* This method re-enables IRQs to handle the one we're looking for. */
	wait_for_non_spurious_irq();

	assert_irqs_handled(1);
	local_irq_enable();
}

/* Check that the timer control mask actually masks a timer that has fired. */
static void test_timer_control_masks(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);

	/* Local IRQs are not masked at this point. */

	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
	sleep_poll(timer, TIMEOUT_NO_IRQ_US);

	assert_irqs_handled(0);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_fire_a_timer_multiple_times(enum arch_timer timer,
					     irq_wait_method_t wm, int num)
{
	int i;

	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	set_tval_irq(timer, 0, CTL_ENABLE);

	for (i = 1; i <= num; i++) {
		/* This method re-enables IRQs to handle the one we're looking for. */
		wm();

		/*
		 * The IRQ handler masked and disabled the timer.
		 * Enable and unmask it again.
		 */
		timer_set_ctl(timer, CTL_ENABLE);

		assert_irqs_handled(i);
	}

	local_irq_enable();
}

static void test_timers_fired_multiple_times(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++)
		test_fire_a_timer_multiple_times(timer, irq_wait_method[i], 10);
}

/*
 * Set a timer for tval=delta_1_ms then reprogram it to tval=delta_2_ms.
 * Check that the timer fires at delta_2_ms. There is no timeout for the
 * wait: if the IRQ never arrives we rely on the runner's timeout
 * mechanism.
 */
static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm,
				     s32 delta_1_ms, s32 delta_2_ms)
{
	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	/* Program the timer to DEF_CNT + delta_1_ms. */
	set_tval_irq(timer, msec_to_cycles(delta_1_ms), CTL_ENABLE);

	/* Reprogram the timer to DEF_CNT + delta_2_ms. */
	timer_set_tval(timer, msec_to_cycles(delta_2_ms));

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	/* The IRQ should arrive at DEF_CNT + delta_2_ms (or after). */
	GUEST_ASSERT(timer_get_cntct(timer) >=
		     DEF_CNT + msec_to_cycles(delta_2_ms));

	local_irq_enable();
	assert_irqs_handled(1);
}

static void test_reprogram_timers(enum arch_timer timer)
{
	int i;
	u64 base_wait = test_args.wait_ms;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/*
		 * Ensure reprogramming works whether going from a
		 * longer time to a shorter or vice versa.
		 */
		test_reprogramming_timer(timer, irq_wait_method[i], 2 * base_wait,
					 base_wait);
		test_reprogramming_timer(timer, irq_wait_method[i], base_wait,
					 2 * base_wait);
	}
}

static void test_basic_functionality(enum arch_timer timer)
{
	s32 tval = (s32)msec_to_cycles(test_args.wait_ms);
	u64 cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);
	}
}

/*
 * This test checks basic timer behavior without actually firing timers, things
 * like: the relationship between cval and tval, tval down-counting.
 */
static void timers_sanity_checks(enum arch_timer timer, bool use_sched)
{
	reset_timer_state(timer, DEF_CNT);

	local_irq_disable();

	/* cval in the past */
	timer_set_cval(timer,
		       timer_get_cntct(timer) -
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	/* tval in the past */
	timer_set_tval(timer, -1);
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) < timer_get_cntct(timer));

	/*
	 * tval larger than TVAL_MAX. This requires programming with
	 * timer_set_cval instead so the value is expressible.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <= 0);

	/*
	 * tval larger than 2 * TVAL_MAX. Twice TVAL_MAX wraps the 32-bit
	 * tval all the way around (2 * TVAL_MAX == -2 as an s32), so the
	 * tval read back is small again.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + 2ULL * TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <=
		       msec_to_cycles(test_args.wait_ms));

	/* A negative tval that makes cval roll over below 0. */
	set_counter(timer, msec_to_cycles(1));
	timer_set_tval(timer, -1 * msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) >= (CVAL_MAX - msec_to_cycles(test_args.wait_ms)));

	/* tval should keep down-counting from 0 to -1. */
	timer_set_tval(timer, 0);
	sleep_poll(timer, 1);
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	local_irq_enable();

	/* Mask and disable any pending timer. */
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_timers_sanity_checks(enum arch_timer timer)
{
	timers_sanity_checks(timer, false);
	/* Check how KVM saves/restores these edge-case values. */
	timers_sanity_checks(timer, true);
}

static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t wm)
{
	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	set_cval_irq(timer,
		     (u64)TVAL_MAX +
		     msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE);

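	/* Jump the counter to TVAL_MAX; the cval is then only wait_ms / 2 away. */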
	set_counter(timer, TVAL_MAX);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */
static void test_timers_above_tval_max(enum arch_timer timer)
{
	u64 cval;
	int i;

	/*
	 * Test that the system is not implementing cval in terms of
	 * tval. If that were the case, setting "cval = now + TVAL_MAX +
	 * wait_ms" would truncate to a negative 32-bit tval, and the
	 * timer would fire immediately. Test that it doesn't.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		reset_timer_state(timer, DEF_CNT);
		cval = timer_get_cntct(timer) + TVAL_MAX +
			msec_to_cycles(test_args.wait_ms);
		test_cval_no_irq(timer, cval,
				 msecs_to_usecs(test_args.wait_ms) +
				 TIMEOUT_NO_IRQ_US, sleep_method[i]);
	}

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/* Get the IRQ by moving the counter forward. */
		test_set_cnt_after_tval_max(timer, irq_wait_method[i]);
	}
}

/*
 * Template function to be used by the test_move_counter_ahead_* tests.  It
 * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and
 * then waits for an IRQ.
 */
static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1,
				    u64 xval, u64 cnt_2,
				    irq_wait_method_t wm, enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);
	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/*
 * Template function to be used by the test_move_counters_behind_* tests.  It
 * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and
 * then checks that no IRQ fires while sleeping.
 */
static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
					   u64 cnt_1, u64 xval,
					   u64 cnt_2,
					   sleep_method_t guest_sleep,
					   enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);
	guest_sleep(timer, TIMEOUT_NO_IRQ_US);

	local_irq_enable();
	isb();

	/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
	assert_irqs_handled(0);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_set_cnt_after_tval(enum arch_timer timer, u64 cnt_1,
				    s32 tval, u64 cnt_2,
				    irq_wait_method_t wm)
{
	test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL);
}

static void test_set_cnt_after_cval(enum arch_timer timer, u64 cnt_1,
				    u64 cval, u64 cnt_2,
				    irq_wait_method_t wm)
{
	test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL);
}

static void test_set_cnt_after_tval_no_irq(enum arch_timer timer,
					   u64 cnt_1, s32 tval,
					   u64 cnt_2, sleep_method_t wm)
{
	test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm,
				       TIMER_TVAL);
}

static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
					   u64 cnt_1, u64 cval,
					   u64 cnt_2, sleep_method_t wm)
{
	test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm,
				       TIMER_CVAL);
}

/* Set a timer and then move the counter ahead of it. */
static void test_move_counters_ahead_of_timers(enum arch_timer timer)
{
	int i;
	s32 tval;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_set_cnt_after_cval(timer, 0, DEF_CNT, DEF_CNT + 1, wm);
		test_set_cnt_after_cval(timer, CVAL_MAX, 1, 2, wm);

		/* Move counter ahead of negative tval. */
		test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm);
		test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm);
		tval = TVAL_MAX;
		test_set_cnt_after_tval(timer, 0, tval, (u64)tval + 1, wm);
	}
}

/*
 * Program a timer, mask it, and then change the tval or counter to cancel it.
 * Unmask it and check that nothing fires.
 */
static void test_move_counters_behind_timers(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		test_set_cnt_after_cval_no_irq(timer, DEF_CNT, DEF_CNT - 1, 0,
					       sm);
		test_set_cnt_after_tval_no_irq(timer, DEF_CNT, -1, 0, sm);
	}
}

static void test_timers_in_the_past(enum arch_timer timer)
{
	s32 tval = -1 * (s32)msec_to_cycles(test_args.wait_ms);
	u64 cval;
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		/* Set a timer wait_ms in the past. */
		cval = DEF_CNT - msec_to_cycles(test_args.wait_ms);
		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);

		/* Set a timer to counter=0 (in the past) */
		test_timer_cval(timer, 0, wm, true, DEF_CNT);

		/* Set a timer for tval=0 (now) */
		test_timer_tval(timer, 0, wm, true, DEF_CNT);

		/* Set a timer to as far in the past as possible */
		test_timer_tval(timer, TVAL_MIN, wm, true, DEF_CNT);
	}

	/*
	 * Set the counter to wait_ms, and a tval of -wait_ms. There should be
	 * no IRQ as that tval means cval=CVAL_MAX-wait_ms.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		set_counter(timer, msec_to_cycles(test_args.wait_ms));
		test_tval_no_irq(timer, tval, TIMEOUT_NO_IRQ_US, sm);
	}
}

static void test_long_timer_delays(enum arch_timer timer)
{
	s32 tval = (s32)msec_to_cycles(test_args.long_wait_ms);
	u64 cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);
	}
}

static void guest_run_iteration(enum arch_timer timer)
{
	test_basic_functionality(timer);
	test_timers_sanity_checks(timer);

	test_timers_above_tval_max(timer);
	test_timers_in_the_past(timer);

	test_move_counters_ahead_of_timers(timer);
	test_move_counters_behind_timers(timer);
	test_reprogram_timers(timer);

	test_timers_fired_multiple_times(timer);

	test_timer_control_mask_then_unmask(timer);
	test_timer_control_masks(timer);
}

static void guest_code(enum arch_timer timer)
{
	int i;

	local_irq_disable();

	gic_init(GIC_V3, 1);

	timer_set_ctl(VIRTUAL, CTL_IMASK);
	timer_set_ctl(PHYSICAL, CTL_IMASK);

	gic_irq_enable(vtimer_irq);
	gic_irq_enable(ptimer_irq);
	local_irq_enable();

	for (i = 0; i < test_args.iterations; i++) {
		GUEST_SYNC(i);
		guest_run_iteration(timer);
	}

	test_long_timer_delays(timer);
	GUEST_DONE();
}

static cpu_set_t default_cpuset;

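/*
 * Return the next pCPU in the affinity mask the test started with,
 * wrapping around CPU_SETSIZE; used to bounce the vCPU thread between
 * physical CPUs.
 */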
static u32 next_pcpu(void)
{
	u32 max = get_nprocs();
	u32 cur = sched_getcpu();
	u32 next = cur;
	cpu_set_t cpuset = default_cpuset;

	TEST_ASSERT(max > 1, "Need at least two physical cpus");

	do {
		next = (next + 1) % CPU_SETSIZE;
	} while (!CPU_ISSET(next, &cpuset));

	return next;
}

static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt,
			   enum arch_timer timer)
{
	if (timer == PHYSICAL)
		vcpu_set_reg(vcpu, KVM_REG_ARM_PTIMER_CNT, cnt);
	else
		vcpu_set_reg(vcpu, KVM_REG_ARM_TIMER_CNT, cnt);
}

static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	enum sync_cmd cmd = uc->args[1];
	u64 val = uc->args[2];
	enum arch_timer timer = uc->args[3];

	switch (cmd) {
	case SET_COUNTER_VALUE:
		kvm_set_cntxct(vcpu, val, timer);
		break;
	case USERSPACE_USLEEP:
		usleep(val);
		break;
	case USERSPACE_SCHED_YIELD:
		sched_yield();
		break;
	case USERSPACE_MIGRATE_SELF:
		pin_self_to_cpu(next_pcpu());
		break;
	default:
		break;
	}
}


static void test_run(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	/* Start on CPU 0 */
	pin_self_to_cpu(0);

	while (true) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			handle_sync(vcpu, &uc);
			break;
		case UCALL_DONE:
			goto out;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			goto out;
		default:
			TEST_FAIL("Unexpected guest exit\n");
		}
	}

 out:
	return;
}

static void test_init_timer_irq(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	ptimer_irq = vcpu_get_ptimer_irq(vcpu);
	vtimer_irq = vcpu_get_vtimer_irq(vcpu);

	sync_global_to_guest(vm, ptimer_irq);
	sync_global_to_guest(vm, vtimer_irq);

	pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}

static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu,
			   enum arch_timer timer)
{
	*vm = vm_create_with_one_vcpu(vcpu, guest_code);
	TEST_ASSERT(*vm, "Failed to create the test VM\n");

	vm_init_descriptor_tables(*vm);
	vm_install_exception_handler(*vm, VECTOR_IRQ_CURRENT,
				     guest_irq_handler);

	vcpu_init_descriptor_tables(*vcpu);
	vcpu_args_set(*vcpu, 1, timer);

	test_init_timer_irq(*vm, *vcpu);

	sync_global_to_guest(*vm, test_args);
	sync_global_to_guest(*vm, CVAL_MAX);
	sync_global_to_guest(*vm, DEF_CNT);
}

static void test_vm_cleanup(struct kvm_vm *vm)
{
	kvm_vm_free(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-b] [-i iterations] [-l long_wait_ms] [-p] [-v] [-w wait_ms]\n",
		name);
	pr_info("\t-i: Number of iterations (default: %u)\n",
		NR_TEST_ITERS_DEF);
	pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
	pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
		LONG_WAIT_TEST_MS);
	pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
		WAIT_TEST_MS);
	pr_info("\t-p: Test physical timer (default: true)\n");
	pr_info("\t-v: Test virtual timer (default: true)\n");
	pr_info("\t-h: Print this help message\n");
}

static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "bhi:l:pvw:")) != -1) {
		switch (opt) {
		case 'b':
			test_args.test_physical = true;
			test_args.test_virtual = true;
			break;
		case 'i':
			test_args.iterations =
			    atoi_positive("Number of iterations", optarg);
			break;
		case 'l':
			test_args.long_wait_ms =
			    atoi_positive("Long wait time", optarg);
			break;
		case 'p':
			test_args.test_physical = true;
			test_args.test_virtual = false;
			break;
		case 'v':
			test_args.test_virtual = true;
			test_args.test_physical = false;
			break;
		case 'w':
			test_args.wait_ms = atoi_positive("Wait time", optarg);
			break;
		case 'h':
		default:
			goto err;
		}
	}

	return true;

 err:
	test_print_help(argv[0]);
	return false;
}

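/*
 * Pick the counter width as the smallest width (clamped to [56, 64])
 * whose counter would not roll over in at least 40 years. As an
 * illustrative example (assuming a 1 GHz CNTFRQ): 40 years ~= 1.26e18
 * cycles, ilog2() of that is 60, so CVAL_MAX = 2^60 - 1 and DEF_CNT
 * sits safely mid-range.
 */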
static void set_counter_defaults(void)
{
	const u64 MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
	u64 freq = read_sysreg(CNTFRQ_EL0);
	int width = ilog2(MIN_ROLLOVER_SECS * freq);

	width = clamp(width, 56, 64);
	CVAL_MAX = GENMASK_ULL(width - 1, 0);
	DEF_CNT = CVAL_MAX / 2;
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	TEST_REQUIRE(kvm_supports_vgic_v3());

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	sched_getaffinity(0, sizeof(default_cpuset), &default_cpuset);
	set_counter_defaults();

	if (test_args.test_virtual) {
		test_vm_create(&vm, &vcpu, VIRTUAL);
		test_run(vm, vcpu);
		test_vm_cleanup(vm);
	}

	if (test_args.test_physical) {
		test_vm_create(&vm, &vcpu, PHYSICAL);
		test_run(vm, vcpu);
		test_vm_cleanup(vm);
	}

	return 0;
}