// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch_timer_edge_cases.c - Tests the aarch64 timer IRQ functionality.
 *
 * The test validates some edge cases related to the arch-timer:
 * - timers above the max TVAL value.
 * - timers in the past.
 * - moving counters ahead and behind pending timers.
 * - reprogramming timers.
 * - timers fired multiple times.
 * - masking/unmasking using the timer control mask.
 *
 * Copyright (c) 2021, Google LLC.
 */

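/*
 * Example invocation (the flag values are illustrative, not required; see
 * test_print_help() for the defaults):
 *
 *	./arch_timer_edge_cases -i 10 -w 20 -l 200
 */
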
#define _GNU_SOURCE

#include <pthread.h>
#include <sys/sysinfo.h>

#include "arch_timer.h"
#include "gic.h"
#include "vgic.h"

/* Depends on counter width. */
static uint64_t CVAL_MAX;
/* tval is a signed 32-bit int. */
static const int32_t TVAL_MAX = INT32_MAX;
static const int32_t TVAL_MIN = INT32_MIN;

/* How long to wait (in usecs) before deciding that no IRQ will arrive. */
static const uint32_t TIMEOUT_NO_IRQ_US = 50000;

/* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2. */
static uint64_t DEF_CNT;

/* Default number of test iterations. */
static const uint32_t NR_TEST_ITERS_DEF = 5;

/* Default wait test time in ms. */
static const uint32_t WAIT_TEST_MS = 10;

/* Default "long" wait test time in ms. */
static const uint32_t LONG_WAIT_TEST_MS = 100;

/* Shared with IRQ handler. */
struct test_vcpu_shared_data {
	atomic_t handled;
	atomic_t spurious;
} shared_data;

struct test_args {
	/* Virtual or physical timer and counter tests. */
	enum arch_timer timer;
	/* Delay used for most timer tests. */
	uint64_t wait_ms;
	/* Delay used in the test_long_timer_delays test. */
	uint64_t long_wait_ms;
	/* Number of iterations. */
	int iterations;
	/* Whether to test the physical timer. */
	bool test_physical;
	/* Whether to test the virtual timer. */
	bool test_virtual;
};

struct test_args test_args = {
	.wait_ms = WAIT_TEST_MS,
	.long_wait_ms = LONG_WAIT_TEST_MS,
	.iterations = NR_TEST_ITERS_DEF,
	.test_physical = true,
	.test_virtual = true,
};

static int vtimer_irq, ptimer_irq;

enum sync_cmd {
	SET_COUNTER_VALUE,
	USERSPACE_USLEEP,
	USERSPACE_SCHED_YIELD,
	USERSPACE_MIGRATE_SELF,
	NO_USERSPACE_CMD,
};

typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec);

static void sleep_poll(enum arch_timer timer, uint64_t usec);
static void sleep_sched_poll(enum arch_timer timer, uint64_t usec);
static void sleep_in_userspace(enum arch_timer timer, uint64_t usec);
static void sleep_migrate(enum arch_timer timer, uint64_t usec);

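/*
 * Each sleep method busy-waits for a given time; they differ in whether a
 * poll iteration stays in the guest or bounces through userspace, which
 * varies how KVM saves/restores timer state while a timer is pending.
 */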
sleep_method_t sleep_method[] = {
	sleep_poll,
	sleep_sched_poll,
	sleep_migrate,
	sleep_in_userspace,
};

typedef void (*irq_wait_method_t)(void);

static void wait_for_non_spurious_irq(void);
static void wait_poll_for_irq(void);
static void wait_sched_poll_for_irq(void);
static void wait_migrate_poll_for_irq(void);

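/*
 * Each wait method blocks until the IRQ handler counts a non-spurious timer
 * IRQ: via WFI, by polling in the guest, or by polling while yielding or
 * migrating in userspace.
 */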
irq_wait_method_t irq_wait_method[] = {
	wait_for_non_spurious_irq,
	wait_poll_for_irq,
	wait_sched_poll_for_irq,
	wait_migrate_poll_for_irq,
};

enum timer_view {
	TIMER_CVAL,
	TIMER_TVAL,
};

static void assert_irqs_handled(uint32_t n)
{
	int h = atomic_read(&shared_data.handled);

	__GUEST_ASSERT(h == n, "Handled %d IRQs but expected %d", h, n);
}

static void userspace_cmd(uint64_t cmd)
{
	GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0);
}

static void userspace_migrate_vcpu(void)
{
	userspace_cmd(USERSPACE_MIGRATE_SELF);
}

static void userspace_sleep(uint64_t usecs)
{
	GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0);
}

static void set_counter(enum arch_timer timer, uint64_t counter)
{
	GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0);
}

static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int intid = gic_get_and_ack_irq();
	enum arch_timer timer;
	uint64_t cnt, cval;
	uint32_t ctl;
	bool timer_condition, istatus;

	if (intid == IAR_SPURIOUS) {
		atomic_inc(&shared_data.spurious);
		goto out;
	}

	if (intid == ptimer_irq)
		timer = PHYSICAL;
	else if (intid == vtimer_irq)
		timer = VIRTUAL;
	else
		goto out;

	ctl = timer_get_ctl(timer);
	cval = timer_get_cval(timer);
	cnt = timer_get_cntct(timer);
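	/*
	 * Per the Arm ARM, ISTATUS reflects the timer condition
	 * (CNT - CVAL >= 0) and is only meaningful while the timer is
	 * enabled; check it against the values just read.
	 */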
	timer_condition = cnt >= cval;
	istatus = (ctl & CTL_ISTATUS) && (ctl & CTL_ENABLE);
	GUEST_ASSERT_EQ(timer_condition, istatus);

	/* Disable and mask the timer. */
	timer_set_ctl(timer, CTL_IMASK);

	atomic_inc(&shared_data.handled);

out:
	gic_set_eoi(intid);
}

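/*
 * The set_{c,t}val_irq() helpers clear the handled/spurious counters before
 * arming the timer, so each test observes only the IRQs it triggers itself.
 */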
static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles,
			 uint32_t ctl)
{
	atomic_set(&shared_data.handled, 0);
	atomic_set(&shared_data.spurious, 0);
	timer_set_cval(timer, cval_cycles);
	timer_set_ctl(timer, ctl);
}

static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
			 uint32_t ctl)
{
	atomic_set(&shared_data.handled, 0);
	atomic_set(&shared_data.spurious, 0);
	timer_set_tval(timer, tval_cycles);
	timer_set_ctl(timer, ctl);
}

static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
			 enum timer_view tv)
{
	switch (tv) {
	case TIMER_CVAL:
		set_cval_irq(timer, xval, ctl);
		break;
	case TIMER_TVAL:
		set_tval_irq(timer, xval, ctl);
		break;
	default:
		GUEST_FAIL("Unexpected timer view: %d", tv);
	}
}

/*
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void wait_for_non_spurious_irq(void)
{
	int h;

	local_irq_disable();

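	/*
	 * WFI wakes up on any pending IRQ; briefly re-enable IRQs (the ISB
	 * makes sure a pending IRQ is actually taken) so the handler can run
	 * before re-checking the handled count.
	 */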
	for (h = atomic_read(&shared_data.handled); h == atomic_read(&shared_data.handled);) {
		wfi();
		local_irq_enable();
		isb(); /* handle IRQ */
		local_irq_disable();
	}
}

/*
 * Wait for a non-spurious IRQ by polling in the guest or in
 * userspace (e.g. userspace_cmd=USERSPACE_SCHED_YIELD).
 *
 * Note that this can theoretically hang forever, so we rely on having
 * a timeout mechanism in the "runner", like:
 * tools/testing/selftests/kselftest/runner.sh.
 */
static void poll_for_non_spurious_irq(enum sync_cmd usp_cmd)
{
	int h;

	local_irq_disable();

	h = atomic_read(&shared_data.handled);

	local_irq_enable();
	while (h == atomic_read(&shared_data.handled)) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
	local_irq_disable();
}

static void wait_poll_for_irq(void)
{
	poll_for_non_spurious_irq(NO_USERSPACE_CMD);
}

static void wait_sched_poll_for_irq(void)
{
	poll_for_non_spurious_irq(USERSPACE_SCHED_YIELD);
}

static void wait_migrate_poll_for_irq(void)
{
	poll_for_non_spurious_irq(USERSPACE_MIGRATE_SELF);
}

/*
 * Sleep for usec microseconds by polling in the guest or in
 * userspace (e.g. userspace_cmd=USERSPACE_SCHED_YIELD).
 */
static void guest_poll(enum arch_timer test_timer, uint64_t usec,
		       enum sync_cmd usp_cmd)
{
	uint64_t cycles = usec_to_cycles(usec);
	/*
	 * Whichever timer we are testing with, sleep with the other
	 * (relies on VIRTUAL and PHYSICAL being 0 and 1).
	 */
	enum arch_timer sleep_timer = 1 - test_timer;
	uint64_t start = timer_get_cntct(sleep_timer);

	while ((timer_get_cntct(sleep_timer) - start) < cycles) {
		if (usp_cmd == NO_USERSPACE_CMD)
			cpu_relax();
		else
			userspace_cmd(usp_cmd);
	}
}

static void sleep_poll(enum arch_timer timer, uint64_t usec)
{
	guest_poll(timer, usec, NO_USERSPACE_CMD);
}

static void sleep_sched_poll(enum arch_timer timer, uint64_t usec)
{
	guest_poll(timer, usec, USERSPACE_SCHED_YIELD);
}

static void sleep_migrate(enum arch_timer timer, uint64_t usec)
{
	guest_poll(timer, usec, USERSPACE_MIGRATE_SELF);
}

static void sleep_in_userspace(enum arch_timer timer, uint64_t usec)
{
	userspace_sleep(usec);
}

/*
 * Reset the timer to a sane state: the counter not close to the edge, and
 * the control register masked and disabled.
 */
static void reset_timer_state(enum arch_timer timer, uint64_t cnt)
{
	set_counter(timer, cnt);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_timer_xval(enum arch_timer timer, uint64_t xval,
			    enum timer_view tv, irq_wait_method_t wm, bool reset_state,
			    uint64_t reset_cnt)
{
	local_irq_disable();

	if (reset_state)
		reset_timer_state(timer, reset_cnt);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/*
 * The test_timer_* functions will program the timer, wait for it, and assert
 * the firing of the correct IRQ.
 *
 * These functions don't have a timeout and return as soon as they receive an
 * IRQ. They can hang (forever), so we rely on having a timeout mechanism in
 * the "runner", like: tools/testing/selftests/kselftest/runner.sh.
 */

static void test_timer_cval(enum arch_timer timer, uint64_t cval,
			    irq_wait_method_t wm, bool reset_state,
			    uint64_t reset_cnt)
{
	test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt);
}

static void test_timer_tval(enum arch_timer timer, int32_t tval,
			    irq_wait_method_t wm, bool reset_state,
			    uint64_t reset_cnt)
{
	test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state,
			reset_cnt);
}

static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval,
				   uint64_t usec, enum timer_view timer_view,
				   sleep_method_t guest_sleep)
{
	local_irq_disable();

	set_xval_irq(timer, xval, CTL_ENABLE | CTL_IMASK, timer_view);
	guest_sleep(timer, usec);

	local_irq_enable();
	isb();

	/* Assume success (no IRQ) after waiting usec microseconds */
	assert_irqs_handled(0);
}

static void test_cval_no_irq(enum arch_timer timer, uint64_t cval,
			     uint64_t usec, sleep_method_t wm)
{
	test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm);
}

static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec,
			     sleep_method_t wm)
{
	/* tval will be cast to an int32_t in test_xval_check_no_irq */
	test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm);
}

/* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */
static void test_timer_control_mask_then_unmask(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);
	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Unmask the timer, and then get an IRQ. */
	local_irq_disable();
	timer_set_ctl(timer, CTL_ENABLE);
	/* This method re-enables IRQs to handle the one we're looking for. */
	wait_for_non_spurious_irq();

	assert_irqs_handled(1);
	local_irq_enable();
}

/* Check that timer control masks actually mask a timer being fired. */
static void test_timer_control_masks(enum arch_timer timer)
{
	reset_timer_state(timer, DEF_CNT);

	/* Local IRQs are not masked at this point. */

	set_tval_irq(timer, -1, CTL_ENABLE | CTL_IMASK);

	/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
	sleep_poll(timer, TIMEOUT_NO_IRQ_US);

	assert_irqs_handled(0);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_fire_a_timer_multiple_times(enum arch_timer timer,
					     irq_wait_method_t wm, int num)
{
	int i;

	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	set_tval_irq(timer, 0, CTL_ENABLE);

	for (i = 1; i <= num; i++) {
		/* This method re-enables IRQs to handle the one we're looking for. */
		wm();

		/*
		 * The IRQ handler masked and disabled the timer.
		 * Enable and unmask it again.
		 */
		timer_set_ctl(timer, CTL_ENABLE);

		assert_irqs_handled(i);
	}

	local_irq_enable();
}

static void test_timers_fired_multiple_times(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++)
		test_fire_a_timer_multiple_times(timer, irq_wait_method[i], 10);
}

/*
 * Set a timer for tval=delta_1_ms then reprogram it to
 * tval=delta_2_ms. Check that we get the timer fired. There is no
 * timeout for the wait: we use the wfi instruction.
 */
static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm,
				     int32_t delta_1_ms, int32_t delta_2_ms)
{
	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	/* Program the timer to DEF_CNT + delta_1_ms. */
	set_tval_irq(timer, msec_to_cycles(delta_1_ms), CTL_ENABLE);

	/* Reprogram the timer to DEF_CNT + delta_2_ms. */
	timer_set_tval(timer, msec_to_cycles(delta_2_ms));

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	/* The IRQ should arrive at DEF_CNT + delta_2_ms (or after). */
	GUEST_ASSERT(timer_get_cntct(timer) >=
		     DEF_CNT + msec_to_cycles(delta_2_ms));

	local_irq_enable();
	assert_irqs_handled(1);
}

static void test_reprogram_timers(enum arch_timer timer)
{
	int i;
	uint64_t base_wait = test_args.wait_ms;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/*
		 * Ensure reprogramming works whether going from a
		 * longer time to a shorter or vice versa.
		 */
		test_reprogramming_timer(timer, irq_wait_method[i], 2 * base_wait,
					 base_wait);
		test_reprogramming_timer(timer, irq_wait_method[i], base_wait,
					 2 * base_wait);
	}
}

static void test_basic_functionality(enum arch_timer timer)
{
	int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms);
	uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);
	}
}

/*
 * This test checks basic timer behavior without actually firing timers:
 * things like the relationship between cval and tval, and tval down-counting.
 */
static void timers_sanity_checks(enum arch_timer timer, bool use_sched)
{
	reset_timer_state(timer, DEF_CNT);

	local_irq_disable();

	/* cval in the past */
	timer_set_cval(timer,
		       timer_get_cntct(timer) -
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	/* tval in the past */
	timer_set_tval(timer, -1);
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) < timer_get_cntct(timer));

	/*
	 * tval larger than TVAL_MAX. This requires programming with
	 * timer_set_cval instead so the value is expressible.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <= 0);

	/*
	 * tval larger than 2 * TVAL_MAX: 2 * TVAL_MAX is congruent to -2
	 * mod 2^32, so it wraps the 32-bit tval all the way around and the
	 * tval read back is just below the wait_ms delta.
	 */
	timer_set_cval(timer,
		       timer_get_cntct(timer) + 2ULL * TVAL_MAX +
		       msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_tval(timer) <=
		       msec_to_cycles(test_args.wait_ms));

	/* A negative tval whose cval wraps below 0 to near CVAL_MAX. */
	set_counter(timer, msec_to_cycles(1));
	timer_set_tval(timer, -1 * msec_to_cycles(test_args.wait_ms));
	if (use_sched)
		userspace_migrate_vcpu();
	GUEST_ASSERT(timer_get_cval(timer) >= (CVAL_MAX - msec_to_cycles(test_args.wait_ms)));

	/* tval should keep down-counting from 0 to -1. */
	timer_set_tval(timer, 0);
	sleep_poll(timer, 1);
	GUEST_ASSERT(timer_get_tval(timer) < 0);

	local_irq_enable();

	/* Mask and disable any pending timer. */
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_timers_sanity_checks(enum arch_timer timer)
{
	timers_sanity_checks(timer, false);
	/* Check how KVM saves/restores these edge-case values. */
	timers_sanity_checks(timer, true);
}

static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t wm)
{
	local_irq_disable();
	reset_timer_state(timer, DEF_CNT);

	set_cval_irq(timer,
		     (uint64_t) TVAL_MAX +
		     msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE);

	set_counter(timer, TVAL_MAX);

	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */
static void test_timers_above_tval_max(enum arch_timer timer)
{
	uint64_t cval;
	int i;

	/*
	 * Test that the system is not implementing cval in terms of
	 * tval.  If that were the case, setting a cval to "cval = now
	 * + TVAL_MAX + wait_ms" would wrap to "cval = now +
	 * wait_ms", and the timer would fire immediately. Test that it
	 * doesn't.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		reset_timer_state(timer, DEF_CNT);
		cval = timer_get_cntct(timer) + TVAL_MAX +
			msec_to_cycles(test_args.wait_ms);
		test_cval_no_irq(timer, cval,
				 msecs_to_usecs(test_args.wait_ms) +
				 TIMEOUT_NO_IRQ_US, sleep_method[i]);
	}

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		/* Get the IRQ by moving the counter forward. */
		test_set_cnt_after_tval_max(timer, irq_wait_method[i]);
	}
}

/*
 * Template function used by the test_move_counter_ahead_* tests.  It sets
 * the counter to cnt_1, programs the [c|t]val, sets the counter to cnt_2,
 * and then waits for an IRQ.
 */
static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1,
				    uint64_t xval, uint64_t cnt_2,
				    irq_wait_method_t wm, enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);
	/* This method re-enables IRQs to handle the one we're looking for. */
	wm();

	assert_irqs_handled(1);
	local_irq_enable();
}

/*
 * Same template as above, except that after moving the counter to cnt_2 it
 * sleeps and checks that no IRQ fires.
 */
static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
					   uint64_t cnt_1, uint64_t xval,
					   uint64_t cnt_2,
					   sleep_method_t guest_sleep,
					   enum timer_view tv)
{
	local_irq_disable();

	set_counter(timer, cnt_1);
	timer_set_ctl(timer, CTL_IMASK);

	set_xval_irq(timer, xval, CTL_ENABLE, tv);
	set_counter(timer, cnt_2);
	guest_sleep(timer, TIMEOUT_NO_IRQ_US);

	local_irq_enable();
	isb();

	/* Assume no IRQ after waiting TIMEOUT_NO_IRQ_US microseconds */
	assert_irqs_handled(0);
	timer_set_ctl(timer, CTL_IMASK);
}

static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1,
				    int32_t tval, uint64_t cnt_2,
				    irq_wait_method_t wm)
{
	test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL);
}

static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1,
				    uint64_t cval, uint64_t cnt_2,
				    irq_wait_method_t wm)
{
	test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL);
}

static void test_set_cnt_after_tval_no_irq(enum arch_timer timer,
					   uint64_t cnt_1, int32_t tval,
					   uint64_t cnt_2, sleep_method_t wm)
{
	test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm,
				       TIMER_TVAL);
}

static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
					   uint64_t cnt_1, uint64_t cval,
					   uint64_t cnt_2, sleep_method_t wm)
{
	test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm,
				       TIMER_CVAL);
}

/* Set a timer and then move the counter ahead of it. */
static void test_move_counters_ahead_of_timers(enum arch_timer timer)
{
	int i;
	int32_t tval;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_set_cnt_after_cval(timer, 0, DEF_CNT, DEF_CNT + 1, wm);
		test_set_cnt_after_cval(timer, CVAL_MAX, 1, 2, wm);

		/* Move counter ahead of negative tval. */
		test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm);
		test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm);
		tval = TVAL_MAX;
		test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
					wm);
	}
}

/*
 * Program a timer, mask it, and then change the tval or counter to cancel it.
 * Unmask it and check that nothing fires.
 */
static void test_move_counters_behind_timers(enum arch_timer timer)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		test_set_cnt_after_cval_no_irq(timer, DEF_CNT, DEF_CNT - 1, 0,
					       sm);
		test_set_cnt_after_tval_no_irq(timer, DEF_CNT, -1, 0, sm);
	}
}

static void test_timers_in_the_past(enum arch_timer timer)
{
	int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms);
	uint64_t cval;
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		/* Set a timer wait_ms in the past. */
		cval = DEF_CNT - msec_to_cycles(test_args.wait_ms);
		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);

		/* Set a timer to counter=0 (in the past) */
		test_timer_cval(timer, 0, wm, true, DEF_CNT);

		/* Set a timer for tval=0 (now) */
		test_timer_tval(timer, 0, wm, true, DEF_CNT);

		/* Set a timer to as far in the past as possible */
		test_timer_tval(timer, TVAL_MIN, wm, true, DEF_CNT);
	}

	/*
	 * Set the counter to wait_ms, and a tval to -wait_ms. There should be no
	 * IRQ as that tval means cval=CVAL_MAX-wait_ms.
	 */
	for (i = 0; i < ARRAY_SIZE(sleep_method); i++) {
		sleep_method_t sm = sleep_method[i];

		set_counter(timer, msec_to_cycles(test_args.wait_ms));
		test_tval_no_irq(timer, tval, TIMEOUT_NO_IRQ_US, sm);
	}
}

static void test_long_timer_delays(enum arch_timer timer)
{
	int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms);
	uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
	int i;

	for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
		irq_wait_method_t wm = irq_wait_method[i];

		test_timer_cval(timer, cval, wm, true, DEF_CNT);
		test_timer_tval(timer, tval, wm, true, DEF_CNT);
	}
}

static void guest_run_iteration(enum arch_timer timer)
{
	test_basic_functionality(timer);
	test_timers_sanity_checks(timer);

	test_timers_above_tval_max(timer);
	test_timers_in_the_past(timer);

	test_move_counters_ahead_of_timers(timer);
	test_move_counters_behind_timers(timer);
	test_reprogram_timers(timer);

	test_timers_fired_multiple_times(timer);

	test_timer_control_mask_then_unmask(timer);
	test_timer_control_masks(timer);
}

static void guest_code(enum arch_timer timer)
{
	int i;

	local_irq_disable();

	gic_init(GIC_V3, 1);

	timer_set_ctl(VIRTUAL, CTL_IMASK);
	timer_set_ctl(PHYSICAL, CTL_IMASK);

	gic_irq_enable(vtimer_irq);
	gic_irq_enable(ptimer_irq);
	local_irq_enable();

	for (i = 0; i < test_args.iterations; i++) {
		GUEST_SYNC(i);
		guest_run_iteration(timer);
	}

	test_long_timer_delays(timer);
	GUEST_DONE();
}

static cpu_set_t default_cpuset;

static uint32_t next_pcpu(void)
{
	uint32_t max = get_nprocs();
	uint32_t cur = sched_getcpu();
	uint32_t next = cur;
	cpu_set_t cpuset = default_cpuset;

	TEST_ASSERT(max > 1, "Need at least two physical cpus");

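	/*
	 * Advance to the next CPU present in the saved default affinity
	 * mask, wrapping around at CPU_SETSIZE.
	 */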
	do {
		next = (next + 1) % CPU_SETSIZE;
	} while (!CPU_ISSET(next, &cpuset));

	return next;
}

static void migrate_self(uint32_t new_pcpu)
{
	int ret;
	cpu_set_t cpuset;
	pthread_t thread;

	thread = pthread_self();

	CPU_ZERO(&cpuset);
	CPU_SET(new_pcpu, &cpuset);

	pr_debug("Migrating from %u to %u\n", sched_getcpu(), new_pcpu);

	ret = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);

	TEST_ASSERT(ret == 0, "Failed to migrate to pCPU: %u; ret: %d\n",
		    new_pcpu, ret);
}

static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt,
			   enum arch_timer timer)
{
	if (timer == PHYSICAL)
		vcpu_set_reg(vcpu, KVM_REG_ARM_PTIMER_CNT, cnt);
	else
		vcpu_set_reg(vcpu, KVM_REG_ARM_TIMER_CNT, cnt);
}

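/*
 * Handle a request issued from the guest via GUEST_SYNC_ARGS(cmd, val,
 * timer, ...); args[1..3] carry the command, its value, and the targeted
 * timer.
 */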
static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	enum sync_cmd cmd = uc->args[1];
	uint64_t val = uc->args[2];
	enum arch_timer timer = uc->args[3];

	switch (cmd) {
	case SET_COUNTER_VALUE:
		kvm_set_cntxct(vcpu, val, timer);
		break;
	case USERSPACE_USLEEP:
		usleep(val);
		break;
	case USERSPACE_SCHED_YIELD:
		sched_yield();
		break;
	case USERSPACE_MIGRATE_SELF:
		migrate_self(next_pcpu());
		break;
	default:
		break;
	}
}

static void test_run(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	/* Start on CPU 0 */
	migrate_self(0);

	while (true) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			handle_sync(vcpu, &uc);
			break;
		case UCALL_DONE:
			goto out;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			goto out;
		default:
			TEST_FAIL("Unexpected guest exit\n");
		}
	}

out:
	return;
}

static void test_init_timer_irq(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
	vcpu_device_attr_get(vcpu, KVM_ARM_VCPU_TIMER_CTRL,
			     KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);

	sync_global_to_guest(vm, ptimer_irq);
	sync_global_to_guest(vm, vtimer_irq);

	pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}

static int gic_fd;

static void test_vm_create(struct kvm_vm **vm, struct kvm_vcpu **vcpu,
			   enum arch_timer timer)
{
	*vm = vm_create_with_one_vcpu(vcpu, guest_code);
	TEST_ASSERT(*vm, "Failed to create the test VM\n");

	vm_init_descriptor_tables(*vm);
	vm_install_exception_handler(*vm, VECTOR_IRQ_CURRENT,
				     guest_irq_handler);

	vcpu_init_descriptor_tables(*vcpu);
	vcpu_args_set(*vcpu, 1, timer);

	test_init_timer_irq(*vm, *vcpu);
	gic_fd = vgic_v3_setup(*vm, 1, 64);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");

	sync_global_to_guest(*vm, test_args);
	sync_global_to_guest(*vm, CVAL_MAX);
	sync_global_to_guest(*vm, DEF_CNT);
}

static void test_vm_cleanup(struct kvm_vm *vm)
{
	close(gic_fd);
	kvm_vm_free(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-b] [-i iterations] [-l long_wait_ms] [-p] [-v] [-w wait_ms]\n",
		name);
	pr_info("\t-i: Number of iterations (default: %u)\n",
		NR_TEST_ITERS_DEF);
	pr_info("\t-b: Test both physical and virtual timers (default: true)\n");
	pr_info("\t-l: Delta (in ms) used for long wait time test (default: %u)\n",
		LONG_WAIT_TEST_MS);
	pr_info("\t-w: Delta (in ms) used for wait times (default: %u)\n",
		WAIT_TEST_MS);
	pr_info("\t-p: Test physical timer (default: true)\n");
	pr_info("\t-v: Test virtual timer (default: true)\n");
	pr_info("\t-h: Print this help message\n");
}

static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "bhi:l:pvw:")) != -1) {
		switch (opt) {
		case 'b':
			test_args.test_physical = true;
			test_args.test_virtual = true;
			break;
		case 'i':
			test_args.iterations =
			    atoi_positive("Number of iterations", optarg);
			break;
		case 'l':
			test_args.long_wait_ms =
			    atoi_positive("Long wait time", optarg);
			break;
		case 'p':
			test_args.test_physical = true;
			test_args.test_virtual = false;
			break;
		case 'v':
			test_args.test_virtual = true;
			test_args.test_physical = false;
			break;
		case 'w':
			test_args.wait_ms = atoi_positive("Wait time", optarg);
			break;
		case 'h':
		default:
			goto err;
		}
	}

	return true;

err:
	test_print_help(argv[0]);
	return false;
}

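/*
 * Derive the counter width from CNTFRQ: the Arm ARM requires a system
 * counter roll-over time of no less than 40 years and a counter width of
 * 56 to 64 bits. For example, at a 1 GHz CNTFRQ, 40 years is about
 * 1.26e18 cycles, so ilog2() yields 60 and CVAL_MAX = 2^60 - 1.
 */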
static void set_counter_defaults(void)
{
	const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
	uint64_t freq = read_sysreg(CNTFRQ_EL0);
	uint64_t width = ilog2(MIN_ROLLOVER_SECS * freq);

	width = clamp(width, 56, 64);
	CVAL_MAX = GENMASK_ULL(width - 1, 0);
	DEF_CNT = CVAL_MAX / 2;
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

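	/* Snapshot the default affinity mask; next_pcpu() walks it. */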
	sched_getaffinity(0, sizeof(default_cpuset), &default_cpuset);
	set_counter_defaults();

	if (test_args.test_virtual) {
		test_vm_create(&vm, &vcpu, VIRTUAL);
		test_run(vm, vcpu);
		test_vm_cleanup(vm);
	}

	if (test_args.test_physical) {
		test_vm_create(&vm, &vcpu, PHYSICAL);
		test_run(vm, vcpu);
		test_vm_cleanup(vm);
	}

	return 0;
}