// SPDX-License-Identifier: GPL-2.0
/*
 * vgic_irq.c - Test userspace injection of IRQs
 *
 * This test validates the injection of IRQs from userspace using various
 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
 * it received it.
 */
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>

#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"

/*
 * Stores the user specified args; it's passed to the guest and to every test
 * function.
 */
struct test_args {
	uint32_t nr_irqs; /* number of KVM supported IRQs. */
	bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
	bool level_sensitive; /* 1 is level, 0 is edge */
	int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
	bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
	uint32_t shared_data;
};

/*
 * KVM implements 32 priority levels:
 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
 *
 * Note that these macros will still be correct in the case that KVM implements
 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
 */
#define KVM_NUM_PRIOS		32
#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
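
/*
 * Worked example of the encoding above: LOWEST_PRIO is 31, so
 * IRQ_DEFAULT_PRIO is 30 and its register value is 30 << 3 = 0xf0.
 * CPU_PRIO_MASK is 31 << 3 = 0xf8, so an IRQ at the default priority
 * (0xf0 < 0xf8) passes the CPU's priority mask and can be delivered.
 */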

/*
 * The kvm_inject_* utilities are used by the guest to ask the host to inject
 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
 */

typedef enum {
	KVM_INJECT_EDGE_IRQ_LINE = 1,
	KVM_SET_IRQ_LINE,
	KVM_SET_IRQ_LINE_HIGH,
	KVM_SET_LEVEL_INFO_HIGH,
	KVM_INJECT_IRQFD,
	KVM_WRITE_ISPENDR,
	KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;
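
/*
 * Host-side mapping of each command (see run_guest_cmd()): the *_IRQ_LINE
 * commands go through the KVM_IRQ_LINE ioctl, KVM_SET_LEVEL_INFO_HIGH uses
 * the KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO device attribute, KVM_INJECT_IRQFD
 * goes through GSI routing plus an irqfd, and the KVM_WRITE_IS*R commands
 * write the distributor's ISPENDR/ISACTIVER registers via the device API.
 */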

struct kvm_inject_args {
	kvm_inject_cmd cmd;
	uint32_t first_intid;
	uint32_t num;
	int level;
	bool expect_failure;
};

/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure);

/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args);

#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)		\
	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)

#define KVM_INJECT_MULTI(cmd, intid, num)				\
	_KVM_INJECT_MULTI(cmd, intid, num, false)

#define _KVM_INJECT(cmd, intid, expect_failure)				\
	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)

#define KVM_INJECT(cmd, intid)						\
	_KVM_INJECT_MULTI(cmd, intid, 1, false)

#define KVM_ACTIVATE(cmd, intid)					\
	kvm_inject_call(cmd, intid, 1, 1, false)
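
/*
 * Example usage (a sketch): KVM_INJECT(KVM_INJECT_EDGE_IRQ_LINE, MIN_SPI)
 * expands to kvm_inject_call(KVM_INJECT_EDGE_IRQ_LINE, MIN_SPI, 1, -1, false),
 * i.e., a GUEST_SYNC asking the host to pulse one edge IRQ and to expect
 * success.
 */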

struct kvm_inject_desc {
	kvm_inject_cmd cmd;
	/* can inject SGIs, PPIs, and/or SPIs. */
	bool sgi, ppi, spi;
};

static struct kvm_inject_desc inject_edge_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_INJECT_EDGE_IRQ_LINE,		false, false, true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			true,  false, true },
	{ 0, },
};

static struct kvm_inject_desc inject_level_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_SET_IRQ_LINE_HIGH,		false, true,  true },
	{ KVM_SET_LEVEL_INFO_HIGH,		false, true,  true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			false, true,  true },
	{ 0, },
};

static struct kvm_inject_desc set_active_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_WRITE_ISACTIVER,			true,  true,  true },
	{ 0, },
};

#define for_each_inject_fn(t, f)					\
	for ((f) = (t); (f)->cmd; (f)++)

#define for_each_supported_inject_fn(args, t, f)			\
	for_each_inject_fn(t, f)					\
		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)

#define for_each_supported_activate_fn(args, t, f)			\
	for_each_supported_inject_fn((args), (t), (f))
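
/*
 * Note: for_each_supported_inject_fn() filters out KVM_INJECT_IRQFD when
 * KVM_CAP_IRQFD is absent, so every other injection method still runs on
 * hosts without irqfd support.
 */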

/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];

static void reset_stats(void)
{
	int i;

	irq_handled = 0;
	for (i = 0; i <= MAX_SPI; i++)
		irqnr_received[i] = 0;
}

static uint64_t gic_read_ap1r0(void)
{
	uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);

	dsb(sy);
	return reg;
}

static void gic_write_ap1r0(uint64_t val)
{
	write_sysreg_s(val, SYS_ICC_AP1R0_EL1);
	isb();
}

static void guest_set_irq_line(uint32_t intid, uint32_t level);

static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
	uint32_t intid = gic_get_and_ack_irq();

	if (intid == IAR_SPURIOUS)
		return;

	GUEST_ASSERT(gic_irq_get_active(intid));

	if (!level_sensitive)
		GUEST_ASSERT(!gic_irq_get_pending(intid));

	if (level_sensitive)
		guest_set_irq_line(intid, 0);

	GUEST_ASSERT(intid < MAX_SPI);
	irqnr_received[intid] += 1;
	irq_handled += 1;

	gic_set_eoi(intid);
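	/*
	 * EOI performs the priority drop, so with a single IRQ in flight no
	 * active priority bits should remain in AP1R0; deactivation is a
	 * separate DIR write only when eoi_split is set.
	 */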
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	if (eoi_split)
		gic_set_dir(intid);

	GUEST_ASSERT(!gic_irq_get_active(intid));
	GUEST_ASSERT(!gic_irq_get_pending(intid));
}

static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure)
{
	struct kvm_inject_args args = {
		.cmd = cmd,
		.first_intid = first_intid,
		.num = num,
		.level = level,
		.expect_failure = expect_failure,
	};
	GUEST_SYNC(&args);
}

#define GUEST_ASSERT_IAR_EMPTY()					\
do {									\
	uint32_t _intid;						\
	_intid = gic_get_and_ack_irq();					\
	GUEST_ASSERT(_intid == IAR_SPURIOUS);				\
} while (0)

#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev)				\
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)		\
{									\
	guest_irq_generic_handler(split, lev);				\
}
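
/*
 * For example, GENERATE_GUEST_IRQ_HANDLER(1, 0) expands to
 * guest_irq_handler_10(), a handler that calls
 * guest_irq_generic_handler(1, 0), i.e., split EOI with edge-triggered
 * semantics.
 */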

GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);

static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
	{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
	{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};
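
/* The table is indexed as guest_irq_handlers[eoi_split][level_sensitive]. */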

static void reset_priorities(struct test_args *args)
{
	int i;

	for (i = 0; i < args->nr_irqs; i++)
		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}

static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}

static void test_inject_fail(struct test_args *args,
		uint32_t intid, kvm_inject_cmd cmd)
{
	reset_stats();

	_KVM_INJECT(cmd, intid, true);
	/* no IRQ to handle on entry */

	GUEST_ASSERT_EQ(irq_handled, 0);
	GUEST_ASSERT_IAR_EMPTY();
}

static void guest_inject(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t i;

	reset_stats();

	/* Cycle over all priorities to make things more interesting. */
	for (i = first_intid; i < num + first_intid; i++)
		gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);

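	/* Mask IRQs (set PSTATE.I) so injected IRQs pend until the wfi loop. */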
	asm volatile("msr daifset, #2" : : : "memory");
	KVM_INJECT_MULTI(cmd, first_intid, num);

	while (irq_handled < num) {
		wfi();
		local_irq_enable();
		isb(); /* handle IRQ */
		local_irq_disable();
	}
	local_irq_enable();

	GUEST_ASSERT_EQ(irq_handled, num);
	for (i = first_intid; i < num + first_intid; i++)
		GUEST_ASSERT_EQ(irqnr_received[i], 1);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

/*
 * Restore the active state of multiple concurrent IRQs (num IRQs starting
 * at first_intid). This does what a live-migration would do on the
 * destination side assuming there are some active IRQs that were not
 * deactivated yet.
 */
static void guest_restore_active(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t prio, intid, ap1r;
	int i;

	/*
	 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
	 * in descending order, so intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
		GUEST_ASSERT(prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	/*
	 * In a real migration, KVM would restore all GIC state before running
	 * guest code.
	 */
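	/*
	 * Along with the active state, restore the active-priority bits that
	 * the acks would have set; AP1R0 tracks one bit per active priority
	 * (a sketch of what a migration would transfer as part of GIC state).
	 */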
	for (i = 0; i < num; i++) {
		intid = i + first_intid;
		KVM_ACTIVATE(cmd, intid);
		ap1r = gic_read_ap1r0();
		ap1r |= 1U << i;
		gic_write_ap1r0(ap1r);
	}

	/* This is where the "migration" would occur. */

	/* Finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();
}

/*
 * Polls the IAR until it's not a spurious interrupt.
 *
 * This function should only be called with IRQs masked: it acks (and thereby
 * activates) the pending interrupt instead of taking the exception.
 */
static uint32_t wait_for_and_activate_irq(void)
{
	uint32_t intid;

	do {
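		/* WFI wakes on a pending interrupt even with PSTATE.I set. */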
		asm volatile("wfi" : : : "memory");
		intid = gic_get_and_ack_irq();
	} while (intid == IAR_SPURIOUS);

	return intid;
}

/*
 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
 * handle them without handling the actual exceptions. This is done by masking
 * interrupts for the whole test.
 */
static void test_inject_preemption(struct test_args *args,
		uint32_t first_intid, int num,
		const unsigned long *exclude,
		kvm_inject_cmd cmd)
{
	uint32_t intid, prio, step = KVM_PRIO_STEPS;
	int i;

	/*
	 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
	 * in descending order, so intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
		GUEST_ASSERT(prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	local_irq_disable();

	for (i = 0; i < num; i++) {
		uint32_t tmp;
		intid = i + first_intid;

		if (exclude && test_bit(i, exclude))
			continue;

		KVM_INJECT(cmd, intid);
		/* Each successive IRQ will preempt the previous one. */
		tmp = wait_for_and_activate_irq();
		GUEST_ASSERT_EQ(tmp, intid);
		if (args->level_sensitive)
			guest_set_irq_line(intid, 0);
	}

	/* Finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;

		if (exclude && test_bit(intid - first_intid, exclude))
			continue;

		gic_set_eoi(intid);
	}

	if (args->eoi_split) {
		for (i = 0; i < num; i++) {
			intid = i + first_intid;

			if (exclude && test_bit(i, exclude))
				continue;

			gic_set_dir(intid);
		}
	}

	local_irq_enable();

	for (i = 0; i < num; i++) {
		if (exclude && test_bit(i, exclude))
			continue;

		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	}
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
	uint32_t nr_irqs = args->nr_irqs;

	if (f->sgi) {
		guest_inject(args, MIN_SGI, 1, f->cmd);
		guest_inject(args, 0, 16, f->cmd);
	}

	if (f->ppi)
		guest_inject(args, MIN_PPI, 1, f->cmd);

	if (f->spi) {
		guest_inject(args, MIN_SPI, 1, f->cmd);
		guest_inject(args, nr_irqs - 1, 1, f->cmd);
		guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
	}
}

static void test_injection_failure(struct test_args *args,
		struct kvm_inject_desc *f)
{
	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
	int i;

	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
		test_inject_fail(args, bad_intid[i], f->cmd);
}

static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
	/* Timer PPIs cannot be injected from userspace */
	static const unsigned long ppi_exclude = (BIT(27 - MIN_PPI) |
						  BIT(30 - MIN_PPI) |
						  BIT(28 - MIN_PPI) |
						  BIT(26 - MIN_PPI));

	if (f->sgi)
		test_inject_preemption(args, MIN_SGI, 16, NULL, f->cmd);

	if (f->ppi)
		test_inject_preemption(args, MIN_PPI, 16, &ppi_exclude, f->cmd);

	if (f->spi)
		test_inject_preemption(args, MIN_SPI, 31, NULL, f->cmd);
}

static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
	if (f->sgi)
		guest_restore_active(args, MIN_SGI, 16, f->cmd);

	if (f->ppi)
		guest_restore_active(args, MIN_PPI, 16, f->cmd);

	if (f->spi)
		guest_restore_active(args, MIN_SPI, 31, f->cmd);
}

static void guest_code(struct test_args *args)
{
	uint32_t i, nr_irqs = args->nr_irqs;
	bool level_sensitive = args->level_sensitive;
	struct kvm_inject_desc *f, *inject_fns;

	gic_init(GIC_V3, 1);

	for (i = MIN_SPI; i < nr_irqs; i++)
		gic_irq_set_config(i, !level_sensitive);

	for (i = 0; i < nr_irqs; i++)
		gic_irq_enable(i);

	gic_set_eoi_split(args->eoi_split);

	reset_priorities(args);
	gic_set_priority_mask(CPU_PRIO_MASK);

	inject_fns = level_sensitive ? inject_level_fns
				     : inject_edge_fns;

	local_irq_enable();

	/* Start the tests. */
	for_each_supported_inject_fn(args, inject_fns, f) {
		test_injection(args, f);
		test_preemption(args, f);
		test_injection_failure(args, f);
	}

	/*
	 * Restore the active state of IRQs. This would happen when live
	 * migrating IRQs in the middle of being handled.
	 */
	for_each_supported_activate_fn(args, set_active_fns, f)
		test_restore_active(args, f);

	GUEST_DONE();
}

static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
			struct test_args *test_args, bool expect_failure)
{
	int ret;

	if (!expect_failure) {
		kvm_arm_irq_line(vm, intid, level);
	} else {
		/* The interface doesn't allow larger intids. */
		if (intid > KVM_ARM_IRQ_NUM_MASK)
			return;

		ret = _kvm_arm_irq_line(vm, intid, level);
		TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause KVM_IRQ_LINE "
				"error: rc: %i errno: %i", intid, ret, errno);
	}
}

static void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
			bool expect_failure)
{
	if (!expect_failure) {
		kvm_irq_set_level_info(gic_fd, intid, level);
	} else {
		int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
		/*
		 * The kernel silently fails for invalid SPIs and SGIs (which
		 * are not level-sensitive). It only checks that the intid
		 * doesn't spill over 1U << 10 (the max reserved SPI). Also,
		 * callers are supposed to mask the intid with 0x3ff (1023).
		 */
		if (intid > VGIC_MAX_RESERVED)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	struct kvm_irq_routing *routing;
	int ret;
	uint64_t i;

	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);

	routing = kvm_gsi_routing_create();
	for (i = intid; i < (uint64_t)intid + num; i++)
		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);

	if (!expect_failure) {
		kvm_gsi_routing_write(vm, routing);
	} else {
		ret = _kvm_gsi_routing_write(vm, routing);
		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
					struct kvm_vcpu *vcpu,
					bool expect_failure)
{
	/*
	 * Ignore this when expecting failure as invalid intids will lead to
	 * either trying to inject SGIs when we configured the test to be
	 * level_sensitive (or the reverse), or inject large intids which
	 * will lead to writing above the ISPENDR register space (and we
	 * don't want to do that either).
	 */
	if (!expect_failure)
		kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}

static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	int fd[MAX_SPI];
	uint64_t val;
	int ret, f;
	uint64_t i;

	/*
	 * There is no way to try injecting an SGI or PPI as the interface
	 * starts counting from the first SPI (above the private ones), so just
	 * exit.
	 */
	if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
		return;

	kvm_set_gsi_routing_irqchip_check(vm, intid, num,
			kvm_max_routes, expect_failure);

	/*
	 * If expect_failure, then just try to inject anyway. These injections
	 * will silently fail. And in any case, the guest will check that no
	 * actual interrupt was injected for those cases.
	 */

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		fd[f] = kvm_new_eventfd();

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		assert(i <= (uint64_t)UINT_MAX);
		kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]);
	}

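	/*
	 * Writing a nonzero count to an eventfd fires the attached irqfd,
	 * which injects the routed SPI.
	 */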
	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		val = 1;
		ret = write(fd[f], &val, sizeof(uint64_t));
		TEST_ASSERT(ret == sizeof(uint64_t),
			    __KVM_SYSCALL_ERROR("write()", ret));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		kvm_close(fd[f]);
}

/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i)				\
	for ((tmp) = (i) = (first);					\
		(tmp) < (uint64_t)(first) + (uint64_t)(num);		\
		(tmp)++, (i)++)
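
/*
 * tmp must be a 64-bit lvalue: it carries the loop-bound check so that i
 * (a uint32_t) can legitimately hold 0xffffffff without the loop condition
 * overflowing.
 */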

static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
			struct kvm_inject_args *inject_args,
			struct test_args *test_args)
{
	kvm_inject_cmd cmd = inject_args->cmd;
	uint32_t intid = inject_args->first_intid;
	uint32_t num = inject_args->num;
	int level = inject_args->level;
	bool expect_failure = inject_args->expect_failure;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t tmp;
	uint32_t i;

	/* handles the valid case: intid=0xffffffff num=1 */
	assert(intid < UINT_MAX - num || num == 1);

	switch (cmd) {
	case KVM_INJECT_EDGE_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 0, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, level, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		break;
	case KVM_SET_LEVEL_INFO_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_set_level_info_check(gic_fd, i, 1,
					expect_failure);
		break;
	case KVM_INJECT_IRQFD:
		kvm_routing_and_irqfd_check(vm, intid, num,
					test_args->kvm_max_routes,
					expect_failure);
		break;
	case KVM_WRITE_ISPENDR:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
					expect_failure);
		break;
	case KVM_WRITE_ISACTIVER:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_isactiver(gic_fd, i, vcpu);
		break;
	default:
		break;
	}
}

static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args)
{
	struct kvm_inject_args *kvm_args_hva;
	vm_vaddr_t kvm_args_gva;

	kvm_args_gva = uc->args[1];
	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
	memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}

static void print_args(struct test_args *args)
{
	printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
			args->nr_irqs, args->level_sensitive,
			args->eoi_split);
}

static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
	struct ucall uc;
	int gic_fd;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_inject_args inject_args;
	vm_vaddr_t args_gva;

	struct test_args args = {
		.nr_irqs = nr_irqs,
		.level_sensitive = level_sensitive,
		.eoi_split = eoi_split,
		.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
		.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
	};

	print_args(&args);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	/* Set up the guest args page (so it gets the args). */
	args_gva = vm_vaddr_alloc_page(vm);
	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
	vcpu_args_set(vcpu, 1, args_gva);

	gic_fd = vgic_v3_setup(vm, 1, nr_irqs);

	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
		guest_irq_handlers[args.eoi_split][args.level_sensitive]);

	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			kvm_inject_get_call(vm, &uc, &inject_args);
			run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	close(gic_fd);
	kvm_vm_free(vm);
}

static void guest_code_asym_dir(struct test_args *args, int cpuid)
{
	gic_init(GIC_V3, 2);

	gic_set_eoi_split(1);
	gic_set_priority_mask(CPU_PRIO_MASK);

	if (cpuid == 0) {
		uint32_t intid;

		local_irq_disable();

		gic_set_priority(MIN_SPI, IRQ_DEFAULT_PRIO);
		gic_irq_enable(MIN_SPI);
		gic_irq_set_pending(MIN_SPI);

		intid = wait_for_and_activate_irq();
		GUEST_ASSERT_EQ(intid, MIN_SPI);

		gic_set_eoi(intid);
		isb();

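		/*
		 * With EOImode=1 the EOI above only dropped the priority;
		 * hand the intid over so that vcpu1 deactivates it via DIR.
		 */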
		WRITE_ONCE(args->shared_data, MIN_SPI);
		dsb(ishst);

		do {
			dsb(ishld);
		} while (READ_ONCE(args->shared_data) == MIN_SPI);
		GUEST_ASSERT(!gic_irq_get_active(MIN_SPI));
	} else {
		do {
			dsb(ishld);
		} while (READ_ONCE(args->shared_data) != MIN_SPI);

		gic_set_dir(MIN_SPI);
		isb();

		WRITE_ONCE(args->shared_data, 0);
		dsb(ishst);
	}

	GUEST_DONE();
}

static void guest_code_group_en(struct test_args *args, int cpuid)
{
	uint32_t intid;

	gic_init(GIC_V3, 2);

	gic_set_eoi_split(0);
	gic_set_priority_mask(CPU_PRIO_MASK);
	/* SGI0 is G0, which is disabled */
	gic_irq_set_group(0, 0);

	/* Configure all SGIs with decreasing priority */
	for (intid = 0; intid < MIN_PPI; intid++) {
		gic_set_priority(intid, (intid + 1) * 8);
		gic_irq_enable(intid);
		gic_irq_set_pending(intid);
	}

	/* Ack and EOI all G1 interrupts */
	for (int i = 1; i < MIN_PPI; i++) {
		intid = wait_for_and_activate_irq();

		GUEST_ASSERT(intid < MIN_PPI);
		gic_set_eoi(intid);
		isb();
	}

	/*
	 * Check that SGI0 is still pending, inactive, and that we cannot
	 * ack anything.
	 */
	GUEST_ASSERT(gic_irq_get_pending(0));
	GUEST_ASSERT(!gic_irq_get_active(0));
	GUEST_ASSERT_IAR_EMPTY();
	GUEST_ASSERT(read_sysreg_s(SYS_ICC_IAR0_EL1) == IAR_SPURIOUS);

	/* Open the G0 gates, and verify we can ack SGI0 */
	write_sysreg_s(1, SYS_ICC_IGRPEN0_EL1);
	isb();

	do {
		intid = read_sysreg_s(SYS_ICC_IAR0_EL1);
	} while (intid == IAR_SPURIOUS);

	GUEST_ASSERT(intid == 0);
	GUEST_DONE();
}

static void guest_code_timer_spi(struct test_args *args, int cpuid)
{
	uint32_t intid;
	uint64_t val;

	gic_init(GIC_V3, 2);

	gic_set_eoi_split(1);
	gic_set_priority_mask(CPU_PRIO_MASK);

	/* Add a pending SPI so that KVM starts trapping DIR */
	gic_set_priority(MIN_SPI + cpuid, IRQ_DEFAULT_PRIO);
	gic_irq_set_pending(MIN_SPI + cpuid);

	/* Configure the timer with a higher priority, make it pending */
	gic_set_priority(27, IRQ_DEFAULT_PRIO - 8);

	isb();
	val = read_sysreg(cntvct_el0);
	write_sysreg(val, cntv_cval_el0);
	write_sysreg(1, cntv_ctl_el0);
	isb();
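	/*
	 * PPI 27 is the EL1 virtual timer. Programming CVAL to the current
	 * CNTVCT value with CTL.ENABLE set (and IMASK clear) satisfies the
	 * timer condition immediately, so its level is asserted right away.
	 */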

	GUEST_ASSERT(gic_irq_get_pending(27));

	/* Enable both interrupts */
	gic_irq_enable(MIN_SPI + cpuid);
	gic_irq_enable(27);

	/* The timer must fire */
	intid = wait_for_and_activate_irq();
	GUEST_ASSERT(intid == 27);

	/* Check that we can deassert it */
	write_sysreg(0, cntv_ctl_el0);
	isb();

	GUEST_ASSERT(!gic_irq_get_pending(27));

	/*
	 * Priority drop, deactivation -- we expect that the host
	 * deactivation will have been effective
	 */
	gic_set_eoi(27);
	gic_set_dir(27);

	GUEST_ASSERT(!gic_irq_get_active(27));

	/* Do it one more time */
	isb();
	val = read_sysreg(cntvct_el0);
	write_sysreg(val, cntv_cval_el0);
	write_sysreg(1, cntv_ctl_el0);
	isb();

	GUEST_ASSERT(gic_irq_get_pending(27));

	/* The timer must fire again */
	intid = wait_for_and_activate_irq();
	GUEST_ASSERT(intid == 27);

	GUEST_DONE();
}

static void *test_vcpu_run(void *arg)
{
	struct kvm_vcpu *vcpu = arg;
	struct ucall uc;

	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			return NULL;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

	return NULL;
}

static void test_vgic_two_cpus(void *gcode)
{
	pthread_t thr[2];
	struct kvm_vcpu *vcpus[2];
	struct test_args args = {};
	struct kvm_vm *vm;
	vm_vaddr_t args_gva;
	int gic_fd, ret;

	vm = vm_create_with_vcpus(2, gcode, vcpus);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpus[0]);
	vcpu_init_descriptor_tables(vcpus[1]);

	/* Set up the guest args page (so it gets the args). */
	args_gva = vm_vaddr_alloc_page(vm);
	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
	vcpu_args_set(vcpus[0], 2, args_gva, 0);
	vcpu_args_set(vcpus[1], 2, args_gva, 1);

	gic_fd = vgic_v3_setup(vm, 2, 64);

	ret = pthread_create(&thr[0], NULL, test_vcpu_run, vcpus[0]);
	if (ret)
		TEST_FAIL("Can't create thread for vcpu 0 (%d)", ret);
	ret = pthread_create(&thr[1], NULL, test_vcpu_run, vcpus[1]);
	if (ret)
		TEST_FAIL("Can't create thread for vcpu 1 (%d)", ret);

	pthread_join(thr[0], NULL);
	pthread_join(thr[1], NULL);

	close(gic_fd);
	kvm_vm_free(vm);
}

static void help(const char *name)
{
	printf(
	"\n"
	"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
	printf(" -n: specify number of IRQs to set up the vgic with. "
		"It has to be a multiple of 32 and between 64 and 1024.\n");
	printf(" -e: if 1 then EOI is split into a write to DIR on top "
		"of writing EOI.\n");
	printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
	puts("");
	exit(1);
}

int main(int argc, char **argv)
{
	uint32_t nr_irqs = 64;
	bool default_args = true;
	bool level_sensitive = false;
	int opt;
	bool eoi_split = false;

	TEST_REQUIRE(kvm_supports_vgic_v3());
	test_disable_default_vgic();

	while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
		switch (opt) {
		case 'n':
			nr_irqs = atoi_non_negative("Number of IRQs", optarg);
			if (nr_irqs > 1024 || nr_irqs % 32)
				help(argv[0]);
			break;
		case 'e':
			eoi_split = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'l':
			level_sensitive = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	/*
	 * If the user only specified nr_irqs, then run all combinations of
	 * eoi_split and level_sensitive, plus the two-vCPU tests.
	 */
	if (default_args) {
		test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
		test_vgic_two_cpus(guest_code_asym_dir);
		test_vgic_two_cpus(guest_code_group_en);
		test_vgic_two_cpus(guest_code_timer_spi);
	} else {
		test_vgic(nr_irqs, level_sensitive, eoi_split);
	}

	return 0;
}