// SPDX-License-Identifier: GPL-2.0-only
/*
 * sbi_pmu_test.c - Tests the riscv64 SBI PMU functionality.
 *
 * Copyright (c) 2024, Rivos Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
#include "sbi.h"
#include "arch_timer.h"
#include "ucall_common.h"

/* Maximum counters (firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];

/* Snapshot shared memory data */
#define PMU_SNAPSHOT_GPA_BASE BIT(30)
static void *snapshot_gva;
static vm_paddr_t snapshot_gpa;

static int vcpu_shared_irq_count;
static int counter_in_use;

/* Cache the available counters in a bitmask */
static unsigned long counter_mask_available;

static bool illegal_handler_invoked;

#define SBI_PMU_TEST_BASIC	BIT(0)
#define SBI_PMU_TEST_EVENTS	BIT(1)
#define SBI_PMU_TEST_SNAPSHOT	BIT(2)
#define SBI_PMU_TEST_OVERFLOW	BIT(3)

#define SBI_PMU_OVERFLOW_IRQNUM_DEFAULT 5
struct test_args {
	int disabled_tests;
	int overflow_irqnum;
};

static struct test_args targs;

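/*
 * Read a hardware counter CSR by number. csr_read() requires a
 * compile-time constant CSR, so generate a switch over the entire
 * CSR_CYCLE..CSR_CYCLE + 31 range with nested case-expanding macros.
 */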
unsigned long pmu_csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)	{\
	case __csr_num:				\
		__val = csr_read(__csr_num);	\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)	{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)	{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)	{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

static inline void dummy_func_loop(uint64_t iter)
{
	int i = 0;

	while (i < iter) {
		asm volatile("nop");
		i++;
	}
}

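/* Start a single counter via SBI, optionally loading an initial value */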
static void start_counter(unsigned long counter, unsigned long start_flags,
			  unsigned long ival)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
			ival, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
}

/* This should be invoked only for the reset counter use case */
static void stop_reset_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1,
			stop_flags | SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	__GUEST_ASSERT(ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unable to stop counter %ld\n", counter);
}

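/*
 * Stop a counter; SBI_ERR_ALREADY_STOPPED is tolerated because the
 * overflow irq handler may already have stopped the counter.
 */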
static void stop_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
			0, 0, 0);
	__GUEST_ASSERT(ret.error == 0 || ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unable to stop counter %ld error %ld\n", counter, ret.error);
}

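/*
 * Reading an unconfigured hpmcounter CSR from the guest should trap as an
 * illegal instruction. Decode the trapping instruction to confirm it was
 * a CSR access to a counter CSR, then skip past it.
 */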
static void guest_illegal_exception_handler(struct pt_regs *regs)
{
	unsigned long insn;
	int opcode, csr_num, funct3;

	__GUEST_ASSERT(regs->cause == EXC_INST_ILLEGAL,
		       "Unexpected exception handler %lx\n", regs->cause);

	insn = regs->badaddr;
	opcode = (insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT;
	__GUEST_ASSERT(opcode == INSN_OPCODE_SYSTEM,
		       "Unexpected instruction with opcode 0x%x insn 0x%lx\n", opcode, insn);

	csr_num = GET_CSR_NUM(insn);
	funct3 = GET_RM(insn);
	/* Validate if it is a CSR read/write operation */
	__GUEST_ASSERT(funct3 <= 7 && (funct3 != 0 && funct3 != 4),
		       "Unexpected system opcode with funct3 0x%x csr_num 0x%x\n",
		       funct3, csr_num);

	/* Validate if it is a HPMCOUNTER CSR operation */
	__GUEST_ASSERT((csr_num >= CSR_CYCLE && csr_num <= CSR_HPMCOUNTER31),
		       "Unexpected csr_num 0x%x\n", csr_num);

	illegal_handler_invoked = true;
	/* skip the trapping instruction */
	regs->epc += 4;
}

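/*
 * PMU overflow (LCOFI) interrupt handler: stop the in-use counter, clear
 * the pending bit, check the snapshot overflow mask and count the irq.
 */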
static void guest_irq_handler(struct pt_regs *regs)
{
	unsigned int irq_num = regs->cause & ~CAUSE_IRQ_FLAG;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	unsigned long overflown_mask;

	/* Validate that we are in the correct irq handler */
	GUEST_ASSERT_EQ(irq_num, IRQ_PMU_OVF);

	/* Stop all counters first to avoid further interrupts */
	stop_counter(counter_in_use, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	csr_clear(CSR_SIP, BIT(IRQ_PMU_OVF));

	overflown_mask = READ_ONCE(snapshot_data->ctr_overflow_mask);
	GUEST_ASSERT(overflown_mask & 0x01);

	WRITE_ONCE(vcpu_shared_irq_count, vcpu_shared_irq_count + 1);
}

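/*
 * Match and configure a counter for the given event via
 * SBI_EXT_PMU_COUNTER_CFG_MATCH and return its logical index.
 */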
static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
				       unsigned long cflags,
				       unsigned long event)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, event, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "config matching failed %ld\n", ret.error);
	GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);
	GUEST_ASSERT(BIT(ret.value) & counter_mask_available);

	return ret.value;
}

static unsigned long get_num_counters(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);

	__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
	__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS,
		       "Invalid number of counters %ld\n", ret.value);

	return ret.value;
}

static void update_counter_info(int num_counters)
{
	int i = 0;
	struct sbiret ret;

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo_arr[i].value = ret.value;
		counter_mask_available |= BIT(i);
	}

	GUEST_ASSERT(counter_mask_available > 0);
}

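/* Read a firmware counter through the SBI_EXT_PMU_COUNTER_FW_READ call */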
static unsigned long read_fw_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, idx, 0, 0, 0, 0, 0);
	GUEST_ASSERT(ret.error == 0);
	return ret.value;
}

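/* Read a counter value from its CSR (hardware) or via SBI (firmware) */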
static unsigned long read_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	unsigned long counter_val = 0;

	__GUEST_ASSERT(ctrinfo.type < 2, "Invalid counter type %d", ctrinfo.type);

	if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW)
		counter_val = pmu_csr_read_num(ctrinfo.csr);
	else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW)
		counter_val = read_fw_counter(idx, ctrinfo);

	return counter_val;
}

static inline void verify_sbi_requirement_assert(void)
{
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	if (get_host_sbi_spec_version() < sbi_mk_version(2, 0))
		__GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot");
}

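/*
 * Program the snapshot shared memory address. The 64-bit GPA is passed as
 * a lo/hi pair so the call also works on 32-bit; a GPA of -1 (all-ones in
 * both halves) disables the shared memory.
 */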
static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				      lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}

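/*
 * Verify one event end to end: the counter advances while running, a
 * restart with a lower initial value brings it back down, and counting
 * resumes correctly from an explicitly set initial value.
 */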
static void test_pmu_event(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX / 2);
	stop_counter(counter, 0);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	stop_counter(counter, 0);
	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

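/*
 * Same checks as test_pmu_event(), but counter values are exchanged
 * through the snapshot shared memory instead of CSR/SBI reads.
 */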
static void test_pmu_event_snapshot(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	/* The counter value is updated w.r.t relative index of cbase */
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	WRITE_ONCE(snapshot_data->ctr_values[0], ULONG_MAX / 2);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_pre = READ_ONCE(snapshot_data->ctr_values[0]);

	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

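/*
 * Start a counter close to its maximum value and wait for the overflow
 * interrupt. The irq handler stops the counter and counts the interrupt.
 */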
static void test_pmu_event_overflow(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_post;
	unsigned long counter_init_value = ULONG_MAX - 10000;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_in_use = counter;

	/* The counter value is updated w.r.t relative index of cbase passed to start/stop */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	udelay(msecs_to_usecs(2000));
	/* irq handler should have stopped the counter */
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	/* The counter value after stopping should be less than the init value due to overflow */
	__GUEST_ASSERT(counter_value_post < counter_init_value,
		       "counter_value_post %lx counter_init_value %lx for counter\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

static void test_invalid_event(void)
{
	struct sbiret ret;
	unsigned long event = 0x1234; /* A random event */

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, 0,
			counter_mask_available, 0, event, 0, 0);
	GUEST_ASSERT_EQ(ret.error, SBI_ERR_NOT_SUPPORTED);
}

static void test_pmu_events(void)
{
	int num_counters = 0;

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Sanity testing for any random invalid event */
	test_invalid_event();

	/* Only these two events are guaranteed to be present */
	test_pmu_event(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_basic_sanity(void)
{
	long out_val = 0;
	bool probe;
	struct sbiret ret;
	int num_counters = 0, i;
	union sbi_pmu_ctr_info ctrinfo;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	num_counters = get_num_counters();

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
				0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo.value = ret.value;

		/*
		 * Accessibility check of hardware and read capability of firmware counters.
		 * The spec doesn't mandate any initial value. No need to check any value.
		 */
		if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
			pmu_csr_read_num(ctrinfo.csr);
			GUEST_ASSERT(illegal_handler_invoked);
		} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
			read_fw_counter(i, ctrinfo);
		}
	}

	GUEST_DONE();
}

static void test_pmu_events_snapshot(void)
{
	int num_counters = 0;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	int i;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Validate shared memory access */
	GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_overflow_mask), 0);
	for (i = 0; i < num_counters; i++) {
		if (counter_mask_available & (BIT(i)))
			GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_values[i]), 0);
	}
	/* Only these two events are guaranteed to be present */
	test_pmu_event_snapshot(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event_snapshot(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_events_overflow(void)
{
	int num_counters = 0, i = 0;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);
	csr_set(CSR_IE, BIT(IRQ_PMU_OVF));
	local_irq_enable();

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/*
	 * QEMU supports overflow for cycle/instruction.
	 * This test may fail on any platform that does not support overflow
	 * for these two events.
	 */
	for (i = 0; i < targs.overflow_irqnum; i++)
		test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);

	vcpu_shared_irq_count = 0;

	for (i = 0; i < targs.overflow_irqnum; i++)
		test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);

	GUEST_DONE();
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
	case UCALL_SYNC:
		break;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
		break;
	}
}

void test_vm_destroy(struct kvm_vm *vm)
{
	memset(ctrinfo_arr, 0, sizeof(union sbi_pmu_ctr_info) * RISCV_MAX_PMU_COUNTERS);
	counter_mask_available = 0;
	kvm_vm_free(vm);
}

static void test_vm_basic_test(void *guest_code)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	vm_init_vector_tables(vm);
	/* Illegal instruction handler is required to verify read access without configuration */
	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, guest_illegal_exception_handler);

	vcpu_init_vector_tables(vcpu);
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu = NULL;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

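/*
 * Back the snapshot area with one identity-mapped guest page and publish
 * its GVA/GPA to the guest.
 */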
static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	/* PMU Snapshot requires a single page only */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, PMU_SNAPSHOT_GPA_BASE, 1, 1, 0);
	/* PMU_SNAPSHOT_GPA_BASE is identity mapped */
	virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1);

	snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE);
	snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gpa);
}

static void test_vm_events_snapshot_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	test_vm_setup_snapshot_mem(vm, vcpu);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_overflow(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	__TEST_REQUIRE(__vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSCOFPMF),
		       "Sscofpmf is not available, skipping overflow test");

	test_vm_setup_snapshot_mem(vm, vcpu);
	vm_init_vector_tables(vm);
	vm_install_interrupt_handler(vm, guest_irq_handler);

	vcpu_init_vector_tables(vcpu);
	/* Initialize guest timer frequency. */
	timer_freq = vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency));

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, timer_freq);
	sync_global_to_guest(vm, vcpu_shared_irq_count);
	sync_global_to_guest(vm, targs);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-t <test name>] [-n <number of LCOFI interrupts for overflow test>]\n",
		name);
	pr_info("\t-t: Test to run (default all). Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
	pr_info("\t-n: Number of LCOFI interrupts to trigger for each event in overflow test (default: %d)\n",
		SBI_PMU_OVERFLOW_IRQNUM_DEFAULT);
	pr_info("\t-h: print this help screen\n");
}

static bool parse_args(int argc, char *argv[])
{
	int opt;
	int temp_disabled_tests = SBI_PMU_TEST_BASIC | SBI_PMU_TEST_EVENTS | SBI_PMU_TEST_SNAPSHOT |
				  SBI_PMU_TEST_OVERFLOW;
	int overflow_interrupts = 0;

	while ((opt = getopt(argc, argv, "ht:n:")) != -1) {
		switch (opt) {
		case 't':
			if (!strncmp("basic", optarg, 5))
				temp_disabled_tests &= ~SBI_PMU_TEST_BASIC;
			else if (!strncmp("events", optarg, 6))
				temp_disabled_tests &= ~SBI_PMU_TEST_EVENTS;
			else if (!strncmp("snapshot", optarg, 8))
				temp_disabled_tests &= ~SBI_PMU_TEST_SNAPSHOT;
			else if (!strncmp("overflow", optarg, 8))
				temp_disabled_tests &= ~SBI_PMU_TEST_OVERFLOW;
			else
				goto done;
			targs.disabled_tests = temp_disabled_tests;
			break;
		case 'n':
			overflow_interrupts = atoi_positive("Number of LCOFI", optarg);
			break;
		case 'h':
		default:
			goto done;
		}
	}

	if (overflow_interrupts > 0) {
		if (targs.disabled_tests & SBI_PMU_TEST_OVERFLOW) {
			pr_info("-n option is only available for overflow test\n");
			goto done;
		} else {
			targs.overflow_irqnum = overflow_interrupts;
		}
	}

	return true;
done:
	test_print_help(argv[0]);
	return false;
}

int main(int argc, char *argv[])
{
	targs.disabled_tests = 0;
	targs.overflow_irqnum = SBI_PMU_OVERFLOW_IRQNUM_DEFAULT;

	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	if (!(targs.disabled_tests & SBI_PMU_TEST_BASIC)) {
		test_vm_basic_test(test_pmu_basic_sanity);
		pr_info("SBI PMU basic test : PASS\n");
	}

	if (!(targs.disabled_tests & SBI_PMU_TEST_EVENTS)) {
		test_vm_events_test(test_pmu_events);
		pr_info("SBI PMU event verification test : PASS\n");
	}

	if (!(targs.disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
		test_vm_events_snapshot_test(test_pmu_events_snapshot);
		pr_info("SBI PMU event verification with snapshot test : PASS\n");
	}

	if (!(targs.disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
		test_vm_events_overflow(test_pmu_events_overflow);
		pr_info("SBI PMU event verification with overflow test : PASS\n");
	}

	return 0;
}