xref: /linux/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c (revision c94cd9508b1335b949fd13ebd269313c65492df0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * sbi_pmu_test.c - Tests the riscv64 SBI PMU functionality.
4  *
5  * Copyright (c) 2024, Rivos Inc.
6  */
7 
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <sys/types.h>
13 #include "kvm_util.h"
14 #include "test_util.h"
15 #include "processor.h"
16 #include "sbi.h"
17 #include "arch_timer.h"
18 #include "ucall_common.h"
19 
/* Maximum counters(firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64

/* Cached SBI counter info, indexed by logical counter index (filled by update_counter_info()) */
union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];

/* Snapshot shared memory data */
#define PMU_SNAPSHOT_GPA_BASE		BIT(30)
/* Guest virtual and guest physical address of the snapshot area (identity mapped) */
static void *snapshot_gva;
static vm_paddr_t snapshot_gpa;

/* Number of PMU overflow IRQs taken by the guest; checked by the overflow test */
static int vcpu_shared_irq_count;
/* Counter armed by the overflow test; the IRQ handler stops/restarts this one */
static int counter_in_use;

/* Cache the available counters in a bitmask */
static unsigned long counter_mask_available;

/* Set by the illegal-instruction handler when an unconfigured counter CSR read trapped */
static bool illegal_handler_invoked;

/* Bit flags for the "-d" command line option selecting tests to disable */
#define SBI_PMU_TEST_BASIC	BIT(0)
#define SBI_PMU_TEST_EVENTS	BIT(1)
#define SBI_PMU_TEST_SNAPSHOT	BIT(2)
#define SBI_PMU_TEST_OVERFLOW	BIT(3)

static int disabled_tests;
43 
/*
 * Read a performance-counter CSR whose number is only known at runtime.
 *
 * csr_read() requires a compile-time constant CSR number, so the nested
 * switchcase_csr_read_*() macros expand into a switch statement with one
 * 'case' (and one csr_read()) per CSR in a power-of-two sized range.
 * The two _32 expansions below cover CSR_CYCLE..CSR_CYCLE+31 and
 * CSR_CYCLEH..CSR_CYCLEH+31.  Unknown CSR numbers return 0.
 */
unsigned long pmu_csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	 \
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	 \
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	 \
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	 \
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	 \
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default :
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}
83 
84 static inline void dummy_func_loop(uint64_t iter)
85 {
86 	int i = 0;
87 
88 	while (i < iter) {
89 		asm volatile("nop");
90 		i++;
91 	}
92 }
93 
94 static void start_counter(unsigned long counter, unsigned long start_flags,
95 			  unsigned long ival)
96 {
97 	struct sbiret ret;
98 
99 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
100 			ival, 0, 0);
101 	__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
102 }
103 
/* This should be invoked only for reset counter use case */
static void stop_reset_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	/*
	 * The tests call this after the counter has already been stopped, so
	 * the SBI implementation is expected to report ALREADY_STOPPED while
	 * still honoring the RESET flag; any other result is a failure.
	 */
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1,
					stop_flags | SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	__GUEST_ASSERT(ret.error == SBI_ERR_ALREADY_STOPPED,
			       "Unable to stop counter %ld\n", counter);
}
114 
115 static void stop_counter(unsigned long counter, unsigned long stop_flags)
116 {
117 	struct sbiret ret;
118 
119 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
120 			0, 0, 0);
121 	__GUEST_ASSERT(ret.error == 0, "Unable to stop counter %ld error %ld\n",
122 			       counter, ret.error);
123 }
124 
/*
 * Guest trap handler for illegal-instruction exceptions.  Records that the
 * trap fired (checked by the basic sanity test when reading an unconfigured
 * hardware counter CSR) and resumes execution past the faulting instruction.
 */
static void guest_illegal_exception_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(regs->cause == EXC_INST_ILLEGAL,
		       "Unexpected exception handler %lx\n", regs->cause);

	illegal_handler_invoked = true;
	/*
	 * skip the trapping instruction — assumes a 4-byte (uncompressed)
	 * instruction, which holds for the csr_read() accesses this test
	 * provokes; NOTE(review): would be wrong for compressed instructions.
	 */
	regs->epc += 4;
}
134 
/*
 * PMU overflow interrupt handler for the guest.
 *
 * Mimics a real PMU driver: stop the overflown counter (snapshotting its
 * value into shared memory), acknowledge the interrupt, validate the
 * snapshot overflow mask, bump the IRQ count checked by the overflow test,
 * then restart the counter from the snapshotted value.
 */
static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int irq_num = regs->cause & ~CAUSE_IRQ_FLAG;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	unsigned long overflown_mask;
	unsigned long counter_val = 0;

	/* Validate that we are in the correct irq handler */
	GUEST_ASSERT_EQ(irq_num, IRQ_PMU_OVF);

	/* Stop all counters first to avoid further interrupts */
	stop_counter(counter_in_use, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	/* Acknowledge the pending overflow interrupt */
	csr_clear(CSR_SIP, BIT(IRQ_PMU_OVF));

	/* Bit 0 is expected: start/stop use a cbase-relative index of 0 */
	overflown_mask = READ_ONCE(snapshot_data->ctr_overflow_mask);
	GUEST_ASSERT(overflown_mask & 0x01);

	WRITE_ONCE(vcpu_shared_irq_count, vcpu_shared_irq_count+1);

	counter_val = READ_ONCE(snapshot_data->ctr_values[0]);
	/* Now start the counter to mimic the real driver behavior */
	start_counter(counter_in_use, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_val);
}
159 
160 static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
161 				       unsigned long cflags,
162 				       unsigned long event)
163 {
164 	struct sbiret ret;
165 
166 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
167 			cflags, event, 0, 0);
168 	__GUEST_ASSERT(ret.error == 0, "config matching failed %ld\n", ret.error);
169 	GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);
170 	GUEST_ASSERT(BIT(ret.value) & counter_mask_available);
171 
172 	return ret.value;
173 }
174 
175 static unsigned long get_num_counters(void)
176 {
177 	struct sbiret ret;
178 
179 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
180 
181 	__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
182 	__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS,
183 		       "Invalid number of counters %ld\n", ret.value);
184 
185 	return ret.value;
186 }
187 
188 static void update_counter_info(int num_counters)
189 {
190 	int i = 0;
191 	struct sbiret ret;
192 
193 	for (i = 0; i < num_counters; i++) {
194 		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
195 
196 		/* There can be gaps in logical counter indicies*/
197 		if (ret.error)
198 			continue;
199 		GUEST_ASSERT_NE(ret.value, 0);
200 
201 		ctrinfo_arr[i].value = ret.value;
202 		counter_mask_available |= BIT(i);
203 	}
204 
205 	GUEST_ASSERT(counter_mask_available > 0);
206 }
207 
208 static unsigned long read_fw_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
209 {
210 	struct sbiret ret;
211 
212 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, idx, 0, 0, 0, 0, 0);
213 	GUEST_ASSERT(ret.error == 0);
214 	return ret.value;
215 }
216 
217 static unsigned long read_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
218 {
219 	unsigned long counter_val = 0;
220 
221 	__GUEST_ASSERT(ctrinfo.type < 2, "Invalid counter type %d", ctrinfo.type);
222 
223 	if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW)
224 		counter_val = pmu_csr_read_num(ctrinfo.csr);
225 	else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW)
226 		counter_val = read_fw_counter(idx, ctrinfo);
227 
228 	return counter_val;
229 }
230 
231 static inline void verify_sbi_requirement_assert(void)
232 {
233 	long out_val = 0;
234 	bool probe;
235 
236 	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
237 	GUEST_ASSERT(probe && out_val == 1);
238 
239 	if (get_host_sbi_spec_version() < sbi_mk_version(2, 0))
240 		__GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot");
241 }
242 
/*
 * Register (or, with gpa == -1, disable) the PMU snapshot shared memory
 * area via SBI_EXT_PMU_SNAPSHOT_SET_SHMEM.  The 64-bit physical address is
 * split across two xlen-sized SBI arguments.
 */
static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	/* rv32: high half of the 64-bit GPA goes in the second argument */
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	/* rv64: 'lo' holds the full GPA; hi only mirrors the -1 sentinel */
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				      lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}
256 
/*
 * Verify basic counting for @event without snapshot shared memory:
 *   1. the counter must advance while running,
 *   2. re-initializing via SBI_PMU_START_FLAG_SET_INIT_VALUE must take
 *      effect (high value then low value),
 *   3. counting from an explicit initial value must advance past it.
 * The counter is stopped and reset on exit.
 */
static void test_pmu_event(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX/2);
	stop_counter(counter, 0);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	stop_counter(counter, 0);
	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	/* The low re-init must be visible, i.e. post < pre */
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed : post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	/* Release the counter: stop again (already stopped) with RESET */
	stop_reset_counter(counter, 0);
}
304 
/*
 * Same three-phase verification as test_pmu_event(), but exercising the
 * snapshot interface: counter values are exchanged through the shared
 * memory area (ctr_values[0], since start/stop use cbase 0) instead of
 * direct reads, using SBI_PMU_START_FLAG_INIT_SNAPSHOT and
 * SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT.
 */
static void test_pmu_event_snapshot(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	/* The counter value is updated w.r.t relative index of cbase */
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	WRITE_ONCE(snapshot_data->ctr_values[0], ULONG_MAX/2);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_pre = READ_ONCE(snapshot_data->ctr_values[0]);

	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	/* The low re-init must be visible, i.e. post < pre */
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed : post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	/* Release the counter: stop again (already stopped) with RESET */
	stop_reset_counter(counter, 0);
}
357 
/*
 * Arm @event's counter close to ULONG_MAX via the snapshot area and let it
 * overflow; the overflow IRQ handler (guest_irq_handler) stops the counter,
 * bumps vcpu_shared_irq_count and restarts it.  After stopping here, the
 * snapshotted value must have wrapped below the initial value.
 */
static void test_pmu_event_overflow(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_post;
	unsigned long counter_init_value = ULONG_MAX - 10000;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	/* Tell the IRQ handler which counter to stop/restart */
	counter_in_use = counter;

	/* The counter value is updated w.r.t relative index of cbase passed to start/stop */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	/* Give the overflow interrupt time to be delivered and handled */
	udelay(msecs_to_usecs(2000));
	/* irq handler should have stopped the counter */
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	/* The counter value after stopping should be less the init value due to overflow */
	__GUEST_ASSERT(counter_value_post < counter_init_value,
		       "counter_value_post %lx counter_init_value %lx for counter\n",
		       counter_value_post, counter_init_value);

	/* Release the counter: stop again (already stopped) with RESET */
	stop_reset_counter(counter, 0);
}
384 
385 static void test_invalid_event(void)
386 {
387 	struct sbiret ret;
388 	unsigned long event = 0x1234; /* A random event */
389 
390 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, 0,
391 			counter_mask_available, 0, event, 0, 0);
392 	GUEST_ASSERT_EQ(ret.error, SBI_ERR_NOT_SUPPORTED);
393 }
394 
395 static void test_pmu_events(void)
396 {
397 	int num_counters = 0;
398 
399 	/* Get the counter details */
400 	num_counters = get_num_counters();
401 	update_counter_info(num_counters);
402 
403 	/* Sanity testing for any random invalid event */
404 	test_invalid_event();
405 
406 	/* Only these two events are guaranteed to be present */
407 	test_pmu_event(SBI_PMU_HW_CPU_CYCLES);
408 	test_pmu_event(SBI_PMU_HW_INSTRUCTIONS);
409 
410 	GUEST_DONE();
411 }
412 
/*
 * Guest entry point for the basic sanity test: probe the PMU extension,
 * then for every available counter verify accessibility — a hardware
 * counter CSR read must trap to the illegal-instruction handler (counters
 * are not configured/enabled for the guest at this point), and firmware
 * counters must be readable via SBI.
 */
static void test_pmu_basic_sanity(void)
{
	long out_val = 0;
	bool probe;
	struct sbiret ret;
	int num_counters = 0, i;
	union sbi_pmu_ctr_info ctrinfo;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	num_counters = get_num_counters();

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
				0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indicies */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo.value = ret.value;

		/**
		 * Accessibility check of hardware and read capability of firmware counters.
		 * The spec doesn't mandate any initial value. No need to check any value.
		 */
		if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
			/* Unconfigured HW counter CSR read is expected to trap */
			pmu_csr_read_num(ctrinfo.csr);
			GUEST_ASSERT(illegal_handler_invoked);
		} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
			read_fw_counter(i, ctrinfo);
		}
	}

	GUEST_DONE();
}
451 
/*
 * Guest entry point for the snapshot test: register the shared memory
 * area, discover counters, verify the freshly-registered snapshot memory
 * reads back as zero, then run the snapshot event checks.
 */
static void test_pmu_events_snaphost(void)
{
	int num_counters = 0;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	int i;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Validate shared memory access */
	GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_overflow_mask), 0);
	for (i = 0; i < num_counters; i++) {
		if (counter_mask_available & (BIT(i)))
			GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_values[i]), 0);
	}
	/* Only these two events are guaranteed to be present */
	test_pmu_event_snapshot(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event_snapshot(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}
479 
/*
 * Guest entry point for the overflow test: register the snapshot area,
 * enable and unmask the PMU overflow interrupt, then overflow the two
 * guaranteed events and check each one delivered exactly one IRQ
 * (vcpu_shared_irq_count is incremented by guest_irq_handler).
 */
static void test_pmu_events_overflow(void)
{
	int num_counters = 0;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);
	/* Enable the PMU overflow interrupt and interrupts globally */
	csr_set(CSR_IE, BIT(IRQ_PMU_OVF));
	local_irq_enable();

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/*
	 * Qemu supports overflow for cycle/instruction.
	 * This test may fail on any platform that do not support overflow for these two events.
	 */
	test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, 1);

	test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, 2);

	GUEST_DONE();
}
507 
508 static void run_vcpu(struct kvm_vcpu *vcpu)
509 {
510 	struct ucall uc;
511 
512 	vcpu_run(vcpu);
513 	switch (get_ucall(vcpu, &uc)) {
514 	case UCALL_ABORT:
515 		REPORT_GUEST_ASSERT(uc);
516 		break;
517 	case UCALL_DONE:
518 	case UCALL_SYNC:
519 		break;
520 	default:
521 		TEST_FAIL("Unknown ucall %lu", uc.cmd);
522 		break;
523 	}
524 }
525 
526 void test_vm_destroy(struct kvm_vm *vm)
527 {
528 	memset(ctrinfo_arr, 0, sizeof(union sbi_pmu_ctr_info) * RISCV_MAX_PMU_COUNTERS);
529 	counter_mask_available = 0;
530 	kvm_vm_free(vm);
531 }
532 
533 static void test_vm_basic_test(void *guest_code)
534 {
535 	struct kvm_vm *vm;
536 	struct kvm_vcpu *vcpu;
537 
538 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
539 	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
540 				   "SBI PMU not available, skipping test");
541 	vm_init_vector_tables(vm);
542 	/* Illegal instruction handler is required to verify read access without configuration */
543 	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, guest_illegal_exception_handler);
544 
545 	vcpu_init_vector_tables(vcpu);
546 	run_vcpu(vcpu);
547 
548 	test_vm_destroy(vm);
549 }
550 
551 static void test_vm_events_test(void *guest_code)
552 {
553 	struct kvm_vm *vm = NULL;
554 	struct kvm_vcpu *vcpu = NULL;
555 
556 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
557 	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
558 				   "SBI PMU not available, skipping test");
559 	run_vcpu(vcpu);
560 
561 	test_vm_destroy(vm);
562 }
563 
/*
 * Back the snapshot GPA range with host memory, identity-map it in the
 * guest page tables, and publish the resulting GVA/GPA globals to the
 * guest image.
 */
static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	/* PMU Snapshot requires single page only */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, PMU_SNAPSHOT_GPA_BASE, 1, 1, 0);
	/* PMU_SNAPSHOT_GPA_BASE is identity mapped */
	virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1);

	/* Identity mapping means the GVA equals the GPA base */
	snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE);
	snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva);
	/* Make the addresses visible inside the guest */
	sync_global_to_guest(vcpu->vm, snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gpa);
}
576 
577 static void test_vm_events_snapshot_test(void *guest_code)
578 {
579 	struct kvm_vm *vm = NULL;
580 	struct kvm_vcpu *vcpu;
581 
582 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
583 	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
584 				   "SBI PMU not available, skipping test");
585 
586 	test_vm_setup_snapshot_mem(vm, vcpu);
587 
588 	run_vcpu(vcpu);
589 
590 	test_vm_destroy(vm);
591 }
592 
/*
 * Host-side driver for the overflow test: requires the Sscofpmf ISA
 * extension (counter overflow interrupts), sets up snapshot memory, the
 * guest interrupt handler and the guest timer frequency, then runs the
 * guest and tears down.
 */
static void test_vm_events_overflow(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
				   "SBI PMU not available, skipping test");

	/* Overflow interrupts need the Sscofpmf extension */
	__TEST_REQUIRE(__vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSCOFPMF),
				   "Sscofpmf is not available, skipping overflow test");

	test_vm_setup_snapshot_mem(vm, vcpu);
	vm_init_vector_tables(vm);
	vm_install_interrupt_handler(vm, guest_irq_handler);

	vcpu_init_vector_tables(vcpu);
	/* Initialize guest timer frequency (used by the guest's udelay()). */
	vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq);
	sync_global_to_guest(vm, timer_freq);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}
618 
/* Print the command line usage for @name. */
static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-d <test name>]\n", name);
	pr_info("\t-d: Test to disable. Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
	pr_info("\t-h: print this help screen\n");
}
625 
626 static bool parse_args(int argc, char *argv[])
627 {
628 	int opt;
629 
630 	while ((opt = getopt(argc, argv, "hd:")) != -1) {
631 		switch (opt) {
632 		case 'd':
633 			if (!strncmp("basic", optarg, 5))
634 				disabled_tests |= SBI_PMU_TEST_BASIC;
635 			else if (!strncmp("events", optarg, 6))
636 				disabled_tests |= SBI_PMU_TEST_EVENTS;
637 			else if (!strncmp("snapshot", optarg, 8))
638 				disabled_tests |= SBI_PMU_TEST_SNAPSHOT;
639 			else if (!strncmp("overflow", optarg, 8))
640 				disabled_tests |= SBI_PMU_TEST_OVERFLOW;
641 			else
642 				goto done;
643 			break;
644 		case 'h':
645 		default:
646 			goto done;
647 		}
648 	}
649 
650 	return true;
651 done:
652 	test_print_help(argv[0]);
653 	return false;
654 }
655 
656 int main(int argc, char *argv[])
657 {
658 	if (!parse_args(argc, argv))
659 		exit(KSFT_SKIP);
660 
661 	if (!(disabled_tests & SBI_PMU_TEST_BASIC)) {
662 		test_vm_basic_test(test_pmu_basic_sanity);
663 		pr_info("SBI PMU basic test : PASS\n");
664 	}
665 
666 	if (!(disabled_tests & SBI_PMU_TEST_EVENTS)) {
667 		test_vm_events_test(test_pmu_events);
668 		pr_info("SBI PMU event verification test : PASS\n");
669 	}
670 
671 	if (!(disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
672 		test_vm_events_snapshot_test(test_pmu_events_snaphost);
673 		pr_info("SBI PMU event verification with snapshot test : PASS\n");
674 	}
675 
676 	if (!(disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
677 		test_vm_events_overflow(test_pmu_events_overflow);
678 		pr_info("SBI PMU event verification with overflow test : PASS\n");
679 	}
680 
681 	return 0;
682 }
683