// SPDX-License-Identifier: GPL-2.0-only
/*
 * sbi_pmu_test.c - Tests the riscv64 SBI PMU functionality.
 *
 * Copyright (c) 2024, Rivos Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
#include "sbi.h"
#include "arch_timer.h"

/* Maximum counters (firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];

/* Snapshot shared memory data */
#define PMU_SNAPSHOT_GPA_BASE BIT(30)
static void *snapshot_gva;
static vm_paddr_t snapshot_gpa;

/* Number of PMU overflow interrupts observed by the guest irq handler */
static int vcpu_shared_irq_count;
/* Counter index currently being driven by the overflow test */
static int counter_in_use;

/* Cache the available counters in a bitmask */
static unsigned long counter_mask_available;

/* Set by the illegal-instruction handler when a CSR read traps as expected */
static bool illegal_handler_invoked;

/* Bits for the -d command line option that disables individual tests */
#define SBI_PMU_TEST_BASIC	BIT(0)
#define SBI_PMU_TEST_EVENTS	BIT(1)
#define SBI_PMU_TEST_SNAPSHOT	BIT(2)
#define SBI_PMU_TEST_OVERFLOW	BIT(3)

static int disabled_tests;

/*
 * Read a hardware counter CSR selected at run time. csr_read() needs the CSR
 * number as an immediate, so generate a switch case for every CSR in the
 * CSR_CYCLE/CSR_CYCLEH ranges via the nested case-expanding macros below.
 * Returns 0 for a CSR number outside those ranges.
 */
unsigned long pmu_csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	\
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	\
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	\
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	\
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	\
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/* Burn instructions/cycles so the programmed event counters have work to count */
static inline void dummy_func_loop(uint64_t iter)
{
	int i = 0;

	while (i < iter) {
		asm volatile("nop");
		i++;
	}
}

/* Start @counter via SBI; @ival seeds the counter depending on @start_flags */
static void start_counter(unsigned long counter, unsigned long start_flags,
			  unsigned long ival)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
			ival, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
}

/* This should be invoked only for reset counter use case */
static void stop_reset_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1,
			stop_flags | SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	/* Caller already stopped the counter; only the reset side effect matters */
	__GUEST_ASSERT(ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unable to stop counter %ld\n", counter);
}

/* Stop @counter via SBI and assert the call succeeded */
static void stop_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
			0, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to stop counter %ld error %ld\n",
		       counter, ret.error);
}

/*
 * Expected when reading an unconfigured hardware counter CSR in the basic
 * sanity test: record the trap and skip the faulting instruction.
 */
static void guest_illegal_exception_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(regs->cause == EXC_INST_ILLEGAL,
		       "Unexpected exception handler %lx\n", regs->cause);

	illegal_handler_invoked = true;
	/* skip the trapping instruction */
	regs->epc += 4;
}

/* PMU overflow interrupt handler used by the overflow test */
static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int irq_num = regs->cause & ~CAUSE_IRQ_FLAG;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	unsigned long overflown_mask;
	unsigned long counter_val = 0;

	/* Validate that we are in the correct irq handler */
	GUEST_ASSERT_EQ(irq_num, IRQ_PMU_OVF);

	/* Stop all counters first to avoid further interrupts */
	stop_counter(counter_in_use, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	csr_clear(CSR_SIP, BIT(IRQ_PMU_OVF));

	/* Bit 0 corresponds to the first counter relative to the stop's cbase */
	overflown_mask = READ_ONCE(snapshot_data->ctr_overflow_mask);
	GUEST_ASSERT(overflown_mask & 0x01);

	WRITE_ONCE(vcpu_shared_irq_count, vcpu_shared_irq_count + 1);

	counter_val = READ_ONCE(snapshot_data->ctr_values[0]);
	/* Now start the counter to mimic the real driver behavior */
	start_counter(counter_in_use, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_val);
}

/*
 * Ask the SBI implementation to match @event to a counter in the
 * [@cbase, @cbase + @cmask] range and return the chosen counter index.
 */
static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
				       unsigned long cflags,
				       unsigned long event)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, event, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "config matching failed %ld\n", ret.error);
	GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);
	GUEST_ASSERT(BIT(ret.value) & counter_mask_available);

	return ret.value;
}

/* Query the total number of counters exposed by the SBI PMU extension */
static unsigned long get_num_counters(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);

	__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
	__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS,
		       "Invalid number of counters %ld\n", ret.value);

	return ret.value;
}

/* Fill ctrinfo_arr[] and counter_mask_available for the first @num_counters counters */
static void update_counter_info(int num_counters)
{
	int i = 0;
	struct sbiret ret;

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo_arr[i].value = ret.value;
		counter_mask_available |= BIT(i);
	}

	GUEST_ASSERT(counter_mask_available > 0);
}

/* Read firmware counter @idx through the SBI FW_READ call */
static unsigned long read_fw_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, idx, 0, 0, 0, 0, 0);
	GUEST_ASSERT(ret.error == 0);
	return ret.value;
}

/* Read counter @idx: hardware counters via their CSR, firmware counters via SBI */
static unsigned long read_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	unsigned long counter_val = 0;

	__GUEST_ASSERT(ctrinfo.type < 2, "Invalid counter type %d", ctrinfo.type);

	if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW)
		counter_val = pmu_csr_read_num(ctrinfo.csr);
	else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW)
		counter_val = read_fw_counter(idx, ctrinfo);

	return counter_val;
}

/* Assert the PMU extension is present and SBI spec is v2.0+ (snapshot requirement) */
static inline void verify_sbi_requirement_assert(void)
{
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	if (get_host_sbi_spec_version() < sbi_mk_version(2, 0))
		__GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot");
}

/*
 * Register the snapshot shared memory region with the SBI implementation.
 * On RV64 the all-ones @gpa (disable request) needs hi set to -1 as well.
 */
static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				      lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}

/* Verify start/stop/read and reinitialization of @event using direct counter reads */
static void test_pmu_event(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX/2);
	stop_counter(counter, 0);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	stop_counter(counter, 0);
	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed : post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

/* Same checks as test_pmu_event() but values flow through the snapshot shared memory */
static void test_pmu_event_snapshot(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	/* The counter value is updated w.r.t relative index of cbase */
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that by first initializing to a very
	 * high value and a low value after that.
	 */
	WRITE_ONCE(snapshot_data->ctr_values[0], ULONG_MAX/2);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_pre = READ_ONCE(snapshot_data->ctr_values[0]);

	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed : post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

/* Program @event near ULONG_MAX so it overflows and the overflow irq handler runs */
static void test_pmu_event_overflow(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_post;
	unsigned long counter_init_value = ULONG_MAX - 10000;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_in_use = counter;

	/* The counter value is updated w.r.t relative index of cbase passed to start/stop */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	/* Give the overflow interrupt time to arrive */
	udelay(msecs_to_usecs(2000));
	/* irq handler should have stopped the counter */
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	/* The counter value after stopping should be less the init value due to overflow */
	__GUEST_ASSERT(counter_value_post < counter_init_value,
		       "counter_value_post %lx counter_init_value %lx for counter\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

/* A random, undefined event id must be rejected with SBI_ERR_NOT_SUPPORTED */
static void test_invalid_event(void)
{
	struct sbiret ret;
	unsigned long event = 0x1234; /* A random event */

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, 0,
			counter_mask_available, 0, event, 0, 0);
	GUEST_ASSERT_EQ(ret.error, SBI_ERR_NOT_SUPPORTED);
}

/* Guest entry point for the event programming test */
static void test_pmu_events(void)
{
	int num_counters = 0;

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Sanity testing for any random invalid event */
	test_invalid_event();

	/* Only these two events are guaranteed to be present */
	test_pmu_event(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

/* Guest entry point: probe the extension and touch every reported counter */
static void test_pmu_basic_sanity(void)
{
	long out_val = 0;
	bool probe;
	struct sbiret ret;
	int num_counters = 0, i;
	union sbi_pmu_ctr_info ctrinfo;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	num_counters = get_num_counters();

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
				0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo.value = ret.value;

		/*
		 * Accessibility check of hardware and read capability of firmware counters.
		 * The spec doesn't mandate any initial value. No need to check any value.
		 */
		if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
			/* The unconfigured CSR read is expected to trap (see handler) */
			pmu_csr_read_num(ctrinfo.csr);
			GUEST_ASSERT(illegal_handler_invoked);
		} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
			read_fw_counter(i, ctrinfo);
		}
	}

	GUEST_DONE();
}

/* Guest entry point for the snapshot-based event tests */
static void test_pmu_events_snaphost(void)
{
	int num_counters = 0;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	int i;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Validate shared memory access */
	GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_overflow_mask), 0);
	for (i = 0; i < num_counters; i++) {
		if (counter_mask_available & (BIT(i)))
			GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_values[i]), 0);
	}
	/* Only these two events are guaranteed to be present */
	test_pmu_event_snapshot(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event_snapshot(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

/* Guest entry point for the overflow interrupt tests */
static void test_pmu_events_overflow(void)
{
	int num_counters = 0;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);
	csr_set(CSR_IE, BIT(IRQ_PMU_OVF));
	local_irq_enable();

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/*
	 * Qemu supports overflow for cycle/instruction.
	 * This test may fail on any platform that does not support overflow for these two events.
	 */
	test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, 1);

	test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, 2);

	GUEST_DONE();
}

/* Host side: run @vcpu to completion; a guest assert aborts the test */
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
	case UCALL_SYNC:
		break;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
		break;
	}
}

/* Reset the cached counter state shared between tests, then free the VM */
void test_vm_destroy(struct kvm_vm *vm)
{
	memset(ctrinfo_arr, 0, sizeof(union sbi_pmu_ctr_info) * RISCV_MAX_PMU_COUNTERS);
	counter_mask_available = 0;
	kvm_vm_free(vm);
}

/* Host setup/teardown for the basic sanity test (installs the illegal-insn handler) */
static void test_vm_basic_test(void *guest_code)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	vm_init_vector_tables(vm);
	/* Illegal instruction handler is required to verify read access without configuration */
	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, guest_illegal_exception_handler);

	vcpu_init_vector_tables(vcpu);
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

/* Host setup/teardown for the event programming test */
static void test_vm_events_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu = NULL;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

/* Map one page of snapshot shared memory into the guest and publish its GVA/GPA */
static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	/* PMU Snapshot requires single page only */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, PMU_SNAPSHOT_GPA_BASE, 1, 1, 0);
	/* PMU_SNAPSHOT_GPA_BASE is identity mapped */
	virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1);

	snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE);
	snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gpa);
}

/* Host setup/teardown for the snapshot test */
static void test_vm_events_snapshot_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	test_vm_setup_snapshot_mem(vm, vcpu);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

/* Host setup/teardown for the overflow test (requires Sscofpmf and an irq handler) */
static void test_vm_events_overflow(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	__TEST_REQUIRE(__vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSCOFPMF),
		       "Sscofpmf is not available, skipping overflow test");

	test_vm_setup_snapshot_mem(vm, vcpu);
	vm_init_vector_tables(vm);
	vm_install_interrupt_handler(vm, guest_irq_handler);

	vcpu_init_vector_tables(vcpu);
	/* Initialize guest timer frequency. */
	vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq);
	sync_global_to_guest(vm, timer_freq);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

/* Print command line usage */
static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-d <test name>]\n", name);
	pr_info("\t-d: Test to disable. Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
	pr_info("\t-h: print this help screen\n");
}

/* Parse command line options; returns false (after printing usage) on bad input */
static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "hd:")) != -1) {
		switch (opt) {
		case 'd':
			if (!strncmp("basic", optarg, 5))
				disabled_tests |= SBI_PMU_TEST_BASIC;
			else if (!strncmp("events", optarg, 6))
				disabled_tests |= SBI_PMU_TEST_EVENTS;
			else if (!strncmp("snapshot", optarg, 8))
				disabled_tests |= SBI_PMU_TEST_SNAPSHOT;
			else if (!strncmp("overflow", optarg, 8))
				disabled_tests |= SBI_PMU_TEST_OVERFLOW;
			else
				goto done;
			break;
		case 'h':
		default:
			goto done;
		}
	}

	return true;
done:
	test_print_help(argv[0]);
	return false;
}

int main(int argc, char *argv[])
{
	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	if (!(disabled_tests & SBI_PMU_TEST_BASIC)) {
		test_vm_basic_test(test_pmu_basic_sanity);
		pr_info("SBI PMU basic test : PASS\n");
	}

	if (!(disabled_tests & SBI_PMU_TEST_EVENTS)) {
		test_vm_events_test(test_pmu_events);
		pr_info("SBI PMU event verification test : PASS\n");
	}

	if (!(disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
		test_vm_events_snapshot_test(test_pmu_events_snaphost);
		pr_info("SBI PMU event verification with snapshot test : PASS\n");
	}

	if (!(disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
		test_vm_events_overflow(test_pmu_events_overflow);
		pr_info("SBI PMU event verification with overflow test : PASS\n");
	}

	return 0;
}