// SPDX-License-Identifier: GPL-2.0-only
/*
 * sbi_pmu_test.c - Tests the riscv64 SBI PMU functionality.
 *
 * Copyright (c) 2024, Rivos Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
#include "sbi.h"
#include "arch_timer.h"
#include "ucall_common.h"

/* Maximum counters (firmware + hardware) */
#define RISCV_MAX_PMU_COUNTERS 64
union sbi_pmu_ctr_info ctrinfo_arr[RISCV_MAX_PMU_COUNTERS];

/* Snapshot shared memory data */
#define PMU_SNAPSHOT_GPA_BASE	BIT(30)
static void *snapshot_gva;
static vm_paddr_t snapshot_gpa;

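/* Shared between the guest's main test flow and the PMU overflow IRQ handler */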
static int vcpu_shared_irq_count;
static int counter_in_use;

/* Cache the available counters in a bitmask */
static unsigned long counter_mask_available;

static bool illegal_handler_invoked;

#define SBI_PMU_TEST_BASIC	BIT(0)
#define SBI_PMU_TEST_EVENTS	BIT(1)
#define SBI_PMU_TEST_SNAPSHOT	BIT(2)
#define SBI_PMU_TEST_OVERFLOW	BIT(3)

static int disabled_tests;

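/*
 * csr_read() requires a compile-time constant CSR number, so the macros below
 * expand into a dense switch covering the whole counter CSR range
 * (CSR_CYCLE..CSR_CYCLE + 31 and CSR_CYCLEH..CSR_CYCLEH + 31) and the matching
 * case is selected at runtime.
 */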
unsigned long pmu_csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)	{\
	case __csr_num: \
		__val = csr_read(__csr_num); \
		break; }
#define switchcase_csr_read_2(__csr_num, __val)	{\
	switchcase_csr_read(__csr_num + 0, __val) \
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)	{\
	switchcase_csr_read_2(__csr_num + 0, __val) \
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)	{\
	switchcase_csr_read_4(__csr_num + 0, __val) \
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val) \
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val) \
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

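/* Burn a known number of instructions so the hardware counters have work to count */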
static inline void dummy_func_loop(uint64_t iter)
{
	int i = 0;

	while (i < iter) {
		asm volatile("nop");
		i++;
	}
}

static void start_counter(unsigned long counter, unsigned long start_flags,
			  unsigned long ival)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter, 1, start_flags,
			ival, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to start counter %ld\n", counter);
}

/* This should be invoked only for the counter reset use case */
static void stop_reset_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1,
			stop_flags | SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
	__GUEST_ASSERT(ret.error == SBI_ERR_ALREADY_STOPPED,
		       "Unable to stop counter %ld\n", counter);
}

static void stop_counter(unsigned long counter, unsigned long stop_flags)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
			0, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "Unable to stop counter %ld error %ld\n",
		       counter, ret.error);
}

static void guest_illegal_exception_handler(struct ex_regs *regs)
{
	__GUEST_ASSERT(regs->cause == EXC_INST_ILLEGAL,
		       "Unexpected exception cause %lx\n", regs->cause);

	illegal_handler_invoked = true;
	/* skip the trapping instruction */
	regs->epc += 4;
}

static void guest_irq_handler(struct ex_regs *regs)
{
	unsigned int irq_num = regs->cause & ~CAUSE_IRQ_FLAG;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	unsigned long overflown_mask;
	unsigned long counter_val = 0;

	/* Validate that we are in the correct irq handler */
	GUEST_ASSERT_EQ(irq_num, IRQ_PMU_OVF);

	/* Stop the in-use counter first to avoid further interrupts */
	stop_counter(counter_in_use, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	csr_clear(CSR_SIP, BIT(IRQ_PMU_OVF));

	overflown_mask = READ_ONCE(snapshot_data->ctr_overflow_mask);
	GUEST_ASSERT(overflown_mask & 0x01);

	WRITE_ONCE(vcpu_shared_irq_count, vcpu_shared_irq_count + 1);

	counter_val = READ_ONCE(snapshot_data->ctr_values[0]);
	/* Now start the counter again to mimic the real driver behavior */
	start_counter(counter_in_use, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_val);
}

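/* Ask the SBI implementation to allocate a counter matching @event from the @cmask range */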
static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
				       unsigned long cflags,
				       unsigned long event)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
			cflags, event, 0, 0);
	__GUEST_ASSERT(ret.error == 0, "config matching failed %ld\n", ret.error);
	GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS);
	GUEST_ASSERT(BIT(ret.value) & counter_mask_available);

	return ret.value;
}

static unsigned long get_num_counters(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);

	__GUEST_ASSERT(ret.error == 0, "Unable to retrieve number of counters from SBI PMU");
	__GUEST_ASSERT(ret.value < RISCV_MAX_PMU_COUNTERS,
		       "Invalid number of counters %ld\n", ret.value);

	return ret.value;
}

static void update_counter_info(int num_counters)
{
	int i = 0;
	struct sbiret ret;

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo_arr[i].value = ret.value;
		counter_mask_available |= BIT(i);
	}

	GUEST_ASSERT(counter_mask_available > 0);
}

static unsigned long read_fw_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, idx, 0, 0, 0, 0, 0);
	GUEST_ASSERT(ret.error == 0);
	return ret.value;
}

static unsigned long read_counter(int idx, union sbi_pmu_ctr_info ctrinfo)
{
	unsigned long counter_val = 0;

	__GUEST_ASSERT(ctrinfo.type < 2, "Invalid counter type %d", ctrinfo.type);

	if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW)
		counter_val = pmu_csr_read_num(ctrinfo.csr);
	else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW)
		counter_val = read_fw_counter(idx, ctrinfo);

	return counter_val;
}

static inline void verify_sbi_requirement_assert(void)
{
	long out_val = 0;
	bool probe;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	if (get_host_sbi_spec_version() < sbi_mk_version(2, 0))
		__GUEST_ASSERT(0, "SBI implementation version doesn't support PMU Snapshot");
}

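/*
 * Program the snapshot shared memory for this hart. The GPA is split into
 * lo/hi halves so the same call also works on rv32; an all-ones address is
 * the SBI convention for disabling the shared memory.
 */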
static void snapshot_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
				      lo, hi, flags, 0, 0, 0);

	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}

static void test_pmu_event(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that: first initialize the counter
	 * to a very high value, then to a low value, and verify that the
	 * reinitialization took effect.
	 */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, ULONG_MAX/2);
	stop_counter(counter, 0);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	stop_counter(counter, 0);
	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	start_counter(counter, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_init_value);
	dummy_func_loop(10000);
	stop_counter(counter, 0);

	counter_value_post = read_counter(counter, ctrinfo_arr[counter]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

static void test_pmu_event_snapshot(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_pre, counter_value_post;
	unsigned long counter_init_value = 100;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_value_pre = read_counter(counter, ctrinfo_arr[counter]);

	/* Do not set the initial value */
	start_counter(counter, 0, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	/* The counter value is updated w.r.t. the relative index of cbase */
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_value_pre,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/*
	 * We can't just update the counter without starting it.
	 * Do start/stop twice to simulate that: first initialize the counter
	 * to a very high value, then to a low value, and verify that the
	 * reinitialization took effect.
	 */
	WRITE_ONCE(snapshot_data->ctr_values[0], ULONG_MAX/2);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_pre = READ_ONCE(snapshot_data->ctr_values[0]);

	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);
	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_pre > counter_value_post,
		       "Counter reinitialization verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_value_pre);

	/* Now set the initial value and compare */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	__GUEST_ASSERT(counter_value_post > counter_init_value,
		       "Event update verification failed: post [%lx] pre [%lx]\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

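/*
 * Overflow flow: arm the counter just below ULONG_MAX, keep the vCPU busy
 * until the overflow interrupt fires, let guest_irq_handler() snapshot and
 * restart the counter, then verify that the final counter value wrapped
 * below the initial value.
 */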
static void test_pmu_event_overflow(unsigned long event)
{
	unsigned long counter;
	unsigned long counter_value_post;
	unsigned long counter_init_value = ULONG_MAX - 10000;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;

	counter = get_counter_index(0, counter_mask_available, 0, event);
	counter_in_use = counter;

	/* The counter value is updated w.r.t. the relative index of cbase passed to start/stop */
	WRITE_ONCE(snapshot_data->ctr_values[0], counter_init_value);
	start_counter(counter, SBI_PMU_START_FLAG_INIT_SNAPSHOT, 0);
	dummy_func_loop(10000);
	udelay(msecs_to_usecs(2000));
	/* irq handler should have stopped the counter */
	stop_counter(counter, SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT);

	counter_value_post = READ_ONCE(snapshot_data->ctr_values[0]);
	/* The counter value after stopping should be less than the init value due to overflow */
	__GUEST_ASSERT(counter_value_post < counter_init_value,
		       "counter_value_post %lx counter_init_value %lx for counter\n",
		       counter_value_post, counter_init_value);

	stop_reset_counter(counter, 0);
}

static void test_invalid_event(void)
{
	struct sbiret ret;
	unsigned long event = 0x1234; /* A random event */

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, 0,
			counter_mask_available, 0, event, 0, 0);
	GUEST_ASSERT_EQ(ret.error, SBI_ERR_NOT_SUPPORTED);
}

static void test_pmu_events(void)
{
	int num_counters = 0;

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Sanity testing for any random invalid event */
	test_invalid_event();

	/* Only these two events are guaranteed to be present */
	test_pmu_event(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_basic_sanity(void)
{
	long out_val = 0;
	bool probe;
	struct sbiret ret;
	int num_counters = 0, i;
	union sbi_pmu_ctr_info ctrinfo;

	probe = guest_sbi_probe_extension(SBI_EXT_PMU, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	num_counters = get_num_counters();

	for (i = 0; i < num_counters; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i,
				0, 0, 0, 0, 0);

		/* There can be gaps in logical counter indices */
		if (ret.error)
			continue;
		GUEST_ASSERT_NE(ret.value, 0);

		ctrinfo.value = ret.value;

		/*
		 * Accessibility check of hardware and read capability of firmware counters.
		 * The spec doesn't mandate any initial value. No need to check any value.
		 */
		if (ctrinfo.type == SBI_PMU_CTR_TYPE_HW) {
			pmu_csr_read_num(ctrinfo.csr);
			GUEST_ASSERT(illegal_handler_invoked);
		} else if (ctrinfo.type == SBI_PMU_CTR_TYPE_FW) {
			read_fw_counter(i, ctrinfo);
		}
	}

	GUEST_DONE();
}

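/*
 * Same event checks as test_pmu_events(), but counter values are read from
 * the snapshot shared memory rather than via CSR/firmware reads.
 */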
static void test_pmu_events_snapshot(void)
{
	int num_counters = 0;
	struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
	int i;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/* Validate shared memory access */
	GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_overflow_mask), 0);
	for (i = 0; i < num_counters; i++) {
		if (counter_mask_available & (BIT(i)))
			GUEST_ASSERT_EQ(READ_ONCE(snapshot_data->ctr_values[i]), 0);
	}
	/* Only these two events are guaranteed to be present */
	test_pmu_event_snapshot(SBI_PMU_HW_CPU_CYCLES);
	test_pmu_event_snapshot(SBI_PMU_HW_INSTRUCTIONS);

	GUEST_DONE();
}

static void test_pmu_events_overflow(void)
{
	int num_counters = 0;

	/* Verify presence of SBI PMU and minimum required SBI version */
	verify_sbi_requirement_assert();

	snapshot_set_shmem(snapshot_gpa, 0);
	csr_set(CSR_IE, BIT(IRQ_PMU_OVF));
	local_irq_enable();

	/* Get the counter details */
	num_counters = get_num_counters();
	update_counter_info(num_counters);

	/*
	 * QEMU supports overflow for cycle/instruction.
	 * This test may fail on any platform that does not support overflow for these two events.
	 */
	test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, 1);

	test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
	GUEST_ASSERT_EQ(vcpu_shared_irq_count, 2);

	GUEST_DONE();
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
	case UCALL_SYNC:
		break;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
		break;
	}
}

void test_vm_destroy(struct kvm_vm *vm)
{
	memset(ctrinfo_arr, 0, sizeof(union sbi_pmu_ctr_info) * RISCV_MAX_PMU_COUNTERS);
	counter_mask_available = 0;
	kvm_vm_free(vm);
}

static void test_vm_basic_test(void *guest_code)
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	vm_init_vector_tables(vm);
	/* Illegal instruction handler is required to verify read access without configuration */
	vm_install_exception_handler(vm, EXC_INST_ILLEGAL, guest_illegal_exception_handler);

	vcpu_init_vector_tables(vcpu);
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu = NULL;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");
	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_setup_snapshot_mem(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	/* PMU Snapshot requires a single page only */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, PMU_SNAPSHOT_GPA_BASE, 1, 1, 0);
	/* PMU_SNAPSHOT_GPA_BASE is identity mapped */
	virt_map(vm, PMU_SNAPSHOT_GPA_BASE, PMU_SNAPSHOT_GPA_BASE, 1);

	snapshot_gva = (void *)(PMU_SNAPSHOT_GPA_BASE);
	snapshot_gpa = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gva);
	sync_global_to_guest(vcpu->vm, snapshot_gpa);
}

static void test_vm_events_snapshot_test(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	test_vm_setup_snapshot_mem(vm, vcpu);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_vm_events_overflow(void *guest_code)
{
	struct kvm_vm *vm = NULL;
	struct kvm_vcpu *vcpu;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	__TEST_REQUIRE(__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_PMU),
		       "SBI PMU not available, skipping test");

	__TEST_REQUIRE(__vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSCOFPMF),
		       "Sscofpmf is not available, skipping overflow test");

	test_vm_setup_snapshot_mem(vm, vcpu);
	vm_init_vector_tables(vm);
	vm_install_interrupt_handler(vm, guest_irq_handler);

	vcpu_init_vector_tables(vcpu);
	/* Initialize guest timer frequency. */
	vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq);
	sync_global_to_guest(vm, timer_freq);

	run_vcpu(vcpu);

	test_vm_destroy(vm);
}

static void test_print_help(char *name)
{
	pr_info("Usage: %s [-h] [-d <test name>]\n", name);
	pr_info("\t-d: Test to disable. Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
	pr_info("\t-h: print this help screen\n");
}

static bool parse_args(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "hd:")) != -1) {
		switch (opt) {
		case 'd':
			if (!strncmp("basic", optarg, 5))
				disabled_tests |= SBI_PMU_TEST_BASIC;
			else if (!strncmp("events", optarg, 6))
				disabled_tests |= SBI_PMU_TEST_EVENTS;
			else if (!strncmp("snapshot", optarg, 8))
				disabled_tests |= SBI_PMU_TEST_SNAPSHOT;
			else if (!strncmp("overflow", optarg, 8))
				disabled_tests |= SBI_PMU_TEST_OVERFLOW;
			else
				goto done;
			break;
		case 'h':
		default:
			goto done;
		}
	}

	return true;
done:
	test_print_help(argv[0]);
	return false;
}

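/*
 * Example invocations (the binary name/path shown here is illustrative and
 * depends on how the kselftests are built and installed):
 *   ./sbi_pmu_test                           run every test
 *   ./sbi_pmu_test -d snapshot -d overflow   skip the snapshot and overflow tests
 */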
int main(int argc, char *argv[])
{
	if (!parse_args(argc, argv))
		exit(KSFT_SKIP);

	if (!(disabled_tests & SBI_PMU_TEST_BASIC)) {
		test_vm_basic_test(test_pmu_basic_sanity);
		pr_info("SBI PMU basic test : PASS\n");
	}

	if (!(disabled_tests & SBI_PMU_TEST_EVENTS)) {
		test_vm_events_test(test_pmu_events);
		pr_info("SBI PMU event verification test : PASS\n");
	}

	if (!(disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
		test_vm_events_snapshot_test(test_pmu_events_snapshot);
		pr_info("SBI PMU event verification with snapshot test : PASS\n");
	}

	if (!(disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
		test_vm_events_overflow(test_pmu_events_overflow);
		pr_info("SBI PMU event verification with overflow test : PASS\n");
	}

	return 0;
}