xref: /linux/tools/testing/selftests/kvm/arm64/vpmu_counter_access.c (revision f3826aa9962b4572d01083c84ac0f8345f121168)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * vpmu_counter_access - Test vPMU event counter access
4  *
5  * Copyright (c) 2023 Google LLC.
6  *
7  * This test checks if the guest can see the same number of the PMU event
8  * counters (PMCR_EL0.N) that userspace sets, if the guest can access
9  * those counters, and if the guest is prevented from accessing any
10  * other counters.
 * It also checks if the userspace accesses to the PMU registers honor the
12  * PMCR.N value that's set for the guest.
13  * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
14  */
15 #include <kvm_util.h>
16 #include <processor.h>
17 #include <test_util.h>
18 #include <vgic.h>
19 #include <perf/arm_pmuv3.h>
20 #include <linux/bitfield.h>
21 
22 /* The max number of the PMU event counters (excluding the cycle counter) */
23 #define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)
24 
25 /* The cycle counter bit position that's common among the PMU registers */
26 #define ARMV8_PMU_CYCLE_IDX		31
27 
/* The VM/vCPU pair under test; recreated for each sub-test run. */
struct vpmu_vm {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

/* Global instance shared by create_vpmu_vm()/destroy_vpmu_vm() and the tests. */
static struct vpmu_vm vpmu_vm;
34 
/* A pair of sysreg IDs: a 'set' register and its 'clear' counterpart. */
struct pmreg_sets {
	uint64_t set_reg_id;
	uint64_t clr_reg_id;
};

/* Build a struct pmreg_sets initializer from a SET/CLR register ID pair. */
#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
41 
42 static uint64_t get_pmcr_n(uint64_t pmcr)
43 {
44 	return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
45 }
46 
47 static uint64_t get_counters_mask(uint64_t n)
48 {
49 	uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
50 
51 	if (n)
52 		mask |= GENMASK(n - 1, 0);
53 	return mask;
54 }
55 
/* Read PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
static inline unsigned long read_sel_evcntr(int sel)
{
	/* Select counter @sel via PMSELR_EL0 before the indirect access. */
	write_sysreg(sel, pmselr_el0);
	isb();
	return read_sysreg(pmxevcntr_el0);
}
63 
/* Write PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
static inline void write_sel_evcntr(int sel, unsigned long val)
{
	/* Select counter @sel via PMSELR_EL0 before the indirect access. */
	write_sysreg(sel, pmselr_el0);
	isb();
	write_sysreg(val, pmxevcntr_el0);
	isb();
}
72 
/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
static inline unsigned long read_sel_evtyper(int sel)
{
	/* Select counter @sel via PMSELR_EL0 before the indirect access. */
	write_sysreg(sel, pmselr_el0);
	isb();
	return read_sysreg(pmxevtyper_el0);
}
80 
/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
static inline void write_sel_evtyper(int sel, unsigned long val)
{
	/* Select counter @sel via PMSELR_EL0 before the indirect access. */
	write_sysreg(sel, pmselr_el0);
	isb();
	write_sysreg(val, pmxevtyper_el0);
	isb();
}
89 
/* Disable the PMU (clear PMCR_EL0.E) and reset all event counters (set P). */
static void pmu_disable_reset(void)
{
	uint64_t pmcr = read_sysreg(pmcr_el0);

	/* Reset all counters, disabling them */
	pmcr &= ~ARMV8_PMU_PMCR_E;
	write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
	isb();
}
99 
/* Emit a direct read of PMEVCNTR<n>_EL0 for a compile-time constant @n. */
#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
/* Read PMEVCNTR<n>_EL0 directly, dispatching on the runtime index @n. */
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}
107 
/* Emit a direct write of PMEVCNTR<n>_EL0 for a compile-time constant @n. */
#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
/* Write @val to PMEVCNTR<n>_EL0 directly, dispatching on the runtime index @n. */
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
	isb();
}
115 
/* Emit a direct read of PMEVTYPER<n>_EL0 for a compile-time constant @n. */
#define READ_PMEVTYPERN(n) \
	return read_sysreg(pmevtyper##n##_el0)
/* Read PMEVTYPER<n>_EL0 directly, dispatching on the runtime index @n. */
static unsigned long read_pmevtypern(int n)
{
	PMEVN_SWITCH(n, READ_PMEVTYPERN);
	return 0;
}
123 
/* Emit a direct write of PMEVTYPER<n>_EL0 for a compile-time constant @n. */
#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
/* Write @val to PMEVTYPER<n>_EL0 directly, dispatching on the runtime index @n. */
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
	isb();
}
131 
/*
 * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
 * accessors that test cases will use. Each of the accessors will
 * either directly read/write PMEV{CNTR,TYPER}<n>_EL0
 * (i.e. {read,write}_pmev{cnt,type}rn()), or read/write them through
 * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
 *
 * This is used to test that combinations of those accessors provide
 * the consistent behavior.
 */
struct pmc_accessor {
	/* A function to be used to read PMEVCNTR<n>_EL0 */
	unsigned long	(*read_cntr)(int idx);
	/* A function to be used to write PMEVCNTR<n>_EL0 */
	void		(*write_cntr)(int idx, unsigned long val);
	/* A function to be used to read PMEVTYPER<n>_EL0 */
	unsigned long	(*read_typer)(int idx);
	/* A function to be used to write PMEVTYPER<n>_EL0 */
	void		(*write_typer)(int idx, unsigned long val);
};
152 
/* All tested combinations of direct and indirect counter/typer accessors. */
struct pmc_accessor pmc_accessors[] = {
	/* test with all direct accesses */
	{ read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
	/* test with all indirect accesses */
	{ read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
	/* read with direct accesses, and write with indirect accesses */
	{ read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
	/* read with indirect accesses, and write with direct accesses */
	{ read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
};
163 
164 /*
165  * Convert a pointer of pmc_accessor to an index in pmc_accessors[],
166  * assuming that the pointer is one of the entries in pmc_accessors[].
167  */
168 #define PMC_ACC_TO_IDX(acc)	(acc - &pmc_accessors[0])
169 
170 #define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected)			 \
171 {										 \
172 	uint64_t _tval = read_sysreg(regname);					 \
173 										 \
174 	if (set_expected)							 \
175 		__GUEST_ASSERT((_tval & mask),					 \
176 				"tval: 0x%lx; mask: 0x%lx; set_expected: %u",	 \
177 				_tval, mask, set_expected);			 \
178 	else									 \
179 		__GUEST_ASSERT(!(_tval & mask),					 \
180 				"tval: 0x%lx; mask: 0x%lx; set_expected: %u",	 \
181 				_tval, mask, set_expected);			 \
182 }
183 
/*
 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
 * are set or cleared as specified in @set_expected.
 * The SET and CLR variants of each register read back the same underlying
 * bitmap, so all six registers must agree.
 */
static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
{
	GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
	GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
}
197 
198 /*
199  * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
200  * to the specified counter (@pmc_idx) can be read/written as expected.
201  * When @set_op is true, it tries to set the bit for the counter in
202  * those registers by writing the SET registers (the bit won't be set
203  * if the counter is not implemented though).
204  * Otherwise, it tries to clear the bits in the registers by writing
205  * the CLR registers.
206  * Then, it checks if the values indicated in the registers are as expected.
207  */
208 static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
209 {
210 	uint64_t pmcr_n, test_bit = BIT(pmc_idx);
211 	bool set_expected = false;
212 
213 	if (set_op) {
214 		write_sysreg(test_bit, pmcntenset_el0);
215 		write_sysreg(test_bit, pmintenset_el1);
216 		write_sysreg(test_bit, pmovsset_el0);
217 
218 		/* The bit will be set only if the counter is implemented */
219 		pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
220 		set_expected = (pmc_idx < pmcr_n) ? true : false;
221 	} else {
222 		write_sysreg(test_bit, pmcntenclr_el0);
223 		write_sysreg(test_bit, pmintenclr_el1);
224 		write_sysreg(test_bit, pmovsclr_el0);
225 	}
226 	check_bitmap_pmu_regs(test_bit, set_expected);
227 }
228 
/*
 * Tests for reading/writing registers for the (implemented) event counter
 * specified by @pmc_idx, using the accessor functions in @acc.
 */
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
	uint64_t write_data, read_data;

	/* Disable all PMCs and reset all PMCs to zero. */
	pmu_disable_reset();

	/*
	 * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1.
	 */

	/* Make sure that the bit in those registers are set to 0 */
	test_bitmap_pmu_regs(pmc_idx, false);
	/* Test if setting the bit in those registers works */
	test_bitmap_pmu_regs(pmc_idx, true);
	/* Test if clearing the bit in those registers works */
	test_bitmap_pmu_regs(pmc_idx, false);

	/*
	 * Tests for reading/writing the event type register.
	 */

	/*
	 * Set the event type register to an arbitrary value just for testing
	 * of reading/writing the register.
	 * Arm ARM says that for the event from 0x0000 to 0x003F,
	 * the value indicated in the PMEVTYPER<n>_EL0.evtCount field is
	 * the value written to the field even when the specified event
	 * is not supported.
	 */
	write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
	acc->write_typer(pmc_idx, write_data);
	read_data = acc->read_typer(pmc_idx);
	__GUEST_ASSERT(read_data == write_data,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);

	/*
	 * Tests for reading/writing the event count register.
	 */

	read_data = acc->read_cntr(pmc_idx);

	/* The count value must be 0, as it is disabled and reset */
	__GUEST_ASSERT(read_data == 0,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data);

	/* Write an arbitrary non-zero value and check it reads back intact. */
	write_data = read_data + pmc_idx + 0x12345;
	acc->write_cntr(pmc_idx, write_data);
	read_data = acc->read_cntr(pmc_idx);
	__GUEST_ASSERT(read_data == write_data,
		       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
		       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
}
288 
#define INVALID_EC	(-1ul)
/*
 * EC of the next expected trap; INVALID_EC when no exception is pending.
 * Set by TEST_EXCEPTION() and reset to INVALID_EC by guest_sync_handler().
 */
uint64_t expected_ec = INVALID_EC;

static void guest_sync_handler(struct ex_regs *regs)
{
	uint64_t esr, ec;

	esr = read_sysreg(esr_el1);
	ec = ESR_ELx_EC(esr);

	/* The trap must have the EC that TEST_EXCEPTION() announced. */
	__GUEST_ASSERT(expected_ec == ec,
			"PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
			regs->pc, esr, ec, expected_ec);

	/* skip the trapping instruction */
	regs->pc += 4;

	/* Use INVALID_EC to indicate an exception occurred */
	expected_ec = INVALID_EC;
}
309 
/*
 * Run the given operation that should trigger an exception with the
 * given exception class. The exception handler (guest_sync_handler)
 * will reset expected_ec to INVALID_EC and skip the instruction
 * that trapped.
 */
316 #define TEST_EXCEPTION(ec, ops)				\
317 ({							\
318 	GUEST_ASSERT(ec != INVALID_EC);			\
319 	WRITE_ONCE(expected_ec, ec);			\
320 	dsb(ish);					\
321 	ops;						\
322 	GUEST_ASSERT(expected_ec == INVALID_EC);	\
323 })
324 
325 /*
326  * Tests for reading/writing registers for the unimplemented event counter
327  * specified by @pmc_idx (>= PMCR_EL0.N).
328  */
329 static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
330 {
331 	/*
332 	 * Reading/writing the event count/type registers should cause
333 	 * an UNDEFINED exception.
334 	 */
335 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->read_cntr(pmc_idx));
336 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
337 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->read_typer(pmc_idx));
338 	TEST_EXCEPTION(ESR_ELx_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
339 	/*
340 	 * The bit corresponding to the (unimplemented) counter in
341 	 * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
342 	 */
343 	test_bitmap_pmu_regs(pmc_idx, 1);
344 	test_bitmap_pmu_regs(pmc_idx, 0);
345 }
346 
/*
 * The guest is configured with PMUv3 with @expected_pmcr_n number of
 * event counters.
 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
 * if reading/writing PMU registers for implemented or unimplemented
 * counters works as expected.
 */
static void guest_code(uint64_t expected_pmcr_n)
{
	uint64_t pmcr, pmcr_n, unimp_mask;
	int i, pmc;

	/* Sanity-check the value userspace asked us to expect. */
	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
			"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x",
			expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);

	pmcr = read_sysreg(pmcr_el0);
	pmcr_n = get_pmcr_n(pmcr);

	/* Make sure that PMCR_EL0.N indicates the value userspace set */
	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
			expected_pmcr_n, pmcr_n);

	/*
	 * Make sure that (RAZ) bits corresponding to unimplemented event
	 * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
	 * to zero.
	 * (NOTE: bits for implemented event counters are reset to UNKNOWN)
	 */
	unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
	check_bitmap_pmu_regs(unimp_mask, false);

	/*
	 * Tests for reading/writing PMU registers for implemented counters.
	 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
	 */
	for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
		for (pmc = 0; pmc < pmcr_n; pmc++)
			test_access_pmc_regs(&pmc_accessors[i], pmc);
	}

	/*
	 * Tests for reading/writing PMU registers for unimplemented counters.
	 * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
	 */
	for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
		for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
			test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
	}

	GUEST_DONE();
}
400 
/* Create a VM that has one vCPU with PMUv3 configured. */
static void create_vpmu_vm(void *guest_code)
{
	struct kvm_vcpu_init init;
	uint8_t pmuver, ec;
	/* PMU overflow interrupt number for the vCPU (23 here; verify it suits the vGIC config). */
	uint64_t dfr0, irq = 23;
	struct kvm_device_attr irq_attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr = (uint64_t)&irq,
	};

	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
	memset(&vpmu_vm, 0, sizeof(vpmu_vm));

	vpmu_vm.vm = vm_create(1);
	vm_init_descriptor_tables(vpmu_vm.vm);
	/* Route every synchronous exception class to guest_sync_handler. */
	for (ec = 0; ec < ESR_ELx_EC_MAX + 1; ec++) {
		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
					guest_sync_handler);
	}

	/* Create vCPU with PMUv3 */
	kvm_get_default_vcpu_target(vpmu_vm.vm, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
	vcpu_init_descriptor_tables(vpmu_vm.vcpu);

	kvm_arch_vm_finalize_vcpus(vpmu_vm.vm);

	/* Make sure that PMUv3 support is indicated in the ID register */
	dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
	pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0);
	TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
		    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

	/* Assign the PMU overflow interrupt to the vCPU. */
	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
}
440 
/* Free the VM created by create_vpmu_vm() (its vCPU is freed with it). */
static void destroy_vpmu_vm(void)
{
	kvm_vm_free(vpmu_vm.vm);
}
445 
446 static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
447 {
448 	struct ucall uc;
449 
450 	vcpu_args_set(vcpu, 1, pmcr_n);
451 	vcpu_run(vcpu);
452 	switch (get_ucall(vcpu, &uc)) {
453 	case UCALL_ABORT:
454 		REPORT_GUEST_ASSERT(uc);
455 		break;
456 	case UCALL_DONE:
457 		break;
458 	default:
459 		TEST_FAIL("Unknown ucall %lu", uc.cmd);
460 		break;
461 	}
462 }
463 
/*
 * Create the vpmu_vm and try to set its number of PMU counters to
 * @nr_counters via the KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS attribute.
 * When @expect_fail is true, the attribute write must fail with EINVAL;
 * otherwise it must succeed. The PMU is initialized either way, and the
 * VM is left for the caller to use/destroy.
 */
static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool expect_fail)
{
	struct kvm_vcpu *vcpu;
	unsigned int prev;
	int ret;

	create_vpmu_vm(guest_code);
	vcpu = vpmu_vm.vcpu;

	/* Record the default PMCR_EL0.N for the error message below. */
	prev = get_pmcr_n(vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0)));

	ret = __vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PMU_V3_CTRL,
				     KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS, &nr_counters);

	if (expect_fail)
		TEST_ASSERT(ret && errno == EINVAL,
			    "Setting more PMU counters (%u) than available (%u) unexpectedly succeeded",
			    nr_counters, prev);
	else
		TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));

	vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PMU_V3_CTRL, KVM_ARM_VCPU_PMU_V3_INIT, NULL);
}
487 
/*
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test.
 */
static void run_access_test(uint64_t pmcr_n)
{
	uint64_t sp;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_init init;

	pr_debug("Test with pmcr_n %lu\n", pmcr_n);

	test_create_vpmu_vm_with_nr_counters(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	/* Save the initial sp to restore them later to run the guest again */
	sp = vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1));

	run_vcpu(vcpu, pmcr_n);

	/*
	 * Reset and re-initialize the vCPU, and run the guest code again to
	 * check if PMCR_EL0.N is preserved.
	 */
	kvm_get_default_vcpu_target(vpmu_vm.vm, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
	aarch64_vcpu_setup(vcpu, &init);
	vcpu_init_descriptor_tables(vcpu);
	/* Restore the saved stack pointer and restart at guest_code(). */
	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp);
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

	run_vcpu(vcpu, pmcr_n);

	destroy_vpmu_vm();
}
523 
/* The SET/CLR register pairs whose userspace accesses are validated. */
static struct pmreg_sets validity_check_reg_sets[] = {
	PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
	PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
	PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
};
529 
/*
 * Create a VM with PMCR_EL0.N set to @pmcr_n, and check if KVM handles
 * the userspace accesses of the PMU register sets in
 * @validity_check_reg_sets[] correctly: bits for unimplemented counters
 * must never be readable as set, even if userspace tries to set them.
 */
static void run_pmregs_validity_test(uint64_t pmcr_n)
{
	int i;
	struct kvm_vcpu *vcpu;
	uint64_t set_reg_id, clr_reg_id, reg_val;
	uint64_t valid_counters_mask, max_counters_mask;

	test_create_vpmu_vm_with_nr_counters(pmcr_n, false);
	vcpu = vpmu_vm.vcpu;

	valid_counters_mask = get_counters_mask(pmcr_n);
	max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);

	for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
		set_reg_id = validity_check_reg_sets[i].set_reg_id;
		clr_reg_id = validity_check_reg_sets[i].clr_reg_id;

		/*
		 * Test if the 'set' and 'clr' variants of the registers
		 * are initialized based on the number of valid counters.
		 */
		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);

		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

		/*
		 * Using the 'set' variant, force-set the register to the
		 * max number of possible counters and test if KVM discards
		 * the bits for unimplemented counters as it should.
		 */
		vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);

		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(set_reg_id), reg_val);

		reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
		TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
			    "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
			    KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
	}

	destroy_vpmu_vm();
}
585 
/*
 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
static void run_error_test(uint64_t pmcr_n)
{
	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

	/* expect_fail=true: the attribute write must be rejected with EINVAL */
	test_create_vpmu_vm_with_nr_counters(pmcr_n, true);
	destroy_vpmu_vm();
}
598 
599 /*
600  * Return the default number of implemented PMU event counters excluding
601  * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
602  */
603 static uint64_t get_pmcr_n_limit(void)
604 {
605 	uint64_t pmcr;
606 
607 	create_vpmu_vm(guest_code);
608 	pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
609 	destroy_vpmu_vm();
610 	return get_pmcr_n(pmcr);
611 }
612 
613 static bool kvm_supports_nr_counters_attr(void)
614 {
615 	bool supported;
616 
617 	create_vpmu_vm(NULL);
618 	supported = !__vcpu_has_device_attr(vpmu_vm.vcpu, KVM_ARM_VCPU_PMU_V3_CTRL,
619 					    KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS);
620 	destroy_vpmu_vm();
621 
622 	return supported;
623 }
624 
int main(void)
{
	uint64_t i, pmcr_n;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
	TEST_REQUIRE(kvm_supports_vgic_v3());
	TEST_REQUIRE(kvm_supports_nr_counters_attr());

	/* Every N from 0 up to the host limit must be settable and usable. */
	pmcr_n = get_pmcr_n_limit();
	for (i = 0; i <= pmcr_n; i++) {
		run_access_test(i);
		run_pmregs_validity_test(i);
	}

	/* Every N beyond the host limit must be rejected. */
	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
		run_error_test(i);

	return 0;
}
644