xref: /linux/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c (revision 6a4aee277740d04ac0fd54cfa17cc28261932ddc)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023, Tencent, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <x86intrin.h>

#include "pmu.h"
#include "processor.h"

/* Number of LOOP instructions for the guest measurement payload. */
#define NUM_BRANCHES		10
/*
 * Number of "extra" instructions that will be counted, i.e. the number of
 * instructions that are needed to set up the loop and then disable the
 * counter.  1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
 */
#define NUM_EXTRA_INSNS		7
#define NUM_INSNS_RETIRED	(NUM_BRANCHES + NUM_EXTRA_INSNS)

static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps;
static bool is_forced_emulation_enabled;

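/*
 * Create a VM with a single vCPU that runs @guest_code, and configure its
 * vPMU with the given PMU version and, if KVM supports PDCM, the given
 * PERF_CAPABILITIES.
 */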
static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						  void *guest_code,
						  uint8_t pmu_version,
						  uint64_t perf_capabilities)
{
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(*vcpu);

	sync_global_to_guest(vm, kvm_pmu_version);
	sync_global_to_guest(vm, is_forced_emulation_enabled);

	/*
	 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
	 * features via PERF_CAPABILITIES if the guest doesn't have a vPMU.
	 */
	if (kvm_has_perf_caps)
		vcpu_set_msr(*vcpu, MSR_IA32_PERF_CAPABILITIES, perf_capabilities);

	vcpu_set_cpuid_property(*vcpu, X86_PROPERTY_PMU_VERSION, pmu_version);
	return vm;
}

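/*
 * Run the vCPU until the guest signals UCALL_DONE, forwarding guest printf
 * output to the host and reporting any guest assertion failure.
 */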
static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	do {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	} while (uc.cmd != UCALL_DONE);
}

static uint8_t guest_get_pmu_version(void)
{
	/*
	 * Return the effective PMU version, i.e. the minimum between what KVM
	 * supports and what is enumerated to the guest.  The host deliberately
	 * advertises a PMU version to the guest beyond what is actually
	 * supported by KVM to verify KVM doesn't freak out and do something
	 * bizarre with an architecturally valid, but unsupported, version.
	 */
	return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION));
}

/*
 * If an architectural event is supported and guaranteed to generate at least
 * one "hit", assert that its count is non-zero.  If an event isn't supported
 * or the test can't guarantee the associated action will occur, then all bets
 * are off regarding the count, i.e. no checks can be done.
 *
 * Sanity check that in all cases, the event doesn't count when it's disabled,
 * and that KVM correctly emulates the write of an arbitrary value.
 */
static void guest_assert_event_count(uint8_t idx,
				     struct kvm_x86_pmu_feature event,
				     uint32_t pmc, uint32_t pmc_msr)
{
	uint64_t count;

	count = _rdpmc(pmc);
	if (!this_pmu_has(event))
		goto sanity_checks;

	switch (idx) {
	case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX:
		GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
		break;
	case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
		GUEST_ASSERT_EQ(count, NUM_BRANCHES);
		break;
	case INTEL_ARCH_LLC_REFERENCES_INDEX:
	case INTEL_ARCH_LLC_MISSES_INDEX:
		if (!this_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
		    !this_cpu_has(X86_FEATURE_CLFLUSH))
			break;
		fallthrough;
	case INTEL_ARCH_CPU_CYCLES_INDEX:
	case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
		GUEST_ASSERT_NE(count, 0);
		break;
	case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
		GUEST_ASSERT(count >= NUM_INSNS_RETIRED);
		break;
	default:
		break;
	}

sanity_checks:
	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
	GUEST_ASSERT_EQ(_rdpmc(pmc), count);

	wrmsr(pmc_msr, 0xdead);
	GUEST_ASSERT_EQ(_rdpmc(pmc), 0xdead);
}

/*
 * Enable and disable the PMC in a monolithic asm blob to ensure that the
 * compiler can't insert _any_ code into the measured sequence.  Note, ECX
 * doesn't need to be clobbered as the input value, @_msr, is restored before
 * the end of the sequence.
 *
 * If CLFLUSH{,OPT} is supported, flush the cacheline containing (at least) the
 * start of the loop to force LLC references and misses, i.e. to allow testing
 * that those events actually count.
 *
 * If forced emulation is enabled (and specified), force emulation on a subset
 * of the measured code to verify that KVM correctly emulates instructions and
 * branches retired events in conjunction with hardware also counting said
 * events.
 */
#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)				\
do {										\
	__asm__ __volatile__("wrmsr\n\t"					\
			     clflush "\n\t"					\
			     "mfence\n\t"					\
			     "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t"	\
			     FEP "loop .\n\t"					\
			     FEP "mov %%edi, %%ecx\n\t"				\
			     FEP "xor %%eax, %%eax\n\t"				\
			     FEP "xor %%edx, %%edx\n\t"				\
			     "wrmsr\n\t"					\
			     :: "a"((uint32_t)_value), "d"(_value >> 32),	\
				"c"(_msr), "D"(_msr)				\
	);									\
} while (0)

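/*
 * Zero the PMC, run the measured sequence via GUEST_MEASURE_EVENT() (flushing
 * the cacheline if CLFLUSH{,OPT} is available, else padding with a NOP), and
 * then verify the resulting count.
 */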
#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)	\
do {										\
	wrmsr(_pmc_msr, 0);							\
										\
	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))				\
		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);	\
	else if (this_cpu_has(X86_FEATURE_CLFLUSH))				\
		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);	\
	else									\
		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);		\
										\
	guest_assert_event_count(_idx, _event, _pmc, _pmc_msr);			\
} while (0)

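/* Test the event with plain instructions and, if enabled, with forced emulation. */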
static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
				    uint32_t pmc, uint32_t pmc_msr,
				    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
{
	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");

	if (is_forced_emulation_enabled)
		GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
}

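/*
 * Sentinel used for architectural events that have no associated fixed
 * counter; pmu_is_null_feature() detects it by checking for an all-zeroes
 * feature.
 */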
#define X86_PMU_FEATURE_NULL						\
({									\
	struct kvm_x86_pmu_feature feature = {};			\
									\
	feature;							\
})

static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
{
	return !(*(u64 *)&event);
}

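/*
 * Verify the given architectural event on all GP counters, and on its
 * associated fixed counter (if one exists and is supported).
 */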
static void guest_test_arch_event(uint8_t idx)
{
	const struct {
		struct kvm_x86_pmu_feature gp_event;
		struct kvm_x86_pmu_feature fixed_event;
	} intel_event_to_feature[] = {
		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
		/*
		 * Note, the fixed counter for reference cycles is NOT the same
		 * as the general purpose architectural event.  The fixed counter
		 * explicitly counts at the same frequency as the TSC, whereas
		 * the GP event counts at a fixed, but uarch specific, frequency.
		 * Bundle them here for simplicity.
		 */
		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
	};

	uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
	uint32_t pmu_version = guest_get_pmu_version();
	/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
	bool guest_has_perf_global_ctrl = pmu_version >= 2;
	struct kvm_x86_pmu_feature gp_event, fixed_event;
	uint32_t base_pmc_msr;
	unsigned int i;

	/* The host side shouldn't invoke this without a guest PMU. */
	GUEST_ASSERT(pmu_version);

	if (this_cpu_has(X86_FEATURE_PDCM) &&
	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
		base_pmc_msr = MSR_IA32_PMC0;
	else
		base_pmc_msr = MSR_IA32_PERFCTR0;

	gp_event = intel_event_to_feature[idx].gp_event;
	GUEST_ASSERT_EQ(idx, gp_event.f.bit);

	GUEST_ASSERT(nr_gp_counters);

	for (i = 0; i < nr_gp_counters; i++) {
		uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS |
				    ARCH_PERFMON_EVENTSEL_ENABLE |
				    intel_pmu_arch_events[idx];

		wrmsr(MSR_P6_EVNTSEL0 + i, 0);
		if (guest_has_perf_global_ctrl)
			wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(i));

		__guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i,
					MSR_P6_EVNTSEL0 + i, eventsel);
	}

	if (!guest_has_perf_global_ctrl)
		return;

	fixed_event = intel_event_to_feature[idx].fixed_event;
	if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event))
		return;

	i = fixed_event.f.bit;

	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));

	__guest_test_arch_event(idx, fixed_event, i | INTEL_RDPMC_FIXED,
				MSR_CORE_PERF_FIXED_CTR0 + i,
				MSR_CORE_PERF_GLOBAL_CTRL,
				FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
}

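/* Guest entry point: exercise every architectural event known to selftests. */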
static void guest_test_arch_events(void)
{
	uint8_t i;

	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++)
		guest_test_arch_event(i);

	GUEST_DONE();
}

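/*
 * Host side: create a VM with the given arch-event bit vector length and
 * unavailable-events mask, then let the guest validate the resulting behavior.
 */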
static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
			     uint8_t length, uint8_t unavailable_mask)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Testing arch events requires a vPMU (there are no negative tests). */
	if (!pmu_version)
		return;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
					 pmu_version, perf_capabilities);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
				length);
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EVENTS_MASK,
				unavailable_mask);

	run_vcpu(vcpu);

	kvm_vm_free(vm);
}

/*
 * Limit testing to MSRs that are actually defined by Intel (in the SDM).  MSRs
 * that aren't defined as counter MSRs *probably* don't exist, but there's no
 * guarantee that currently undefined MSR indices won't be used for something
 * other than PMCs in the future.
 */
#define MAX_NR_GP_COUNTERS	8
#define MAX_NR_FIXED_COUNTERS	3

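/* Assert that a WRMSR/RDMSR/RDPMC access faults (or doesn't) as expected. */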
#define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector)		\
__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,			\
	       "Expected %s on " #insn "(0x%x), got vector %u",			\
	       expect_gp ? "#GP" : "no fault", msr, vector)

#define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected)			\
	__GUEST_ASSERT(val == expected,						\
		       "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx",	\
		       msr, expected, val);

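/* Read the PMC via RDPMC, and via forced-emulation RDPMC if it's enabled. */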
static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
			     uint64_t expected_val)
{
	uint8_t vector;
	uint64_t val;

	vector = rdpmc_safe(rdpmc_idx, &val);
	GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
	if (expect_success)
		GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);

	if (!is_forced_emulation_enabled)
		return;

	vector = rdpmc_safe_fep(rdpmc_idx, &val);
	GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
	if (expect_success)
		GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
}

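/*
 * Verify that every possible counter MSR in the range either works or faults
 * as expected, based on the number of counters enumerated to the guest, using
 * WRMSR, RDMSR, and RDPMC.
 */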
static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters,
				 uint8_t nr_counters, uint32_t or_mask)
{
	const bool pmu_has_fast_mode = !guest_get_pmu_version();
	uint8_t i;

	for (i = 0; i < nr_possible_counters; i++) {
		/*
		 * TODO: Test a value that validates full-width writes and the
		 * width of the counters.
		 */
		const uint64_t test_val = 0xffff;
		const uint32_t msr = base_msr + i;

		/*
		 * Fixed counters are supported if the counter is less than the
		 * number of enumerated contiguous counters *or* the counter is
		 * explicitly enumerated in the supported counters mask.
		 */
		const bool expect_success = i < nr_counters || (or_mask & BIT(i));

		/*
		 * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are
		 * unsupported, i.e. doesn't #GP and reads back '0'.
		 */
		const uint64_t expected_val = expect_success ? test_val : 0;
		const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 &&
				       msr != MSR_P6_PERFCTR1;
		uint32_t rdpmc_idx;
		uint8_t vector;
		uint64_t val;

		vector = wrmsr_safe(msr, test_val);
		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);

		vector = rdmsr_safe(msr, &val);
		GUEST_ASSERT_PMC_MSR_ACCESS(RDMSR, msr, expect_gp, vector);

		/* On #GP, the result of RDMSR is undefined. */
		if (!expect_gp)
			GUEST_ASSERT_PMC_VALUE(RDMSR, msr, val, expected_val);

		/*
		 * Redo the read tests with RDPMC, which has different indexing
		 * semantics and additional capabilities.
		 */
		rdpmc_idx = i;
		if (base_msr == MSR_CORE_PERF_FIXED_CTR0)
			rdpmc_idx |= INTEL_RDPMC_FIXED;

		guest_test_rdpmc(rdpmc_idx, expect_success, expected_val);

		/*
		 * KVM doesn't support non-architectural PMUs, i.e. it should be
		 * impossible to have fast mode RDPMC.  Verify that attempting
		 * to use fast RDPMC always #GPs.
		 */
		GUEST_ASSERT(!expect_success || !pmu_has_fast_mode);
		rdpmc_idx |= INTEL_RDPMC_FAST;
		guest_test_rdpmc(rdpmc_idx, false, -1ull);

		vector = wrmsr_safe(msr, 0);
		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);
	}
}

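/* Validate access to the GP counters via the legacy and/or full-width MSRs. */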
static void guest_test_gp_counters(void)
{
	uint8_t nr_gp_counters = 0;
	uint32_t base_msr;

	if (guest_get_pmu_version())
		nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);

	if (this_cpu_has(X86_FEATURE_PDCM) &&
	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
		base_msr = MSR_IA32_PMC0;
	else
		base_msr = MSR_IA32_PERFCTR0;

	guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0);
	GUEST_DONE();
}

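/* Host side: test GP counters with the given number enumerated via CPUID. */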
static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
			     uint8_t nr_gp_counters)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters,
					 pmu_version, perf_capabilities);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
				nr_gp_counters);

	run_vcpu(vcpu);

	kvm_vm_free(vm);
}

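/*
 * Validate access to the fixed counters, and verify that supported counters
 * actually count while unsupported counters #GP when enabled.
 */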
static void guest_test_fixed_counters(void)
{
	uint64_t supported_bitmask = 0;
	uint8_t nr_fixed_counters = 0;
	uint8_t i;

	/* Fixed counters require Architectural vPMU Version 2+. */
	if (guest_get_pmu_version() >= 2)
		nr_fixed_counters = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);

	/*
	 * The supported bitmask for fixed counters was introduced in PMU
	 * version 5.
	 */
	if (guest_get_pmu_version() >= 5)
		supported_bitmask = this_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK);

	guest_rd_wr_counters(MSR_CORE_PERF_FIXED_CTR0, MAX_NR_FIXED_COUNTERS,
			     nr_fixed_counters, supported_bitmask);

	for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) {
		uint8_t vector;
		uint64_t val;

		if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) {
			vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL,
					    FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
			__GUEST_ASSERT(vector == GP_VECTOR,
				       "Expected #GP for counter %u in FIXED_CTR_CTRL", i);

			vector = wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL,
					    FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
			__GUEST_ASSERT(vector == GP_VECTOR,
				       "Expected #GP for counter %u in PERF_GLOBAL_CTRL", i);
			continue;
		}

		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);

		GUEST_ASSERT_NE(val, 0);
	}
	GUEST_DONE();
}

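/* Host side: test fixed counters with the given count and supported bitmask. */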
static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
				uint8_t nr_fixed_counters,
				uint32_t supported_bitmask)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_fixed_counters,
					 pmu_version, perf_capabilities);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK,
				supported_bitmask);
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_FIXED_COUNTERS,
				nr_fixed_counters);

	run_vcpu(vcpu);

	kvm_vm_free(vm);
}

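/*
 * Iterate over all interesting combinations of PMU version, PERF_CAPABILITIES,
 * and CPUID-enumerated arch events and counter counts.
 */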
static void test_intel_counters(void)
{
	uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
	uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
	unsigned int i;
	uint8_t v, j;
	uint32_t k;

	const uint64_t perf_caps[] = {
		0,
		PMU_CAP_FW_WRITES,
	};

	/*
	 * Test up to PMU v5, which is the current maximum version defined by
	 * Intel, i.e. the last version that is guaranteed to be backwards
	 * compatible with KVM's existing behavior.
	 */
	uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5);

	/*
	 * Detect the existence of events that aren't supported by selftests.
	 * This will (obviously) fail any time the kernel adds support for a
	 * new event, but it's worth paying that price to keep the test fresh.
	 */
	TEST_ASSERT(nr_arch_events <= NR_INTEL_ARCH_EVENTS,
		    "New architectural event(s) detected; please update this test (length = %u, mask = %x)",
		    nr_arch_events, kvm_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));

	/*
	 * Force iterating over known arch events regardless of whether or not
	 * KVM/hardware supports a given event.
	 */
	nr_arch_events = max_t(typeof(nr_arch_events), nr_arch_events, NR_INTEL_ARCH_EVENTS);

	for (v = 0; v <= max_pmu_version; v++) {
		for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
			if (!kvm_has_perf_caps && perf_caps[i])
				continue;

			pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n",
				v, perf_caps[i]);
			/*
			 * To keep the total runtime reasonable, test every
			 * possible non-zero, non-reserved bitmap combination
			 * only with the native PMU version and the full bit
			 * vector length.
			 */
			if (v == pmu_version) {
				for (k = 1; k < (BIT(nr_arch_events) - 1); k++)
					test_arch_events(v, perf_caps[i], nr_arch_events, k);
			}
			/*
			 * Test single bits for all PMU versions and lengths up
			 * to the number of events + 1 (to verify KVM doesn't do
			 * weird things if the guest length is greater than the
			 * host length).  Explicitly test a mask of '0' and all
			 * ones, i.e. all events being available and unavailable.
			 */
			for (j = 0; j <= nr_arch_events + 1; j++) {
				test_arch_events(v, perf_caps[i], j, 0);
				test_arch_events(v, perf_caps[i], j, 0xff);

				for (k = 0; k < nr_arch_events; k++)
					test_arch_events(v, perf_caps[i], j, BIT(k));
			}

			pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n",
				v, perf_caps[i]);
			for (j = 0; j <= nr_gp_counters; j++)
				test_gp_counters(v, perf_caps[i], j);

			pr_info("Testing fixed counters, PMU version %u, perf_caps = %lx\n",
				v, perf_caps[i]);
			for (j = 0; j <= nr_fixed_counters; j++) {
				for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++)
					test_fixed_counters(v, perf_caps[i], j, k);
			}
		}
	}
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_is_pmu_enabled());

	TEST_REQUIRE(host_cpu_is_intel);
	TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
	TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);

	kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
	kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);
	is_forced_emulation_enabled = kvm_is_forced_emulation_enabled();

	test_intel_counters();

	return 0;
}