xref: /linux/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023, Tencent, Inc.
 */
#include <x86intrin.h>

#include "pmu.h"
#include "processor.h"

/* Number of LOOP instructions for the guest measurement payload. */
#define NUM_BRANCHES		10
/*
 * Number of "extra" instructions that will be counted, i.e. the number of
 * instructions that are needed to set up the loop and then disable the
 * counter.  1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
 */
#define NUM_EXTRA_INSNS		7
#define NUM_INSNS_RETIRED	(NUM_BRANCHES + NUM_EXTRA_INSNS)
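
/*
 * Note, the expected totals map onto the asm blob in GUEST_MEASURE_EVENT
 * below: the LOOP instruction retires NUM_BRANCHES times (one instruction and
 * one branch per iteration), and the setup/teardown instructions enumerated
 * above account for the NUM_EXTRA_INSNS instructions.
 */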

static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps;

static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						  void *guest_code,
						  uint8_t pmu_version,
						  uint64_t perf_capabilities)
{
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(vcpu, guest_code);
	sync_global_to_guest(vm, kvm_pmu_version);

	/*
	 * Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
	 * features via PERF_CAPABILITIES if the guest doesn't have a vPMU.
	 */
	if (kvm_has_perf_caps)
		vcpu_set_msr(*vcpu, MSR_IA32_PERF_CAPABILITIES, perf_capabilities);

	vcpu_set_cpuid_property(*vcpu, X86_PROPERTY_PMU_VERSION, pmu_version);
	return vm;
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	do {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_PRINTF:
			pr_info("%s", uc.buffer);
			break;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
		}
	} while (uc.cmd != UCALL_DONE);
}

static uint8_t guest_get_pmu_version(void)
{
	/*
	 * Return the effective PMU version, i.e. the minimum between what KVM
	 * supports and what is enumerated to the guest.  The host deliberately
	 * advertises a PMU version to the guest beyond what is actually
	 * supported by KVM to verify KVM doesn't freak out and do something
	 * bizarre with an architecturally valid, but unsupported, version.
	 */
	return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION));
}

/*
 * If an architectural event is supported and guaranteed to generate at least
 * one "hit", assert that its count is non-zero.  If an event isn't supported
 * or the test can't guarantee the associated action will occur, then all bets
 * are off regarding the count, i.e. no checks can be done.
 *
 * Sanity check that in all cases, the event doesn't count when it's disabled,
 * and that KVM correctly emulates the write of an arbitrary value.
 */
static void guest_assert_event_count(uint8_t idx,
				     struct kvm_x86_pmu_feature event,
				     uint32_t pmc, uint32_t pmc_msr)
{
	uint64_t count;

	count = _rdpmc(pmc);
	if (!this_pmu_has(event))
		goto sanity_checks;

	switch (idx) {
	case INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX:
		GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
		break;
	case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
		GUEST_ASSERT_EQ(count, NUM_BRANCHES);
		break;
	case INTEL_ARCH_LLC_REFERENCES_INDEX:
	case INTEL_ARCH_LLC_MISSES_INDEX:
		if (!this_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
		    !this_cpu_has(X86_FEATURE_CLFLUSH))
			break;
		fallthrough;
	case INTEL_ARCH_CPU_CYCLES_INDEX:
	case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
		GUEST_ASSERT_NE(count, 0);
		break;
	case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
		GUEST_ASSERT(count >= NUM_INSNS_RETIRED);
		break;
	default:
		break;
	}

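	/*
	 * The final WRMSR of the measured sequence zeroed the control MSR,
	 * i.e. the event is now disabled, so re-running the loop must not
	 * change the count, and a write of an arbitrary value to the counter
	 * MSR must be read back verbatim via RDPMC.
	 */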
sanity_checks:
	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
	GUEST_ASSERT_EQ(_rdpmc(pmc), count);

	wrmsr(pmc_msr, 0xdead);
	GUEST_ASSERT_EQ(_rdpmc(pmc), 0xdead);
}

/*
 * Enable and disable the PMC in a monolithic asm blob to ensure that the
 * compiler can't insert _any_ code into the measured sequence.  Note, ECX
 * doesn't need to be clobbered as the input value, @_msr, is restored before
 * the end of the sequence.
 *
 * If CLFLUSH{,OPT} is supported, flush the cacheline containing (at least) the
 * start of the loop to force LLC references and misses, i.e. to allow testing
 * that those events actually count.
 *
 * If forced emulation is enabled (and specified), force emulation on a subset
 * of the measured code to verify that KVM correctly emulates instructions and
 * branches retired events in conjunction with hardware also counting said
 * events.
 */
#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP)				\
do {										\
	__asm__ __volatile__("wrmsr\n\t"					\
			     clflush "\n\t"					\
			     "mfence\n\t"					\
			     "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t"	\
			     FEP "loop .\n\t"					\
			     FEP "mov %%edi, %%ecx\n\t"				\
			     FEP "xor %%eax, %%eax\n\t"				\
			     FEP "xor %%edx, %%edx\n\t"				\
			     "wrmsr\n\t"					\
			     :: "a"((uint32_t)_value), "d"(_value >> 32),	\
				"c"(_msr), "D"(_msr)				\
	);									\
} while (0)
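
/*
 * Register usage in the blob above: EDX:EAX and ECX are pre-loaded for the
 * first WRMSR, which enables the event via @_msr.  The MOV into ECX sets up
 * the LOOP iteration count, after which ECX is restored from EDI (which also
 * holds @_msr) and EDX:EAX are zeroed so that the final WRMSR disables the
 * event by writing '0' to the same MSR.
 */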

#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP)	\
do {										\
	wrmsr(_pmc_msr, 0);							\
										\
	if (this_cpu_has(X86_FEATURE_CLFLUSHOPT))				\
		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt 1f", FEP);	\
	else if (this_cpu_has(X86_FEATURE_CLFLUSH))				\
		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush 1f", FEP);	\
	else									\
		GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP);		\
										\
	guest_assert_event_count(_idx, _event, _pmc, _pmc_msr);		\
} while (0)

static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
				    uint32_t pmc, uint32_t pmc_msr,
				    uint32_t ctrl_msr, uint64_t ctrl_msr_value)
{
	GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");

	if (is_forced_emulation_enabled)
		GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
}

#define X86_PMU_FEATURE_NULL						\
({									\
	struct kvm_x86_pmu_feature feature = {};			\
									\
	feature;							\
})

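/*
 * An all-zeroes feature acts as a sentinel for architectural events that have
 * no fixed counter counterpart, e.g. LLC references/misses (see the
 * intel_event_to_feature table below).
 */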
static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
{
	return !(*(u64 *)&event);
}

static void guest_test_arch_event(uint8_t idx)
{
	const struct {
		struct kvm_x86_pmu_feature gp_event;
		struct kvm_x86_pmu_feature fixed_event;
	} intel_event_to_feature[] = {
		[INTEL_ARCH_CPU_CYCLES_INDEX]		 = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
		[INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX]	 = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
		/*
		 * Note, the fixed counter for reference cycles is NOT the same
		 * as the general purpose architectural event.  The fixed counter
		 * explicitly counts at the same frequency as the TSC, whereas
		 * the GP event counts at a fixed, but uarch specific, frequency.
		 * Bundle them here for simplicity.
		 */
		[INTEL_ARCH_REFERENCE_CYCLES_INDEX]	 = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
		[INTEL_ARCH_LLC_REFERENCES_INDEX]	 = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_LLC_MISSES_INDEX]		 = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
		[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
	};

	uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
	uint32_t pmu_version = guest_get_pmu_version();
	/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
	bool guest_has_perf_global_ctrl = pmu_version >= 2;
	struct kvm_x86_pmu_feature gp_event, fixed_event;
	uint32_t base_pmc_msr;
	unsigned int i;

	/* The host side shouldn't invoke this without a guest PMU. */
	GUEST_ASSERT(pmu_version);

	if (this_cpu_has(X86_FEATURE_PDCM) &&
	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
		base_pmc_msr = MSR_IA32_PMC0;
	else
		base_pmc_msr = MSR_IA32_PERFCTR0;

	gp_event = intel_event_to_feature[idx].gp_event;
	GUEST_ASSERT_EQ(idx, gp_event.f.bit);

	GUEST_ASSERT(nr_gp_counters);

	for (i = 0; i < nr_gp_counters; i++) {
		uint64_t eventsel = ARCH_PERFMON_EVENTSEL_OS |
				    ARCH_PERFMON_EVENTSEL_ENABLE |
				    intel_pmu_arch_events[idx];

		wrmsr(MSR_P6_EVNTSEL0 + i, 0);
		if (guest_has_perf_global_ctrl)
			wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(i));

		__guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i,
					MSR_P6_EVNTSEL0 + i, eventsel);
	}

	if (!guest_has_perf_global_ctrl)
		return;

	fixed_event = intel_event_to_feature[idx].fixed_event;
	if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event))
		return;

	i = fixed_event.f.bit;

	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));

	__guest_test_arch_event(idx, fixed_event, i | INTEL_RDPMC_FIXED,
				MSR_CORE_PERF_FIXED_CTR0 + i,
				MSR_CORE_PERF_GLOBAL_CTRL,
				FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
}

static void guest_test_arch_events(void)
{
	uint8_t i;

	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++)
		guest_test_arch_event(i);

	GUEST_DONE();
}

static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
			     uint8_t length, uint8_t unavailable_mask)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Testing arch events requires a vPMU (there are no negative tests). */
	if (!pmu_version)
		return;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
					 pmu_version, perf_capabilities);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
				length);
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EVENTS_MASK,
				unavailable_mask);

	run_vcpu(vcpu);

	kvm_vm_free(vm);
}

/*
 * Limit testing to MSRs that are actually defined by Intel (in the SDM).  MSRs
 * that aren't defined counter MSRs *probably* don't exist, but there's no
 * guarantee that currently undefined MSR indices won't be used for something
 * other than PMCs in the future.
 */
#define MAX_NR_GP_COUNTERS	8
#define MAX_NR_FIXED_COUNTERS	3

#define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector)		\
__GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector,			\
	       "Expected %s on " #insn "(0x%x), got vector %u",			\
	       expect_gp ? "#GP" : "no fault", msr, vector)

#define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected_val)			\
	__GUEST_ASSERT(val == expected_val,					\
		       "Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx",	\
		       msr, expected_val, val);

static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
			     uint64_t expected_val)
{
	uint8_t vector;
	uint64_t val;

	vector = rdpmc_safe(rdpmc_idx, &val);
	GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
	if (expect_success)
		GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);

	if (!is_forced_emulation_enabled)
		return;

	vector = rdpmc_safe_fep(rdpmc_idx, &val);
	GUEST_ASSERT_PMC_MSR_ACCESS(RDPMC, rdpmc_idx, !expect_success, vector);
	if (expect_success)
		GUEST_ASSERT_PMC_VALUE(RDPMC, rdpmc_idx, val, expected_val);
}

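/*
 * Exhaustively test read/write access for all possible counters of a given
 * class: counters that are supported must accept writes and read them back,
 * while accesses to nonexistent counters must #GP (modulo the legacy
 * MSR_P6_PERFCTR quirk handled below).
 */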
static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters,
				 uint8_t nr_counters, uint32_t or_mask)
{
	const bool pmu_has_fast_mode = !guest_get_pmu_version();
	uint8_t i;

	for (i = 0; i < nr_possible_counters; i++) {
		/*
		 * TODO: Test a value that validates full-width writes and the
		 * width of the counters.
		 */
		const uint64_t test_val = 0xffff;
		const uint32_t msr = base_msr + i;

		/*
		 * A counter is supported if its index is less than the number
		 * of enumerated, contiguous counters *or* (fixed counters
		 * only) the counter is explicitly enumerated in the supported
		 * counters mask.
		 */
		const bool expect_success = i < nr_counters || (or_mask & BIT(i));

		/*
		 * KVM drops writes to MSR_P6_PERFCTR[0|1] if the counters are
		 * unsupported, i.e. doesn't #GP and reads back '0'.
		 */
		const uint64_t expected_val = expect_success ? test_val : 0;
		const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 &&
				       msr != MSR_P6_PERFCTR1;
		uint32_t rdpmc_idx;
		uint8_t vector;
		uint64_t val;

		vector = wrmsr_safe(msr, test_val);
		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);

		vector = rdmsr_safe(msr, &val);
		GUEST_ASSERT_PMC_MSR_ACCESS(RDMSR, msr, expect_gp, vector);

		/* On #GP, the result of RDMSR is undefined. */
		if (!expect_gp)
			GUEST_ASSERT_PMC_VALUE(RDMSR, msr, val, expected_val);

		/*
		 * Redo the read tests with RDPMC, which has different indexing
		 * semantics and additional capabilities.
		 */
		rdpmc_idx = i;
		if (base_msr == MSR_CORE_PERF_FIXED_CTR0)
			rdpmc_idx |= INTEL_RDPMC_FIXED;
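		/*
		 * Note, setting INTEL_RDPMC_FIXED selects the fixed-function
		 * counter class in RDPMC's index (ECX), i.e. fixed counters
		 * are not addressed with a flat, MSR-style index.
		 */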

		guest_test_rdpmc(rdpmc_idx, expect_success, expected_val);

		/*
		 * KVM doesn't support non-architectural PMUs, i.e. it should
		 * be impossible to have fast mode RDPMC.  Verify that
		 * attempting to use fast RDPMC always #GPs.
		 */
		GUEST_ASSERT(!expect_success || !pmu_has_fast_mode);
		rdpmc_idx |= INTEL_RDPMC_FAST;
		guest_test_rdpmc(rdpmc_idx, false, -1ull);

		vector = wrmsr_safe(msr, 0);
		GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);
	}
}

static void guest_test_gp_counters(void)
{
	uint8_t pmu_version = guest_get_pmu_version();
	uint8_t nr_gp_counters = 0;
	uint32_t base_msr;

	if (pmu_version)
		nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);

	/*
	 * For v2+ PMUs, PERF_GLOBAL_CTRL's architectural post-RESET value is
	 * "Sets bits n-1:0 and clears the upper bits", where 'n' is the number
	 * of GP counters.  If there are no GP counters, require KVM to leave
	 * PERF_GLOBAL_CTRL '0'.  This edge case isn't covered by the SDM, but
	 * follow the spirit of the architecture and only globally enable GP
	 * counters, of which there are none.
	 */
	if (pmu_version > 1) {
		uint64_t global_ctrl = rdmsr(MSR_CORE_PERF_GLOBAL_CTRL);

		if (nr_gp_counters)
			GUEST_ASSERT_EQ(global_ctrl, GENMASK_ULL(nr_gp_counters - 1, 0));
		else
			GUEST_ASSERT_EQ(global_ctrl, 0);
	}

	if (this_cpu_has(X86_FEATURE_PDCM) &&
	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
		base_msr = MSR_IA32_PMC0;
	else
		base_msr = MSR_IA32_PERFCTR0;

	guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0);
	GUEST_DONE();
}

static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
			     uint8_t nr_gp_counters)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters,
					 pmu_version, perf_capabilities);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
				nr_gp_counters);

	run_vcpu(vcpu);

	kvm_vm_free(vm);
}

static void guest_test_fixed_counters(void)
{
	uint64_t supported_bitmask = 0;
	uint8_t nr_fixed_counters = 0;
	uint8_t i;

	/* Fixed counters require Architectural vPMU Version 2+. */
	if (guest_get_pmu_version() >= 2)
		nr_fixed_counters = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);

	/*
	 * The supported bitmask for fixed counters was introduced in PMU
	 * version 5.
	 */
	if (guest_get_pmu_version() >= 5)
		supported_bitmask = this_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK);

	guest_rd_wr_counters(MSR_CORE_PERF_FIXED_CTR0, MAX_NR_FIXED_COUNTERS,
			     nr_fixed_counters, supported_bitmask);

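	/*
	 * Beyond the read/write checks above, verify that each supported
	 * fixed counter actually counts when enabled via FIXED_CTR_CTRL and
	 * PERF_GLOBAL_CTRL, and that attempting to enable an unsupported
	 * counter in either MSR faults with a #GP.
	 */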
	for (i = 0; i < MAX_NR_FIXED_COUNTERS; i++) {
		uint8_t vector;
		uint64_t val;

		if (i >= nr_fixed_counters && !(supported_bitmask & BIT_ULL(i))) {
			vector = wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL,
					    FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
			__GUEST_ASSERT(vector == GP_VECTOR,
				       "Expected #GP for counter %u in FIXED_CTR_CTRL", i);

			vector = wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL,
					    FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
			__GUEST_ASSERT(vector == GP_VECTOR,
				       "Expected #GP for counter %u in PERF_GLOBAL_CTRL", i);
			continue;
		}

		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);

		GUEST_ASSERT_NE(val, 0);
	}
	GUEST_DONE();
}

static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
				uint8_t nr_fixed_counters,
				uint32_t supported_bitmask)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_fixed_counters,
					 pmu_version, perf_capabilities);

	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK,
				supported_bitmask);
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_FIXED_COUNTERS,
				nr_fixed_counters);

	run_vcpu(vcpu);

	kvm_vm_free(vm);
}

static void test_intel_counters(void)
{
	uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
	uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
	unsigned int i;
	uint8_t v, j;
	uint32_t k;

	const uint64_t perf_caps[] = {
		0,
		PMU_CAP_FW_WRITES,
	};

	/*
	 * Test up to PMU v5, which is the current maximum version defined by
	 * Intel, i.e. the last version that is guaranteed to be backwards
	 * compatible with KVM's existing behavior.
	 */
	uint8_t max_pmu_version = max_t(typeof(pmu_version), pmu_version, 5);
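	/*
	 * Note, taking the max with KVM's reported version means PMU versions
	 * beyond what KVM actually supports may be advertised to the guest;
	 * see guest_get_pmu_version() for why that is deliberate.
	 */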

	/*
	 * Detect the existence of events that aren't supported by selftests.
	 * This will (obviously) fail any time the kernel adds support for a
	 * new event, but it's worth paying that price to keep the test fresh.
	 */
	TEST_ASSERT(nr_arch_events <= NR_INTEL_ARCH_EVENTS,
		    "New architectural event(s) detected; please update this test (length = %u, mask = %x)",
		    nr_arch_events, kvm_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));

	/*
	 * Force iterating over known arch events regardless of whether or not
	 * KVM/hardware supports a given event.
	 */
	nr_arch_events = max_t(typeof(nr_arch_events), nr_arch_events, NR_INTEL_ARCH_EVENTS);

	for (v = 0; v <= max_pmu_version; v++) {
		for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
			if (!kvm_has_perf_caps && perf_caps[i])
				continue;

			pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n",
				v, perf_caps[i]);
			/*
			 * To keep the total runtime reasonable, test every
			 * possible non-zero, non-reserved bitmap combination
			 * only with the native PMU version and the full bit
			 * vector length.
			 */
			if (v == pmu_version) {
				for (k = 1; k < (BIT(nr_arch_events) - 1); k++)
					test_arch_events(v, perf_caps[i], nr_arch_events, k);
			}
			/*
			 * Test single bits for all PMU versions, and lengths
			 * up to the number of events + 1 (to verify KVM
			 * doesn't do weird things if the guest length is
			 * greater than the host length).  Explicitly test a
			 * mask of '0' and all ones, i.e. all events being
			 * available and unavailable, respectively.
			 */
			for (j = 0; j <= nr_arch_events + 1; j++) {
				test_arch_events(v, perf_caps[i], j, 0);
				test_arch_events(v, perf_caps[i], j, 0xff);

				for (k = 0; k < nr_arch_events; k++)
					test_arch_events(v, perf_caps[i], j, BIT(k));
			}

			pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n",
				v, perf_caps[i]);
			for (j = 0; j <= nr_gp_counters; j++)
				test_gp_counters(v, perf_caps[i], j);

			pr_info("Testing fixed counters, PMU version %u, perf_caps = %lx\n",
				v, perf_caps[i]);
			for (j = 0; j <= nr_fixed_counters; j++) {
				for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++)
					test_fixed_counters(v, perf_caps[i], j, k);
			}
		}
	}
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_is_pmu_enabled());

	TEST_REQUIRE(host_cpu_is_intel);
	TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
	TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);

	kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
	kvm_has_perf_caps = kvm_cpu_has(X86_FEATURE_PDCM);

	test_intel_counters();

	return 0;
}