xref: /linux/arch/x86/kvm/vmx/pmu_intel.c (revision 0d3b051adbb72ed81956447d0d1e54d5943ee6f5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

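/*
 * MSR_IA32_PMC0 (0x4c1) and MSR_IA32_PERFCTR0 (0xc1) differ only in bit 10,
 * so "msr & MSR_PMC_FULL_WIDTH_BIT" distinguishes a full-width counter MSR
 * from its legacy counterpart.
 */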
#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Mapping between fixed pmc index and intel_arch_events array: fixed
 * counter 0 counts instructions, counter 1 counts core cycles and
 * counter 2 counts reference cycles.
 */
static int fixed_pmc_events[] = {1, 0, 7};

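/*
 * Called on a write to MSR_CORE_PERF_FIXED_CTR_CTRL.  Each fixed counter
 * owns a 4-bit control field in that MSR; only counters whose field
 * actually changed are reprogrammed.
 */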
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when MSR_CORE_PERF_GLOBAL_CTRL is written; the XOR against the
 * old value yields exactly the counters whose enable bit flipped.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

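/*
 * Map a guest event_select/unit_mask pair to a generic perf event.  Events
 * that guest CPUID 0xA.EBX flags as unavailable never match, since
 * available_event_types holds the inverted EBX bit vector.
 */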
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

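/*
 * Resolve a fixed counter index to its generic perf event via
 * fixed_pmc_events; array_index_nospec() clamps the index so it cannot be
 * used to speculatively read out of bounds.
 */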
static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

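/*
 * pmc_idx uses the MSR_CORE_PERF_GLOBAL_CTRL bit layout: GP counters sit
 * at 0 .. INTEL_PMC_IDX_FIXED - 1, fixed counters at INTEL_PMC_IDX_FIXED
 * and up.
 */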
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/*
 * Returns 0 if idx's corresponding MSR exists; otherwise returns 1.  Bit 30
 * of the RDPMC index selects the fixed counter range; the type bits are
 * masked off before the range check.
 */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

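/*
 * Full-width counter writes are reachable only when the guest sees PDCM in
 * CPUID and its IA32_PERF_CAPABILITIES value advertises full-width writes
 * (PMU_CAP_FW_WRITES).
 */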
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return false;

	return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

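/*
 * The global status/control MSRs exist only for PMU version 2 and later;
 * anything else must decode to a GP, fixed or full-width counter MSR.
 */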
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

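/*
 * Counter reads are masked to the bit width the guest was told about, so
 * RDMSR returns the same truncated value RDPMC would.
 */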
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
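		/*
		 * Only the enable bits (0-1) and the PMI bit (3) of each
		 * 4-bit fixed counter control field may be set; the
		 * AnyThread bit (2) and everything above the third field
		 * is reserved.
		 */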
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
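			/*
			 * Legacy writes through MSR_IA32_PERFCTRx are
			 * sign-extended from bit 31, whereas the full-width
			 * aliases take the value verbatim (reserved bits
			 * were rejected above).
			 */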
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

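/*
 * Rebuild the PMU model exposed to the guest: counter counts and widths
 * come from guest CPUID leaf 0xA, clamped to what host perf reports, and
 * the reserved-bit masks for the global control MSRs are derived from the
 * result.
 */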
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	vcpu->arch.perf_capabilities = 0;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);
	if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

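	/*
	 * All implemented counters have an enable bit in global_ctrl; the
	 * complement of that set forms the reserved-bit mask for writes to
	 * GLOBAL_CTRL and GLOBAL_OVF_CTRL, except that GLOBAL_OVF_CTRL can
	 * also clear the buffer-overflow and condition-changed status bits
	 * (and the PT ToPA PMI bit when PT runs in host/guest mode).
	 */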
	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	/*
	 * Allow the HSW_IN_TX/HSW_IN_TX_CHECKPOINTED eventsel bits if the
	 * host supports TSX and the guest CPUID advertises HLE or RTM.
	 * X86_FEATURE_* constants are feature numbers, not register masks,
	 * so the guest's CPUID.7.0.EBX bits must be checked with
	 * cpuid_entry_has(), not masked against entry->ebx directly.
	 */
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (cpuid_entry_has(entry, X86_FEATURE_HLE) ||
	     cpuid_entry_has(entry, X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};