xref: /linux/arch/x86/kvm/vmx/pmu_intel.c (revision d374b89edbb9a8d552e03348f59287ff779b4c9d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * KVM PMU support for Intel CPUs
4  *
5  * Copyright 2011 Red Hat, Inc. and/or its affiliates.
6  *
7  * Authors:
8  *   Avi Kivity   <avi@redhat.com>
9  *   Gleb Natapov <gleb@redhat.com>
10  */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 
13 #include <linux/types.h>
14 #include <linux/kvm_host.h>
15 #include <linux/perf_event.h>
16 #include <asm/msr.h>
17 #include <asm/perf_event.h>
18 #include "x86.h"
19 #include "cpuid.h"
20 #include "lapic.h"
21 #include "nested.h"
22 #include "pmu.h"
23 #include "tdx.h"
24 
25 /*
26  * Perf's "BASE" is wildly misleading: architectural PMUs use bits 31:16 of ECX
27  * to encode the "type" of counter to read, i.e. this is not a "base".  And to
28  * further confuse things, non-architectural PMUs use bit 31 as a flag for
29  * "fast" reads, whereas the "type" is an explicit value.
30  */
31 #define INTEL_RDPMC_GP		0
32 #define INTEL_RDPMC_FIXED	INTEL_PMC_FIXED_RDPMC_BASE
33 
34 #define INTEL_RDPMC_TYPE_MASK	GENMASK(31, 16)
35 #define INTEL_RDPMC_INDEX_MASK	GENMASK(15, 0)
36 
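/*
 * The full-width counter MSRs (MSR_IA32_PMC0 ...) alias the legacy counter
 * MSRs (MSR_IA32_PERFCTR0 ...) at a fixed power-of-two offset, so testing
 * this bit in the MSR index distinguishes full-width accesses from legacy
 * accesses, the latter of which sign-extend the 32-bit value on writes.
 */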
37 #define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
38 
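/*
 * TD vCPUs have no struct vcpu_vmx and no KVM-managed LBR state, so return
 * NULL and force callers to explicitly handle the "no LBR descriptor" case.
 */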
39 static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
40 {
41 	if (is_td_vcpu(vcpu))
42 		return NULL;
43 
44 	return &to_vmx(vcpu)->lbr_desc;
45 }
46 
47 static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
48 {
49 	if (is_td_vcpu(vcpu))
50 		return NULL;
51 
52 	return &to_vmx(vcpu)->lbr_desc.records;
53 }
54 
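/*
 * Poison to_vmx() so that all code below must go through the NULL-checking
 * wrappers above, as a TD vCPU has no struct vcpu_vmx to convert to.
 */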
55 #pragma GCC poison to_vmx
56 
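/*
 * Propagate a write to MSR_CORE_PERF_FIXED_CTR_CTRL: request reprogramming
 * only for fixed counters whose control field actually changed.  The "_hw"
 * mirror is what intel_mediated_pmu_load() writes into hardware.
 */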
57 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
58 {
59 	struct kvm_pmc *pmc;
60 	u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
61 	int i;
62 
63 	pmu->fixed_ctr_ctrl = data;
64 	pmu->fixed_ctr_ctrl_hw = data;
65 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
66 		u8 new_ctrl = fixed_ctrl_field(data, i);
67 		u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
68 
69 		if (old_ctrl == new_ctrl)
70 			continue;
71 
72 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
73 
74 		__set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
75 		kvm_pmu_request_counter_reprogram(pmc);
76 	}
77 }
78 
79 static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
80 					    unsigned int idx, u64 *mask)
81 {
82 	unsigned int type = idx & INTEL_RDPMC_TYPE_MASK;
83 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
84 	struct kvm_pmc *counters;
85 	unsigned int num_counters;
86 	u64 bitmask;
87 
88 	/*
89 	 * The encoding of ECX for RDPMC is different for architectural versus
90 	 * non-architectural PMUs (PMUs with version '0').  For architectural
91 	 * PMUs, bits 31:16 specify the PMC type and bits 15:0 specify the PMC
92 	 * index.  For non-architectural PMUs, bit 31 is a "fast" flag, and
93 	 * bits 30:0 specify the PMC index.
94 	 *
95 	 * Yell and reject attempts to read PMCs for a non-architectural PMU,
96 	 * as KVM doesn't support such PMUs.
97 	 */
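	/*
	 * E.g. for an architectural PMU, ECX = 0x40000001 reads fixed counter
	 * 1 (type = INTEL_RDPMC_FIXED in bits 31:16, index 1 in bits 15:0),
	 * while ECX = 0x1 reads general purpose counter 1 (type 0).
	 */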
98 	if (WARN_ON_ONCE(!pmu->version))
99 		return NULL;
100 
101 	/*
102 	 * General Purpose (GP) PMCs are supported on all PMUs, and fixed PMCs
103 	 * are supported on all architectural PMUs, i.e. on all virtual PMUs
104 	 * supported by KVM.  Note, KVM only emulates fixed PMCs for PMU v2+,
105 	 * but the type itself is still valid, i.e. let RDPMC fail due to
106 	 * accessing a non-existent counter.  Reject attempts to read all other
107 	 * types, which are unknown/unsupported.
108 	 */
109 	switch (type) {
110 	case INTEL_RDPMC_FIXED:
111 		counters = pmu->fixed_counters;
112 		num_counters = pmu->nr_arch_fixed_counters;
113 		bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
114 		break;
115 	case INTEL_RDPMC_GP:
116 		counters = pmu->gp_counters;
117 		num_counters = pmu->nr_arch_gp_counters;
118 		bitmask = pmu->counter_bitmask[KVM_PMC_GP];
119 		break;
120 	default:
121 		return NULL;
122 	}
123 
124 	idx &= INTEL_RDPMC_INDEX_MASK;
125 	if (idx >= num_counters)
126 		return NULL;
127 
128 	*mask &= bitmask;
129 	return &counters[array_index_nospec(idx, num_counters)];
130 }
131 
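/*
 * The full-width counter MSR aliases are only valid if the guest has been
 * granted full-width writes (PERF_CAP_FW_WRITES).
 */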
132 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
133 {
134 	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
135 		return NULL;
136 
137 	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
138 }
139 
140 static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
141 {
142 	if (is_td_vcpu(vcpu))
143 		return false;
144 
145 	return cpuid_model_is_consistent(vcpu);
146 }
147 
148 bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
149 {
150 	if (is_td_vcpu(vcpu))
151 		return false;
152 
153 	return !!vcpu_to_lbr_records(vcpu)->nr;
154 }
155 
156 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
157 {
158 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
159 	bool ret = false;
160 
161 	if (!intel_pmu_lbr_is_enabled(vcpu))
162 		return ret;
163 
164 	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
165 		(index >= records->from && index < records->from + records->nr) ||
166 		(index >= records->to && index < records->to + records->nr);
167 
168 	if (!ret && records->info)
169 		ret = (index >= records->info && index < records->info + records->nr);
170 
171 	return ret;
172 }
173 
174 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
175 {
176 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
177 	u64 perf_capabilities;
178 	int ret;
179 
180 	switch (msr) {
181 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
182 		return kvm_pmu_has_perf_global_ctrl(pmu);
183 	case MSR_IA32_PEBS_ENABLE:
184 		ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
185 		break;
186 	case MSR_IA32_DS_AREA:
187 		ret = guest_cpu_cap_has(vcpu, X86_FEATURE_DS);
188 		break;
189 	case MSR_PEBS_DATA_CFG:
190 		perf_capabilities = vcpu_get_perf_capabilities(vcpu);
191 		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
192 			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
193 		break;
194 	default:
195 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
196 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
197 			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
198 			intel_pmu_is_valid_lbr_msr(vcpu, msr);
199 		break;
200 	}
201 
202 	return ret;
203 }
204 
205 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
206 {
207 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
208 	struct kvm_pmc *pmc;
209 
210 	pmc = get_fixed_pmc(pmu, msr);
211 	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
212 	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
213 
214 	return pmc;
215 }
216 
217 static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
218 {
219 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
220 
221 	if (!lbr_desc)
222 		return;
223 
224 	if (lbr_desc->event) {
225 		perf_event_release_kernel(lbr_desc->event);
226 		lbr_desc->event = NULL;
227 		vcpu_to_pmu(vcpu)->event_count--;
228 	}
229 }
230 
231 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
232 {
233 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
234 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
235 	struct perf_event *event;
236 
237 	/*
238 	 * The perf_event_attr is constructed with the bare minimum of fields:
239 	 * - set 'pinned = true' to make it task pinned, so that if another
240 	 *   CPU-pinned event reclaims the LBR, event->oncpu will be set to -1;
241 	 * - set '.exclude_host = true' so that only guest branches are recorded;
242 	 *
243 	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to tell host perf to
244 	 *   schedule the event on a fake counter rather than a real HW counter;
245 	 *   see is_guest_lbr_event() and __intel_get_event_constraints();
246 	 *
247 	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
248 	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
249 	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
250 	 *   event, which lets perf save/restore guest LBR records across
251 	 *   host context switches and avoids considerable overhead; see
252 	 *   branch_user_callstack() and intel_pmu_lbr_sched_task();
253 	 */
254 	struct perf_event_attr attr = {
255 		.type = PERF_TYPE_RAW,
256 		.size = sizeof(attr),
257 		.config = INTEL_FIXED_VLBR_EVENT,
258 		.sample_type = PERF_SAMPLE_BRANCH_STACK,
259 		.pinned = true,
260 		.exclude_host = true,
261 		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
262 					PERF_SAMPLE_BRANCH_USER,
263 	};
264 
265 	if (WARN_ON_ONCE(!lbr_desc))
266 		return 0;
267 
268 	if (unlikely(lbr_desc->event)) {
269 		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
270 		return 0;
271 	}
272 
273 	event = perf_event_create_kernel_counter(&attr, -1,
274 						current, NULL, NULL);
275 	if (IS_ERR(event)) {
276 		pr_debug_ratelimited("%s: failed %ld\n",
277 					__func__, PTR_ERR(event));
278 		return PTR_ERR(event);
279 	}
280 	lbr_desc->event = event;
281 	pmu->event_count++;
282 	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
283 	return 0;
284 }
285 
286 /*
287  * It's safe for the guest to access the LBR MSRs when they are not passed
288  * through, since the host restores or resets the LBR MSR contents when the
289  * guest LBR event is scheduled in.
290  */
291 static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
292 				     struct msr_data *msr_info, bool read)
293 {
294 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
295 	u32 index = msr_info->index;
296 
297 	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
298 		return false;
299 
300 	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
301 		goto dummy;
302 
303 	/*
304 	 * Disable IRQs to ensure the LBR feature isn't reclaimed by the host
305 	 * while the value is read from (or written to) the MSR, which would
306 	 * let host LBR values leak to the guest.  If the LBR has already been
307 	 * reclaimed, return 0 on guest reads.
308 	 */
309 	local_irq_disable();
310 	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
311 		if (read)
312 			rdmsrq(index, msr_info->data);
313 		else
314 			wrmsrq(index, msr_info->data);
315 		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
316 		local_irq_enable();
317 		return true;
318 	}
319 	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
320 	local_irq_enable();
321 
322 dummy:
323 	if (read)
324 		msr_info->data = 0;
325 	return true;
326 }
327 
328 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
329 {
330 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
331 	struct kvm_pmc *pmc;
332 	u32 msr = msr_info->index;
333 
334 	switch (msr) {
335 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
336 		msr_info->data = pmu->fixed_ctr_ctrl;
337 		break;
338 	case MSR_IA32_PEBS_ENABLE:
339 		msr_info->data = pmu->pebs_enable;
340 		break;
341 	case MSR_IA32_DS_AREA:
342 		msr_info->data = pmu->ds_area;
343 		break;
344 	case MSR_PEBS_DATA_CFG:
345 		msr_info->data = pmu->pebs_data_cfg;
346 		break;
347 	default:
348 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
349 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
350 			u64 val = pmc_read_counter(pmc);
351 			msr_info->data =
352 				val & pmu->counter_bitmask[KVM_PMC_GP];
353 			break;
354 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
355 			u64 val = pmc_read_counter(pmc);
356 			msr_info->data =
357 				val & pmu->counter_bitmask[KVM_PMC_FIXED];
358 			break;
359 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
360 			msr_info->data = pmc->eventsel;
361 			break;
362 		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
363 			break;
364 		}
365 		return 1;
366 	}
367 
368 	return 0;
369 }
370 
371 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
372 {
373 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
374 	struct kvm_pmc *pmc;
375 	u32 msr = msr_info->index;
376 	u64 data = msr_info->data;
377 	u64 reserved_bits, diff;
378 
379 	switch (msr) {
380 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
381 		if (data & pmu->fixed_ctr_ctrl_rsvd)
382 			return 1;
383 
384 		if (pmu->fixed_ctr_ctrl != data)
385 			reprogram_fixed_counters(pmu, data);
386 		break;
387 	case MSR_IA32_PEBS_ENABLE:
388 		if (data & pmu->pebs_enable_rsvd)
389 			return 1;
390 
391 		if (pmu->pebs_enable != data) {
392 			diff = pmu->pebs_enable ^ data;
393 			pmu->pebs_enable = data;
394 			reprogram_counters(pmu, diff);
395 		}
396 		break;
397 	case MSR_IA32_DS_AREA:
398 		if (is_noncanonical_msr_address(data, vcpu))
399 			return 1;
400 
401 		pmu->ds_area = data;
402 		break;
403 	case MSR_PEBS_DATA_CFG:
404 		if (data & pmu->pebs_data_cfg_rsvd)
405 			return 1;
406 
407 		pmu->pebs_data_cfg = data;
408 		break;
409 	default:
410 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
411 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
412 			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
413 			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
414 				return 1;
415 
416 			if (!msr_info->host_initiated &&
417 			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
418 				data = (s64)(s32)data;
419 			pmc_write_counter(pmc, data);
420 			break;
421 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
422 			pmc_write_counter(pmc, data);
423 			break;
424 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
425 			reserved_bits = pmu->reserved_bits;
426 			if ((pmc->idx == 2) &&
427 			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
428 				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
429 			if (data & reserved_bits)
430 				return 1;
431 
432 			if (data != pmc->eventsel) {
433 				pmc->eventsel = data;
434 				pmc->eventsel_hw = data;
435 				kvm_pmu_request_counter_reprogram(pmc);
436 			}
437 			break;
438 		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
439 			break;
440 		}
441 		/* Not a known PMU MSR. */
442 		return 1;
443 	}
444 
445 	return 0;
446 }
447 
448 /*
449  * Map fixed counter events to architectural general purpose event encodings.
450  * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
451  * and so KVM instead programs the architectural event to effectively request
452  * the fixed counter.  Perf isn't guaranteed to use a fixed counter and may
453  * instead program the encoding into a general purpose counter, e.g. if a
454  * different perf_event is already utilizing the requested counter, but the end
455  * result is the same (ignoring the fact that using a general purpose counter
456  * will likely exacerbate counter contention).
457  *
458  * Forcibly inlined to allow asserting on @index at build time, and there should
459  * never be more than one user.
460  */
461 static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
462 {
463 	const enum perf_hw_id fixed_pmc_perf_ids[] = {
464 		[0] = PERF_COUNT_HW_INSTRUCTIONS,
465 		[1] = PERF_COUNT_HW_CPU_CYCLES,
466 		[2] = PERF_COUNT_HW_REF_CPU_CYCLES,
467 	};
468 	u64 eventsel;
469 
470 	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUNTERS);
471 	BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUNTERS);
472 
473 	/*
474 	 * Yell if perf reports support for a fixed counter but perf doesn't
475 	 * have a known encoding for the associated general purpose event.
476 	 */
477 	eventsel = perf_get_hw_event_config(fixed_pmc_perf_ids[index]);
478 	WARN_ON_ONCE(!eventsel && index < kvm_pmu_cap.num_counters_fixed);
479 	return eventsel;
480 }
481 
482 static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
483 {
484 	int i;
485 
486 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
487 		pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
488 }
489 
490 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
491 {
492 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
493 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
494 	struct kvm_cpuid_entry2 *entry;
495 	union cpuid10_eax eax;
496 	union cpuid10_edx edx;
497 	u64 perf_capabilities;
498 	u64 counter_rsvd;
499 
500 	if (!lbr_desc)
501 		return;
502 
503 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
504 
505 	/*
506 	 * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
507 	 * and PMU refresh is disallowed after the vCPU has run, i.e. this code
508 	 * should never be reached while KVM is passing through MSRs.
509 	 */
510 	if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
511 		return;
512 
513 	entry = kvm_find_cpuid_entry(vcpu, 0xa);
514 	if (!entry)
515 		return;
516 
517 	eax.full = entry->eax;
518 	edx.full = entry->edx;
519 
520 	pmu->version = eax.split.version_id;
521 	if (!pmu->version)
522 		return;
523 
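	/*
	 * Clamp the guest's CPUID.0xA enumeration (number of GP counters,
	 * counter width, and event mask length) to what host perf supports.
	 */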
524 	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
525 					 kvm_pmu_cap.num_counters_gp);
526 	eax.split.bit_width = min_t(int, eax.split.bit_width,
527 				    kvm_pmu_cap.bit_width_gp);
528 	pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(eax.split.bit_width) - 1;
529 	eax.split.mask_length = min_t(int, eax.split.mask_length,
530 				      kvm_pmu_cap.events_mask_len);
531 	pmu->available_event_types = ~entry->ebx & (BIT_ULL(eax.split.mask_length) - 1);
532 
533 	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
534 	if (entry &&
535 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
536 	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
537 		pmu->reserved_bits ^= HSW_IN_TX;
538 		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
539 	}
540 
541 	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
542 	if (intel_pmu_lbr_is_compatible(vcpu) &&
543 	    (perf_capabilities & PERF_CAP_LBR_FMT))
544 		memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
545 	else
546 		lbr_desc->records.nr = 0;
547 
548 	if (lbr_desc->records.nr)
549 		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
550 
551 	if (pmu->version == 1)
552 		return;
553 
554 	pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
555 					    kvm_pmu_cap.num_counters_fixed);
556 	edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
557 					  kvm_pmu_cap.bit_width_fixed);
558 	pmu->counter_bitmask[KVM_PMC_FIXED] = BIT_ULL(edx.split.bit_width_fixed) - 1;
559 
560 	intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
561 						 INTEL_FIXED_0_USER |
562 						 INTEL_FIXED_0_ENABLE_PMI);
563 
564 	counter_rsvd = ~((BIT_ULL(pmu->nr_arch_gp_counters) - 1) |
565 			 ((BIT_ULL(pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
566 	pmu->global_ctrl_rsvd = counter_rsvd;
567 
568 	/*
569 	 * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
570 	 * share reserved bit definitions.  The kernel just happens to use
571 	 * OVF_CTRL for the names.
572 	 */
573 	pmu->global_status_rsvd = pmu->global_ctrl_rsvd
574 			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
575 			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
576 	if (vmx_pt_mode_is_host_guest())
577 		pmu->global_status_rsvd &=
578 				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
579 
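	/*
	 * Without PEBS_BASELINE, only the GP counter bits of PEBS_ENABLE are
	 * writable; with it (adaptive PEBS), fixed counters can be PEBS-enabled
	 * as well and MSR_PEBS_DATA_CFG is exposed to the guest.
	 */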
580 	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
581 		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
582 			pmu->pebs_enable_rsvd = counter_rsvd;
583 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
584 			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
585 			intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
586 		} else {
587 			pmu->pebs_enable_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
588 		}
589 	}
590 }
591 
592 static void intel_pmu_init(struct kvm_vcpu *vcpu)
593 {
594 	int i;
595 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
596 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
597 
598 	if (!lbr_desc)
599 		return;
600 
601 	for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
602 		pmu->gp_counters[i].type = KVM_PMC_GP;
603 		pmu->gp_counters[i].vcpu = vcpu;
604 		pmu->gp_counters[i].idx = i;
605 		pmu->gp_counters[i].current_config = 0;
606 	}
607 
608 	for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUNTERS; i++) {
609 		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
610 		pmu->fixed_counters[i].vcpu = vcpu;
611 		pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
612 		pmu->fixed_counters[i].current_config = 0;
613 		pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
614 	}
615 
616 	lbr_desc->records.nr = 0;
617 	lbr_desc->event = NULL;
618 	lbr_desc->msr_passthrough = false;
619 }
620 
621 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
622 {
623 	intel_pmu_release_guest_lbr_event(vcpu);
624 }
625 
626 /*
627  * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
628  *
629  * If Freeze_LBR_On_PMI = 1, the LBRs are frozen on PMI, which KVM emulates
630  * by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
631  *
632  * The guest needs to re-enable LBRs to resume branch recording.
633  */
634 static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
635 {
636 	u64 data = vmx_guest_debugctl_read();
637 
638 	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
639 		data &= ~DEBUGCTLMSR_LBR;
640 		vmx_guest_debugctl_write(vcpu, data);
641 	}
642 }
643 
644 static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
645 {
646 	u8 version = vcpu_to_pmu(vcpu)->version;
647 
648 	if (!intel_pmu_lbr_is_enabled(vcpu))
649 		return;
650 
651 	if (version > 1 && version < 4)
652 		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
653 }
654 
655 static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
656 {
657 	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
658 	int i;
659 
660 	for (i = 0; i < lbr->nr; i++) {
661 		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
662 		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
663 		if (lbr->info)
664 			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
665 	}
666 
667 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
668 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
669 }
670 
671 static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
672 {
673 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
674 
675 	if (!lbr_desc->msr_passthrough)
676 		return;
677 
678 	vmx_update_intercept_for_lbr_msrs(vcpu, true);
679 	lbr_desc->msr_passthrough = false;
680 }
681 
682 static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
683 {
684 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
685 
686 	if (lbr_desc->msr_passthrough)
687 		return;
688 
689 	vmx_update_intercept_for_lbr_msrs(vcpu, false);
690 	lbr_desc->msr_passthrough = true;
691 }
692 
693 /*
694  * Higher priority host perf events (e.g. CPU-pinned events) can reclaim
695  * PMU resources (e.g. the LBRs) that were assigned to the guest.  This is
696  * usually done via IPIs (see perf_install_in_context() for details).
697  *
698  * Before entering VMX non-root mode (with IRQs disabled here), double-check
699  * that the PMU features exposed to the guest have not been reclaimed by
700  * higher priority host events, and disallow the vCPU's access to any
701  * reclaimed features.
702  */
703 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
704 {
705 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
706 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
707 
708 	if (WARN_ON_ONCE(!lbr_desc))
709 		return;
710 
711 	if (!lbr_desc->event) {
712 		vmx_disable_lbr_msrs_passthrough(vcpu);
713 		if (vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR)
714 			goto warn;
715 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
716 			goto warn;
717 		return;
718 	}
719 
720 	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
721 		vmx_disable_lbr_msrs_passthrough(vcpu);
722 		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
723 		goto warn;
724 	} else
725 		vmx_enable_lbr_msrs_passthrough(vcpu);
726 
727 	return;
728 
729 warn:
730 	pr_warn_ratelimited("vcpu-%d: failed to pass through LBR.\n", vcpu->vcpu_id);
731 }
732 
733 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
734 {
735 	if (!(vmx_guest_debugctl_read() & DEBUGCTLMSR_LBR))
736 		intel_pmu_release_guest_lbr_event(vcpu);
737 }
738 
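/*
 * A guest PMC is "cross-mapped" when perf backs it with a host counter whose
 * index differs from the guest's index, e.g. due to contention with other
 * host events; collect those host counters in host_cross_mapped_mask.
 */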
739 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
740 {
741 	struct kvm_pmc *pmc = NULL;
742 	int bit, hw_idx;
743 
744 	kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
745 		if (!pmc_is_locally_enabled(pmc) ||
746 		    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
747 			continue;
748 
749 		/*
750 		 * A negative index indicates the event isn't mapped to a
751 		 * physical counter in the host, e.g. due to contention.
752 		 */
753 		hw_idx = pmc->perf_event->hw.idx;
754 		if (hw_idx != pmc->idx && hw_idx > -1)
755 			pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
756 	}
757 }
758 
759 static bool intel_pmu_is_mediated_pmu_supported(struct x86_pmu_capability *host_pmu)
760 {
761 	u64 host_perf_cap = 0;
762 
763 	if (boot_cpu_has(X86_FEATURE_PDCM))
764 		rdmsrq(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
765 
766 	/*
767 	 * Require v4+ for MSR_CORE_PERF_GLOBAL_STATUS_SET, and full-width
768 	 * writes so that KVM can precisely load guest counter values.
769 	 */
770 	if (host_pmu->version < 4 || !(host_perf_cap & PERF_CAP_FW_WRITES))
771 		return false;
772 
773 	/*
774 	 * All CPUs that support a mediated PMU are expected to support loading
775 	 * PERF_GLOBAL_CTRL via dedicated VMCS fields.
776 	 */
777 	if (WARN_ON_ONCE(!cpu_has_load_perf_global_ctrl()))
778 		return false;
779 
780 	return true;
781 }
782 
783 static void intel_pmu_write_global_ctrl(u64 global_ctrl)
784 {
785 	vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, global_ctrl);
786 }
787 
788 
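/*
 * Load the guest's PMU state into hardware.  GLOBAL_STATUS can't be written
 * directly: bits set in hardware but clear in the guest's view are cleared
 * via GLOBAL_OVF_CTRL, and bits set in the guest's view but clear in hardware
 * are set via GLOBAL_STATUS_SET (hence the v4+ requirement above).
 */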
789 static void intel_mediated_pmu_load(struct kvm_vcpu *vcpu)
790 {
791 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
792 	u64 global_status, toggle;
793 
794 	rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, global_status);
795 	toggle = pmu->global_status ^ global_status;
796 	if (global_status & toggle)
797 		wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, global_status & toggle);
798 	if (pmu->global_status & toggle)
799 		wrmsrq(MSR_CORE_PERF_GLOBAL_STATUS_SET, pmu->global_status & toggle);
800 
801 	wrmsrq(MSR_CORE_PERF_FIXED_CTR_CTRL, pmu->fixed_ctr_ctrl_hw);
802 }
803 
804 static void intel_mediated_pmu_put(struct kvm_vcpu *vcpu)
805 {
806 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
807 
808 	/* MSR_CORE_PERF_GLOBAL_CTRL is already saved at VM-exit. */
809 	rdmsrq(MSR_CORE_PERF_GLOBAL_STATUS, pmu->global_status);
810 
811 	/* Clear hardware MSR_CORE_PERF_GLOBAL_STATUS MSR, if non-zero. */
812 	if (pmu->global_status)
813 		wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, pmu->global_status);
814 
815 	/*
816 	 * Clear hardware FIXED_CTR_CTRL MSR to avoid information leakage and
817 	 * also to avoid accidentally enabling fixed counters (based on guest
818 	 * state) while running in the host, e.g. when setting global ctrl.
819 	 */
820 	if (pmu->fixed_ctr_ctrl_hw)
821 		wrmsrq(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
822 }
823 
824 struct kvm_pmu_ops intel_pmu_ops __initdata = {
825 	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
826 	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
827 	.is_valid_msr = intel_is_valid_msr,
828 	.get_msr = intel_pmu_get_msr,
829 	.set_msr = intel_pmu_set_msr,
830 	.refresh = intel_pmu_refresh,
831 	.init = intel_pmu_init,
832 	.reset = intel_pmu_reset,
833 	.deliver_pmi = intel_pmu_deliver_pmi,
834 	.cleanup = intel_pmu_cleanup,
835 
836 	.is_mediated_pmu_supported = intel_pmu_is_mediated_pmu_supported,
837 	.mediated_load = intel_mediated_pmu_load,
838 	.mediated_put = intel_mediated_pmu_put,
839 	.write_global_ctrl = intel_pmu_write_global_ctrl,
840 
841 	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
842 	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
843 	.MIN_NR_GP_COUNTERS = 1,
844 
845 	.PERF_GLOBAL_CTRL = MSR_CORE_PERF_GLOBAL_CTRL,
846 	.GP_EVENTSEL_BASE = MSR_P6_EVNTSEL0,
847 	.GP_COUNTER_BASE = MSR_IA32_PMC0,
848 	.FIXED_COUNTER_BASE = MSR_CORE_PERF_FIXED_CTR0,
849 	.MSR_STRIDE = 1,
850 };
851