// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * The implementation is based on pmu_intel.c.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

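/*
 * Map a raw counter index to the vCPU's general-purpose PMC.  The index
 * is clamped with array_index_nospec() so a mispredicted bounds check
 * can't read beyond gp_counters[] under speculation.
 */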
static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

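/*
 * Look up the PMC backing an MSR of the given type.  The legacy K7 MSRs
 * use separate EVNTSEL/PERFCTR ranges, while the PERFCTR_CORE MSRs
 * interleave CTL (event select) and CTR (counter) registers per counter.
 */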
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	unsigned int idx;

	if (!pmu->version)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		/*
		 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
		 */
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
			return NULL;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		idx = msr - MSR_K7_PERFCTR0;
		break;
	default:
		return NULL;
	}

	return amd_pmu_get_pmc(pmu, idx);
}

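/* Reject out-of-range RDPMC indices before common RDPMC emulation runs. */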
static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (idx >= pmu->nr_arch_gp_counters)
		return -EINVAL;

	return 0;
}

/* idx is the ECX value passed to the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
}

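/* Resolve @msr to its PMC, trying counter MSRs first, then event selects. */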
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

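/* Return true if the vCPU's PMU configuration exposes @msr. */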
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	switch (msr) {
	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
		return pmu->version > 0;
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		return guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE);
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
		return pmu->version > 1;
	default:
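		/*
		 * PerfMonV2 guests may implement more than the six legacy
		 * CTL/CTR pairs; accept MSRs between CTR5 and the last
		 * implemented pair only for v2 (version > 1).
		 */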
		if (msr > MSR_F15H_PERF_CTR5 &&
		    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
			return pmu->version > 1;
		break;
	}

	return amd_msr_idx_to_pmc(vcpu, msr);
}

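/* Read a PMU MSR; returns 1 if @msr doesn't belong to this vCPU's PMU. */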
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

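/*
 * Write a PMU MSR.  Event select writes strip reserved bits and trigger
 * reprogramming of the backing perf event only if the value changed.
 */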
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc_write_counter(pmc, data);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			kvm_pmu_request_counter_reprogram(pmc);
		}
		return 0;
	}

	return 1;
}

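/*
 * (Re)compute the vCPU's PMU state from guest CPUID: PMU version, number
 * of GP counters, counter width, and reserved bit masks.
 */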
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	union cpuid_0x80000022_ebx ebx;

	pmu->version = 1;
	if (guest_cpu_cap_has(vcpu, X86_FEATURE_PERFMON_V2)) {
		pmu->version = 2;
		/*
		 * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
		 * CPUID entry is guaranteed to be non-NULL.
		 */
		BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
			     x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
		ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
		pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
	} else if (guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	} else {
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	}

	pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
					 kvm_pmu_cap.num_counters_gp);

	if (pmu->version > 1) {
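		/* Global control/status bits above the last counter are reserved. */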
		pmu->global_ctrl_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
		pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
	}

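	/* AMD general-purpose counters are 48 bits wide. */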
	pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(48) - 1;
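	/* PerfEvtSeln bits 19, 21, and 36-63 are reserved from the guest's view. */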
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	/* Fixed counters are not applicable to AMD; clear them to avoid any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
}

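/* One-time per-vCPU setup of the GP counter array. */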
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

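	/* KVM's AMD counter limit must not exceed the PERFCTR_CORE count. */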
	BUILD_BUG_ON(KVM_MAX_NR_AMD_GP_COUNTERS > AMD64_NUM_COUNTERS_CORE);

	for (i = 0; i < KVM_MAX_NR_AMD_GP_COUNTERS; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

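/* PMU callbacks handed to the common x86 PMU code via kvm_x86_init_ops.pmu_ops. */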
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.check_rdpmc_early = amd_check_rdpmc_early,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
	.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
};