xref: /linux/arch/x86/kvm/svm/pmu.c (revision 23c48a124b469cee2eb0c75e6d22d366d1caa118)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

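/*
 * Return the MSR base for the requested register type: the extended F15h
 * CTL/CTR MSRs when the guest advertises PERFCTR_CORE in CPUID, otherwise
 * the legacy K7 event select/counter MSRs.
 */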
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

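/*
 * Map a counter or event select MSR (legacy K7 or extended F15h) to its
 * counter index; any other MSR maps to INDEX_ERROR.
 */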
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

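/*
 * Translate an MSR to its general-purpose PMC.  Returns NULL if the PMU is
 * disabled for the VM, if the MSR is not a PMU MSR of the requested type,
 * or if it is an extended F15h MSR but the guest CPUID lacks PERFCTR_CORE.
 */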
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

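/* No event filtering is done for AMD guests; all hardware events are available. */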
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
	return true;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

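/*
 * Map a contiguous counter index to its PMC via the counter MSR.  With
 * PERFCTR_CORE the CTRn MSRs are interleaved with the CTLn MSRs, so the
 * index is doubled before being added to the base.
 */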
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

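/*
 * Validate the counter index from RDPMC's ECX: the two type bits (31:30)
 * are ignored and the remainder must be below the number of GP counters.
 */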
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX value of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

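/* Look the MSR up first as a counter, then as an event select. */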
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

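/*
 * Read a PMU MSR: the counter value for PERFCTRn, the raw event select for
 * EVNTSELn.  Returns 1 if the MSR does not belong to this PMU.
 */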
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

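/*
 * Write a PMU MSR.  A counter write also updates the perf sample period;
 * an event select write drops reserved bits and reprograms the backing
 * perf event only if the value actually changed.
 */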
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			reprogram_counter(pmc);
		}
		return 0;
	}

	return 1;
}

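/*
 * (Re)configure the vPMU from guest CPUID: PERFCTR_CORE selects the larger
 * core-extension counter set, counters are 48 bits wide, and the
 * Intel-only fixed counter/global state is cleared.
 */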
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* Not applicable to AMD; clear them to prevent any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

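/* One-time setup of the general-purpose counter array for this vCPU. */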
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

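/* Stop all counters and clear their counts and event selects. */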
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

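/* Callback table used by the common KVM PMU code for AMD/SVM guests. */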
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.hw_event_available = amd_hw_event_available,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};