/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2025 Oxide Computer Company
 */

#include <sys/kernel.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/x86_archext.h>

#include <sys/vmm_kernel.h>
#include "svm.h"
#include "svm_softc.h"
#include "svm_pmu.h"

/*
 * Allow guests to use perf counter resources.
 */
int svm_pmu_enabled = 1;

/*
 * Force guest exits (preclude disabling intercepts) for access to perf
 * counter resources via RDPMC and RDMSR/WRMSR.
 */
int svm_pmu_force_exit = 0;
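/*
 * Both of the above are plain global tunables; assuming the standard vmm
 * module name, they could presumably be set at boot via /etc/system, e.g.:
 *
 *	set vmm:svm_pmu_force_exit = 1
 */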

void
svm_pmu_init(struct svm_softc *svm_sc)
{
	if (!is_x86_feature(x86_featureset, X86FSET_AMD_PCEC) ||
	    svm_pmu_enabled == 0) {
		svm_sc->pmu_flavor = SPF_NONE;
		return;
	}

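	/*
	 * Map the host microarchitecture onto a PMU "flavor", which later
	 * gates the set of event selectors the guest is allowed to program.
	 */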
	switch (uarchrev_uarch(cpuid_getuarchrev(CPU))) {
	case X86_UARCH_AMD_LEGACY:
		svm_sc->pmu_flavor = SPF_PRE_ZEN;
		break;
	case X86_UARCH_AMD_ZEN1:
	case X86_UARCH_AMD_ZENPLUS:
		svm_sc->pmu_flavor = SPF_ZEN1;
		break;
	case X86_UARCH_AMD_ZEN2:
	case X86_UARCH_AMD_ZEN3:
	case X86_UARCH_AMD_ZEN4:
	case X86_UARCH_AMD_ZEN5:
		svm_sc->pmu_flavor = SPF_ZEN2;
		break;
	default:
		/* Exclude unrecognized uarch from perf counter access */
		svm_sc->pmu_flavor = SPF_NONE;
		return;
	}

	/* Turn on base and extended CPCs for all vCPUs */
	const uint_t maxcpu = vm_get_maxcpus(svm_sc->vm);
	for (uint_t i = 0; i < maxcpu; i++) {
		struct svm_pmu_vcpu *pmu_vcpu = svm_get_pmu(svm_sc, i);

		pmu_vcpu->spv_hma_state.hscs_flags = HCF_EN_BASE | HCF_EN_EXTD;
	}
}

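/*
 * A vCPU PMU counts as active once any HCF_* enable flags have been set on
 * it; hscs_flags equal to HCF_DISABLED means counter access was never
 * enabled for the guest.
 */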
static bool
svm_pmu_is_active(const struct svm_pmu_vcpu *pmu)
{
	return (pmu->spv_hma_state.hscs_flags != HCF_DISABLED);
}

static bool
svm_pmu_is_evt_msr(uint32_t msr)
{
	switch (msr) {
	case MSR_AMD_K7_PERF_EVTSEL0:
	case MSR_AMD_K7_PERF_EVTSEL1:
	case MSR_AMD_K7_PERF_EVTSEL2:
	case MSR_AMD_K7_PERF_EVTSEL3:
	case MSR_AMD_F15H_PERF_EVTSEL0:
	case MSR_AMD_F15H_PERF_EVTSEL1:
	case MSR_AMD_F15H_PERF_EVTSEL2:
	case MSR_AMD_F15H_PERF_EVTSEL3:
	case MSR_AMD_F15H_PERF_EVTSEL4:
	case MSR_AMD_F15H_PERF_EVTSEL5:
		return (true);
	default:
		return (false);
	}
}

static bool
svm_pmu_is_ctr_msr(uint32_t msr)
{
	switch (msr) {
	case MSR_AMD_K7_PERF_CTR0:
	case MSR_AMD_K7_PERF_CTR1:
	case MSR_AMD_K7_PERF_CTR2:
	case MSR_AMD_K7_PERF_CTR3:
	case MSR_AMD_F15H_PERF_CTR0:
	case MSR_AMD_F15H_PERF_CTR1:
	case MSR_AMD_F15H_PERF_CTR2:
	case MSR_AMD_F15H_PERF_CTR3:
	case MSR_AMD_F15H_PERF_CTR4:
	case MSR_AMD_F15H_PERF_CTR5:
		return (true);
	default:
		return (false);
	}
}

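/*
 * Map a perf-counter MSR to its counter index.  The legacy K7 EVTSEL and CTR
 * MSRs are each contiguous, while the F15H "extended" MSRs interleave the
 * registers (EVTSEL0, CTR0, EVTSEL1, CTR1, ...), hence the division by two.
 */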
static uint_t
svm_pmu_msr_to_idx(uint32_t msr)
{
	switch (msr) {
	case MSR_AMD_K7_PERF_EVTSEL0:
	case MSR_AMD_K7_PERF_EVTSEL1:
	case MSR_AMD_K7_PERF_EVTSEL2:
	case MSR_AMD_K7_PERF_EVTSEL3:
		return (msr - MSR_AMD_K7_PERF_EVTSEL0);
	case MSR_AMD_K7_PERF_CTR0:
	case MSR_AMD_K7_PERF_CTR1:
	case MSR_AMD_K7_PERF_CTR2:
	case MSR_AMD_K7_PERF_CTR3:
		return (msr - MSR_AMD_K7_PERF_CTR0);
	case MSR_AMD_F15H_PERF_EVTSEL0:
	case MSR_AMD_F15H_PERF_EVTSEL1:
	case MSR_AMD_F15H_PERF_EVTSEL2:
	case MSR_AMD_F15H_PERF_EVTSEL3:
	case MSR_AMD_F15H_PERF_EVTSEL4:
	case MSR_AMD_F15H_PERF_EVTSEL5:
		return ((msr - MSR_AMD_F15H_PERF_EVTSEL0) / 2);
	case MSR_AMD_F15H_PERF_CTR0:
	case MSR_AMD_F15H_PERF_CTR1:
	case MSR_AMD_F15H_PERF_CTR2:
	case MSR_AMD_F15H_PERF_CTR3:
	case MSR_AMD_F15H_PERF_CTR4:
	case MSR_AMD_F15H_PERF_CTR5:
		return ((msr - MSR_AMD_F15H_PERF_CTR0) / 2);
	default:
		panic("unexpected perf. counter MSR: %X", msr);
	}
}

bool
svm_pmu_owned_msr(uint32_t msr)
{
	return (svm_pmu_is_evt_msr(msr) || svm_pmu_is_ctr_msr(msr));
}

/*
 * Is guest access to a given evtsel allowed for the "flavor" of the PMU?
 *
 * Initial access is fairly limited, providing access to only the evtsels
 * expected to be used by Linux `perf stat`.
 */
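/*
 * Only the event-select and unit-mask fields are examined here; control bits
 * such as USR/OS/EN pass through untouched.  As an illustrative example, a
 * guest writing evtsel 0x430076 (CPU cycles, counting in both user and
 * kernel mode, enabled) is permitted on every flavor, since the event-select
 * field is 0x76.
 */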
static bool
svm_pmu_evtsel_allowed(uint64_t evtsel, svm_pmu_flavor_t flavor)
{
	const uint64_t evt = evtsel & AMD_PERF_EVTSEL_EVT_MASK;
	const uint16_t umask = evtsel & AMD_PERF_EVTSEL_UNIT_MASK;

	/*
	 * Some of the perf counters have stayed fairly consistent in their
	 * identifiers throughout the AMD product line.
	 */
	switch (evt) {
	case 0x76:	/* CPU cycles */
	case 0xc0:	/* Retired instructions */
	case 0xc2:	/* Branch instructions */
	case 0xc3:	/* Branch misses */
		return (true);
	default:
		break;
	}

	if (flavor == SPF_PRE_ZEN) {
		switch (evt) {
		case 0x7d: /* Cache hits */
		case 0x7e: /* Cache misses */
			return (true);
		default:
			return (false);
		}
	} else if (flavor == SPF_ZEN1) {
		switch (evt) {
		case 0x60: /* L2 accesses (group 1) */
		case 0x64: /* Core to L2 access status */
			return (true);
		case 0x87: /* IC fetch stall */
			switch (umask) {
			case 0x0100: /* backend */
			case 0x0200: /* frontend */
				return (true);
			default:
				return (false);
			}
		default:
			return (false);
		}
	} else if (flavor == SPF_ZEN2) {
		switch (evt) {
		case 0x60: /* L2 accesses (group 1) */
		case 0x64: /* Core to L2 access status */
		case 0xa9: /* u-op queue empty (frontend stall) */
			return (true);
		default:
			return (false);
		}
	}

	return (false);
}

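/*
 * Emulate guest RDMSR of a PMU register: evtsel reads return the shadowed
 * value the guest last wrote, while counter reads return the live counter
 * value from the HMA-managed state.
 */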
vm_msr_result_t
svm_pmu_rdmsr(struct svm_softc *svm_sc, int vcpu, uint32_t msr, uint64_t *valp)
{
	ASSERT(svm_pmu_owned_msr(msr));

	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);

	if (!svm_pmu_is_active(pmu)) {
		return (VMR_UNHANLDED);
	}

	if (svm_pmu_is_evt_msr(msr)) {
		const uint_t idx = svm_pmu_msr_to_idx(msr);

		*valp = pmu->spv_evtsel_shadow[idx];
	} else if (svm_pmu_is_ctr_msr(msr)) {
		const uint_t idx = svm_pmu_msr_to_idx(msr);

		*valp = pmu->spv_hma_state.hscs_regs[idx].hc_ctr;
	} else {
		/* UNREACHABLE */
		return (VMR_UNHANLDED);
	}

	return (VMR_OK);
}

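/*
 * Emulate guest WRMSR of a PMU register.  A disallowed event selector is
 * still shadowed for later guest reads, but is written into the hardware
 * state as 0, leaving that counter disabled.
 */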
vm_msr_result_t
svm_pmu_wrmsr(struct svm_softc *svm_sc, int vcpu, uint32_t msr, uint64_t val)
{
	ASSERT(svm_pmu_owned_msr(msr));

	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);
	const svm_pmu_flavor_t flavor = svm_sc->pmu_flavor;

	if (!svm_pmu_is_active(pmu)) {
		return (VMR_UNHANLDED);
	}

	if (svm_pmu_is_evt_msr(msr)) {
		const uint_t idx = svm_pmu_msr_to_idx(msr);

		/*
		 * Keep the unmodified evtsel shadowed, should the guest choose
		 * to read it out later.
		 *
		 * XXX: Should we balk at reserved bits being set?
		 */
		pmu->spv_evtsel_shadow[idx] = val;

		if (!svm_pmu_evtsel_allowed(val, flavor)) {
			/*
			 * Disable any counter which has been configured with
			 * an event selector to which we do not allow access.
			 */
			val = 0;
		}
		pmu->spv_hma_state.hscs_regs[idx].hc_evtsel = val;
	} else if (svm_pmu_is_ctr_msr(msr)) {
		const uint_t idx = svm_pmu_msr_to_idx(msr);

		pmu->spv_hma_state.hscs_regs[idx].hc_ctr = val;
	} else {
		/* UNREACHABLE */
		return (VMR_UNHANLDED);
	}

	return (VMR_OK);
}

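/*
 * Emulate RDPMC when the intercept fires: ECX selects the counter index, and
 * an out-of-range index or an inactive PMU causes the access to be rejected
 * by returning false.
 */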
bool
svm_pmu_rdpmc(struct svm_softc *svm_sc, int vcpu, uint32_t ecx, uint64_t *valp)
{
	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);

	if (!svm_pmu_is_active(pmu)) {
		return (false);
	}
	if (ecx >= SVM_PMU_MAX_COUNTERS) {
		return (false);
	}

	*valp = pmu->spv_hma_state.hscs_regs[ecx].hc_ctr;
	return (true);
}

/*
 * Attempt to load guest PMU state, if the guest vCPU happens to be actively
 * using any counters.  Host state will be saved if such loading occurs.
 *
 * The results of any state loading may require adjustment of guest intercepts
 * and thus demand a call to svm_apply_dirty() prior to VM entry.
 */
void
svm_pmu_enter(struct svm_softc *svm_sc, int vcpu)
{
	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);

	if (!svm_pmu_is_active(pmu)) {
		return;
	}

	hma_svm_cpc_res_t entry = hma_svm_cpc_enter(&pmu->spv_hma_state);

	/*
	 * Until per-vCPU MSR bitmaps are available, ignore the ability to
	 * expose direct guest access to counter MSRs.
	 */
	entry &= ~HSCR_ACCESS_CTR_MSR;

	if (entry != pmu->spv_last_entry) {
		/* Update intercepts to match what is allowed per HMA. */
		if (entry & HSCR_ACCESS_RDPMC && svm_pmu_force_exit == 0) {
			svm_disable_intercept(svm_sc, vcpu, VMCB_CTRL1_INTCPT,
			    VMCB_INTCPT_RDPMC);
		} else {
			svm_enable_intercept(svm_sc, vcpu, VMCB_CTRL1_INTCPT,
			    VMCB_INTCPT_RDPMC);
		}
	}
	pmu->spv_last_entry = entry;
}

/*
 * If guest PMU state is active, save it, and restore the host state.
 */
void
svm_pmu_exit(struct svm_softc *svm_sc, int vcpu)
{
	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpu);

	if (!svm_pmu_is_active(pmu)) {
		return;
	}

	hma_svm_cpc_exit(&pmu->spv_hma_state);
}

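/*
 * Export per-vCPU PMU state (shadow evtsels and live counter values) through
 * the vmm-data interface, for consumers such as save/restore.
 */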
static int
svm_pmu_data_read(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_PMU_AMD);
	VERIFY3U(req->vdr_version, ==, 1);
	VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_pmu_amd_v1));

	struct svm_softc *svm_sc = vm_get_cookie(vm);
	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpuid);
	struct vdi_pmu_amd_v1 *out = req->vdr_data;

	if (!svm_pmu_is_active(pmu)) {
		bzero(out, sizeof (*out));
		return (0);
	}

	for (uint_t i = 0; i < SVM_PMU_MAX_COUNTERS; i++) {
		out->vpa_evtsel[i] = pmu->spv_evtsel_shadow[i];
		out->vpa_ctr[i] = pmu->spv_hma_state.hscs_regs[i].hc_ctr;
	}
	return (0);
}

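/*
 * Import per-vCPU PMU state through the vmm-data interface, subjecting the
 * incoming event selectors to the same checks as a guest WRMSR.
 */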
static int
svm_pmu_data_write(struct vm *vm, int vcpuid, const vmm_data_req_t *req)
{
	VERIFY3U(req->vdr_class, ==, VDC_PMU_AMD);
	VERIFY3U(req->vdr_version, ==, 1);
	VERIFY3U(req->vdr_len, >=, sizeof (struct vdi_pmu_amd_v1));

	struct svm_softc *svm_sc = vm_get_cookie(vm);
	struct svm_pmu_vcpu *pmu = svm_get_pmu(svm_sc, vcpuid);
	const struct vdi_pmu_amd_v1 *src = req->vdr_data;

	if (!svm_pmu_is_active(pmu)) {
		/*
		 * Skip importing state for an inactive PMU.
		 *
		 * It might be appropriate to return an error here, but it's
		 * not clear what would be most appropriate (or what userspace
		 * would do in such a case).
		 */
		return (0);
	}

	const svm_pmu_flavor_t flavor = svm_sc->pmu_flavor;
	for (uint_t i = 0; i < SVM_PMU_MAX_COUNTERS; i++) {
		const uint64_t evtsel = src->vpa_evtsel[i];

		/*
		 * The shadow evtsel is kept as-is, but the "active" value
		 * undergoes the same verification as a guest WRMSR.
		 */
		pmu->spv_evtsel_shadow[i] = evtsel;
		if (svm_pmu_evtsel_allowed(evtsel, flavor)) {
			pmu->spv_hma_state.hscs_regs[i].hc_evtsel = evtsel;
		} else {
			pmu->spv_hma_state.hscs_regs[i].hc_evtsel = 0;
		}
		pmu->spv_hma_state.hscs_regs[i].hc_ctr = src->vpa_ctr[i];
	}
	return (0);
}


static const vmm_data_version_entry_t pmu_amd_v1 = {
	.vdve_class = VDC_PMU_AMD,
	.vdve_version = 1,
	.vdve_len_expect = sizeof (struct vdi_pmu_amd_v1),
	.vdve_vcpu_readf = svm_pmu_data_read,
	.vdve_vcpu_writef = svm_pmu_data_write,
};
VMM_DATA_VERSION(pmu_amd_v1);