/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

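/* Fallback in case the system headers do not define this MSR. */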
#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

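/* Host MSR values cached by svm_msr_init() and restored in svm_msr_guest_exit(). */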
static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_vcpu *vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}

void
svm_msr_guest_exit(struct svm_vcpu *vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

int
svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
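		/* Report no machine-check banks or pending errors to the guest. */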
		*result = 0;
		break;
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
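		/* MTRR reads are serviced by vm_rdmtrr(); a failed access injects #GP. */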
		if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
			vm_inject_gp(vcpu->vcpu);
		}
		break;
	case MSR_SYSCFG:
	case MSR_AMDK8_IPM:
	case MSR_EXTFEATURES:
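		/* These MSRs are not emulated; reads return zero. */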
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

int
svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* ignore writes */
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
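		/* MTRR writes are serviced by vm_wrmtrr(); a failed access injects #GP. */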
		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
			vm_inject_gp(vcpu->vcpu);
		}
		break;
	case MSR_SYSCFG:
		break;		/* Ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	case MSR_K8_UCODE_UPDATE:
		/*
		 * Ignore writes to the microcode update register.
		 */
		break;
#ifdef BHYVE_SNAPSHOT
	case MSR_TSC:
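		/*
		 * Emulate the TSC write by adjusting the guest's TSC offset so
		 * that subsequent guest reads observe the requested value.
		 */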
		svm_set_tsc_offset(vcpu, val - rdtsc());
		break;
#endif
	case MSR_EXTFEATURES:
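		/* Ignore writes. */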
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}