/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>

#include <machine/cpufunc.h>
#include <machine/vmm.h>
#include <machine/specialreg.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <vmmapi.h>

#include "debug.h"
#include "xmsr.h"
44
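/* Host CPU vendor flags, set once by init_msr(). */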
static int cpu_vendor_intel, cpu_vendor_amd, cpu_vendor_hygon;

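/*
 * Emulate a guest WRMSR.  Returns 0 if the write was handled (all of the
 * MSRs below are silently ignored), -1 if the MSR is not handled here.
 */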
int
emulate_wrmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t val __unused)
{

        if (cpu_vendor_intel) {
                switch (num) {
                case 0xd04:             /* Sandy Bridge uncore PMCs */
                case 0xc24:
                        return (0);
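                /* Ignore microcode update trigger and signature writes. */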
                case MSR_BIOS_UPDT_TRIG:
                        return (0);
                case MSR_BIOS_SIGN:
                        return (0);
                default:
                        break;
                }
        } else if (cpu_vendor_amd || cpu_vendor_hygon) {
                switch (num) {
                case MSR_HWCR:
                        /*
                         * Ignore writes to the hardware configuration MSR.
                         */
                        return (0);

                case MSR_NB_CFG1:
                case MSR_LS_CFG:
                case MSR_IC_CFG:
                        return (0);     /* Ignore writes */

                case MSR_PERFEVSEL0:
                case MSR_PERFEVSEL1:
                case MSR_PERFEVSEL2:
                case MSR_PERFEVSEL3:
                        /* Ignore writes to the PerfEvtSel MSRs */
                        return (0);

                case MSR_K7_PERFCTR0:
                case MSR_K7_PERFCTR1:
                case MSR_K7_PERFCTR2:
                case MSR_K7_PERFCTR3:
                        /* Ignore writes to the PerfCtr MSRs */
                        return (0);

                case MSR_P_STATE_CONTROL:
                        /* Ignore writes that change the P-state */
                        return (0);

                default:
                        break;
                }
        }
        return (-1);
}

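/*
 * Emulate a guest RDMSR.  Returns 0 with *val filled in if the MSR is
 * handled here, -1 if it is not.
 */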
int
emulate_rdmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t *val)
{
        int error = 0;

        if (cpu_vendor_intel) {
                switch (num) {
                case MSR_BIOS_SIGN:
                case MSR_IA32_PLATFORM_ID:
                case MSR_PKG_ENERGY_STATUS:
                case MSR_PP0_ENERGY_STATUS:
                case MSR_PP1_ENERGY_STATUS:
                case MSR_DRAM_ENERGY_STATUS:
                case MSR_MISC_FEATURE_ENABLES:
                        *val = 0;
                        break;
                case MSR_RAPL_POWER_UNIT:
                        /*
                         * Use the default value documented in section
                         * "RAPL Interfaces" in Intel SDM vol3.
                         */
                        *val = 0x000a1003;
                        break;
                case MSR_IA32_FEATURE_CONTROL:
                        /*
                         * Windows guests check this MSR.
                         * Set the lock bit to avoid writes
                         * to this MSR.
                         */
                        *val = IA32_FEATURE_CONTROL_LOCK;
                        break;
                default:
                        error = -1;
                        break;
                }
        } else if (cpu_vendor_amd || cpu_vendor_hygon) {
                switch (num) {
                case MSR_BIOS_SIGN:
                        *val = 0;
                        break;
                case MSR_HWCR:
                        /*
                         * BIOS and Kernel Developer's Guides for AMD Families
                         * 12H, 14H, 15H and 16H.
                         */
                        *val = 0x01000010;      /* Reset value */
                        *val |= 1 << 9;         /* MONITOR/MWAIT disable */
                        break;

                case MSR_NB_CFG1:
                case MSR_LS_CFG:
                case MSR_IC_CFG:
                        /*
                         * The reset value is processor family dependent so
                         * just return 0.
                         */
                        *val = 0;
                        break;

                case MSR_PERFEVSEL0:
                case MSR_PERFEVSEL1:
                case MSR_PERFEVSEL2:
                case MSR_PERFEVSEL3:
                        /*
                         * PerfEvtSel MSRs are not properly virtualized so just
                         * return zero.
                         */
                        *val = 0;
                        break;

                case MSR_K7_PERFCTR0:
                case MSR_K7_PERFCTR1:
                case MSR_K7_PERFCTR2:
                case MSR_K7_PERFCTR3:
                        /*
                         * PerfCtr MSRs are not properly virtualized so just
                         * return zero.
                         */
                        *val = 0;
                        break;

                case MSR_SMM_ADDR:
                case MSR_SMM_MASK:
                        /*
                         * Return the reset value defined in the AMD BIOS and
                         * Kernel Developer's Guide.
                         */
                        *val = 0;
                        break;

                case MSR_P_STATE_LIMIT:
                case MSR_P_STATE_CONTROL:
                case MSR_P_STATE_STATUS:
                case MSR_P_STATE_CONFIG(0):     /* P0 configuration */
                        *val = 0;
                        break;

                /*
                 * OpenBSD guests test bit 0 of this MSR to detect if the
                 * workaround for erratum 721 is already applied.
                 * https://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf
                 */
                case 0xC0011029:
                        *val = 1;
                        break;

                default:
                        error = -1;
                        break;
                }
        } else {
                error = -1;
        }
        return (error);
}

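/*
 * Determine the host CPU vendor so that emulate_wrmsr() and emulate_rdmsr()
 * apply the matching MSR list.  Returns 0 on success, ENOENT for an unknown
 * vendor.
 */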
int
init_msr(void)
{
        int error;
        u_int regs[4];
        char cpu_vendor[13];

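        /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order. */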
        do_cpuid(0, regs);
        ((u_int *)&cpu_vendor)[0] = regs[1];
        ((u_int *)&cpu_vendor)[1] = regs[3];
        ((u_int *)&cpu_vendor)[2] = regs[2];
        cpu_vendor[12] = '\0';

        error = 0;
        if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
                cpu_vendor_amd = 1;
        } else if (strcmp(cpu_vendor, "HygonGenuine") == 0) {
                cpu_vendor_hygon = 1;
        } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
                cpu_vendor_intel = 1;
        } else {
                EPRINTLN("Unknown cpu vendor \"%s\"", cpu_vendor);
                error = ENOENT;
        }
        return (error);
}