1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 /* 31 * This file and its contents are supplied under the terms of the 32 * Common Development and Distribution License ("CDDL"), version 1.0. 33 * You may only use this file in accordance with the terms of version 34 * 1.0 of the CDDL. 35 * 36 * A full copy of the text of the CDDL should have accompanied this 37 * source. A copy of the CDDL is also available via the Internet at 38 * http://www.illumos.org/license/CDDL. 39 * 40 * Copyright 2014 Pluribus Networks Inc. 41 * Copyright 2017 Joyent, Inc. 
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/specialreg.h>
#include <machine/vmm.h>
#include "vmx.h"

/* Bits 0-30 of VMX_BASIC MSR contain VMCS revision identifier */
#define	VMX_BASIC_REVISION(v)	((v) & 0x7fffffff)

/*
 * Translate a VM_REG_GUEST_* register identifier into the encoding of the
 * VMCS field which holds that register's guest value.  Returns
 * VMCS_INVALID_ENCODING for identifiers with no corresponding VMCS field
 * handled here.
 */
uint32_t
vmcs_field_encoding(int ident)
{
	switch (ident) {
	case VM_REG_GUEST_CR0:
		return (VMCS_GUEST_CR0);
	case VM_REG_GUEST_CR3:
		return (VMCS_GUEST_CR3);
	case VM_REG_GUEST_CR4:
		return (VMCS_GUEST_CR4);
	case VM_REG_GUEST_DR7:
		return (VMCS_GUEST_DR7);
	case VM_REG_GUEST_RSP:
		return (VMCS_GUEST_RSP);
	case VM_REG_GUEST_RIP:
		return (VMCS_GUEST_RIP);
	case VM_REG_GUEST_RFLAGS:
		return (VMCS_GUEST_RFLAGS);
	case VM_REG_GUEST_ES:
		return (VMCS_GUEST_ES_SELECTOR);
	case VM_REG_GUEST_CS:
		return (VMCS_GUEST_CS_SELECTOR);
	case VM_REG_GUEST_SS:
		return (VMCS_GUEST_SS_SELECTOR);
	case VM_REG_GUEST_DS:
		return (VMCS_GUEST_DS_SELECTOR);
	case VM_REG_GUEST_FS:
		return (VMCS_GUEST_FS_SELECTOR);
	case VM_REG_GUEST_GS:
		return (VMCS_GUEST_GS_SELECTOR);
	case VM_REG_GUEST_TR:
		return (VMCS_GUEST_TR_SELECTOR);
	case VM_REG_GUEST_LDTR:
		return (VMCS_GUEST_LDTR_SELECTOR);
	case VM_REG_GUEST_EFER:
		return (VMCS_GUEST_IA32_EFER);
	case VM_REG_GUEST_PDPTE0:
		return (VMCS_GUEST_PDPTE0);
	case VM_REG_GUEST_PDPTE1:
		return (VMCS_GUEST_PDPTE1);
	case VM_REG_GUEST_PDPTE2:
		return (VMCS_GUEST_PDPTE2);
	case VM_REG_GUEST_PDPTE3:
		return (VMCS_GUEST_PDPTE3);
	case VM_REG_GUEST_ENTRY_INST_LENGTH:
		return (VMCS_ENTRY_INST_LENGTH);
	default:
		return (VMCS_INVALID_ENCODING);
	}
}

/*
 * Emit the VMCS field encodings for the base, limit, and access-rights
 * components of segment register `seg` through the `base`, `lim`, and `acc`
 * out-parameters.  The IDTR and GDTR have no access-rights field, so *acc is
 * set to VMCS_INVALID_ENCODING for those.  Panics if `seg` is not a
 * recognized segment register.
 */
void
vmcs_seg_desc_encoding(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc)
{
	switch (seg) {
	case VM_REG_GUEST_ES:
		*base = VMCS_GUEST_ES_BASE;
		*lim = VMCS_GUEST_ES_LIMIT;
		*acc = VMCS_GUEST_ES_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_CS:
		*base = VMCS_GUEST_CS_BASE;
		*lim = VMCS_GUEST_CS_LIMIT;
		*acc = VMCS_GUEST_CS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_SS:
		*base = VMCS_GUEST_SS_BASE;
		*lim = VMCS_GUEST_SS_LIMIT;
		*acc = VMCS_GUEST_SS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_DS:
		*base = VMCS_GUEST_DS_BASE;
		*lim = VMCS_GUEST_DS_LIMIT;
		*acc = VMCS_GUEST_DS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_FS:
		*base = VMCS_GUEST_FS_BASE;
		*lim = VMCS_GUEST_FS_LIMIT;
		*acc = VMCS_GUEST_FS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_GS:
		*base = VMCS_GUEST_GS_BASE;
		*lim = VMCS_GUEST_GS_LIMIT;
		*acc = VMCS_GUEST_GS_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_TR:
		*base = VMCS_GUEST_TR_BASE;
		*lim = VMCS_GUEST_TR_LIMIT;
		*acc = VMCS_GUEST_TR_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_LDTR:
		*base = VMCS_GUEST_LDTR_BASE;
		*lim = VMCS_GUEST_LDTR_LIMIT;
		*acc = VMCS_GUEST_LDTR_ACCESS_RIGHTS;
		break;
	case VM_REG_GUEST_IDTR:
		*base = VMCS_GUEST_IDTR_BASE;
		*lim = VMCS_GUEST_IDTR_LIMIT;
		/* IDTR carries no access-rights field in the VMCS */
		*acc = VMCS_INVALID_ENCODING;
		break;
	case VM_REG_GUEST_GDTR:
		*base = VMCS_GUEST_GDTR_BASE;
		*lim = VMCS_GUEST_GDTR_LIMIT;
		/* GDTR carries no access-rights field in the VMCS */
		*acc = VMCS_INVALID_ENCODING;
		break;
	default:
		panic("invalid segment register %d", seg);
	}
}

/*
 * Translate an MSR number into the encoding of the VMCS field which holds
 * that MSR's guest value, or VMCS_INVALID_ENCODING if the MSR's guest state
 * is not kept in the VMCS.
 */
uint32_t
vmcs_msr_encoding(uint32_t msr)
{
	switch (msr) {
	case MSR_PAT:
		return (VMCS_GUEST_IA32_PAT);
	case MSR_EFER:
		return (VMCS_GUEST_IA32_EFER);
	case MSR_SYSENTER_CS_MSR:
		return (VMCS_GUEST_IA32_SYSENTER_CS);
	case MSR_SYSENTER_ESP_MSR:
		return (VMCS_GUEST_IA32_SYSENTER_ESP);
	case MSR_SYSENTER_EIP_MSR:
		return (VMCS_GUEST_IA32_SYSENTER_EIP);
	/*
	 * While fsbase and gsbase are expected to be accessed (by the VMM) via
	 * the segment descriptor interfaces, we still make it available as MSR
	 * contents as well.
	 */
	case MSR_FSBASE:
		return (VMCS_GUEST_FS_BASE);
	case MSR_GSBASE:
		return (VMCS_GUEST_GS_BASE);
	default:
		return (VMCS_INVALID_ENCODING);
	}
}

/*
 * Unload the VMCS at physical address `vmcs_pa` from the current CPU via the
 * VMCLEAR instruction, and exit the critical section that the matching
 * vmcs_load() call entered.  Panics if VMCLEAR reports an error.
 */
void
vmcs_clear(uintptr_t vmcs_pa)
{
	int err;

	__asm __volatile("vmclear %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmclear(%p) error %d", (void *)vmcs_pa, err);
	}

	/*
	 * A call to critical_enter() was made in vmcs_load() to prevent
	 * preemption. Now that the VMCS is unloaded, it is safe to relax that
	 * restriction.
	 */
	critical_exit();
}

/*
 * Prepare a VMCS for first use: stamp it with the CPU's VMCS revision
 * identifier (read from the VMX_BASIC MSR) and run VMCLEAR on it.  Panics if
 * VMCLEAR reports an error.
 */
void
vmcs_initialize(struct vmcs *vmcs, uintptr_t vmcs_pa)
{
	int err;

	/* set to VMCS revision */
	vmcs->identifier = VMX_BASIC_REVISION(rdmsr(MSR_VMX_BASIC));

	/*
	 * Perform a vmclear on the VMCS, but without the critical section
	 * manipulation as done by vmcs_clear() above.
	 */
	__asm __volatile("vmclear %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmclear(%p) error %d", (void *)vmcs_pa, err);
	}
}

/*
 * Load the VMCS at physical address `vmcs_pa` onto the current CPU via the
 * VMPTRLD instruction, making it the target of subsequent vmcs_read()/
 * vmcs_write() calls.  Enters a critical section which is exited by the
 * matching vmcs_clear().  Panics if VMPTRLD reports an error.
 */
void
vmcs_load(uintptr_t vmcs_pa)
{
	int err;

	/*
	 * While the VMCS is loaded on the CPU for subsequent operations, it is
	 * important that the thread not be preempted. That is ensured with
	 * critical_enter() here, with a matching critical_exit() call in
	 * vmcs_clear() once the VMCS is unloaded.
	 */
	critical_enter();

	__asm __volatile("vmptrld %[addr];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (err)
	    : [addr] "m" (vmcs_pa)
	    : "memory");

	if (err != 0) {
		panic("vmptrld(%p) error %d", (void *)vmcs_pa, err);
	}
}

/*
 * Read the field identified by `encoding` from the currently loaded VMCS via
 * the VMREAD instruction.  Panics if VMREAD reports an error.
 */
uint64_t
vmcs_read(uint32_t encoding)
{
	int error;
	uint64_t val;

	__asm __volatile("vmread %[enc], %[val];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error), [val] "=r" (val)
	    : [enc] "r" ((uint64_t)encoding)
	    : "memory");

	if (error != 0) {
		panic("vmread(%x) error %d", encoding, error);
	}

	return (val);
}

/*
 * Write `val` into the field identified by `encoding` in the currently
 * loaded VMCS via the VMWRITE instruction.  Panics if VMWRITE reports an
 * error.
 */
void
vmcs_write(uint32_t encoding, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[enc];"
	    VMX_SET_ERROR_CODE_ASM
	    : [error] "=r" (error)
	    : [val] "r" (val), [enc] "r" ((uint64_t)encoding)
	    : "memory");

	if (error != 0) {
		panic("vmwrite(%x, %lx) error %d", encoding, val, error);
	}
}