/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_VMX_CPUFUNC_H_
#define	_VMX_CPUFUNC_H_

struct vmcs;

/*
 * Section 5.2 "Conventions" from Intel Architecture Manual 2B.
 *
 *				error
 * VMsucceed			  0
 * VMFailInvalid		  1
 * VMFailValid			  2	see also VMCS VM-Instruction Error Field
 */
#define	VM_SUCCESS		0
#define	VM_FAIL_INVALID		1
#define	VM_FAIL_VALID		2
#define	VMX_SET_ERROR_CODE \
	"	jnc 1f;" \
	"	mov $1, %[error];"	/* CF: error = 1 */ \
	"	jmp 3f;" \
	"1:	jnz 2f;" \
	"	mov $2, %[error];"	/* ZF: error = 2 */ \
	"	jmp 3f;" \
	"2:	mov $0, %[error];" \
	"3:"

/* returns 0 on success and non-zero on failure */
static __inline int
vmxon(char *region)
{
	int error;
	uint64_t addr;

	addr = vtophys(region);
	__asm __volatile("vmxon %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");

	return (error);
}

/* returns 0 on success and non-zero on failure */
static __inline int
vmclear(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmclear %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}

static __inline void
vmxoff(void)
{

	__asm __volatile("vmxoff");
}

static __inline void
vmptrst(uint64_t *addr)
{

	__asm __volatile("vmptrst %[addr]" :: [addr]"m" (*addr) : "memory");
}

static __inline int
vmptrld(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmptrld %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}

static __inline int
vmwrite(uint64_t reg, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[reg];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [val] "r" (val), [reg] "r" (reg)
			 : "memory");

	return (error);
}
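/*
 * Illustrative usage sketch: vmwrite() and vmread() operate on whichever
 * VMCS is currently loaded on this cpu (see VMPTRLD()/VMCLEAR() below).
 * Assuming the VMCS field encodings provided by vmcs.h, a caller might do:
 *
 *	uint64_t exit_reason;
 *	int error;
 *
 *	VMPTRLD(vmcs);
 *	error = vmwrite(VMCS_GUEST_RIP, guest_rip);
 *	error = vmread(VMCS_EXIT_REASON, &exit_reason);
 *	VMCLEAR(vmcs);
 *
 * A non-zero return is VM_FAIL_INVALID or VM_FAIL_VALID, as produced by
 * VMX_SET_ERROR_CODE above.
 */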
static __inline int
vmread(uint64_t r, uint64_t *addr)
{
	int error;

	__asm __volatile("vmread %[r], %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error), [addr] "=m" (*addr)
			 : [r] "r" (r)
			 : "memory");
	return (error);
}

/*
 * VMPTRLD() makes 'vmcs' the current VMCS on this cpu and enters a critical
 * section; the matching VMCLEAR() clears it and exits the critical section,
 * so the thread cannot be preempted while the VMCS is loaded.  Both panic
 * on failure.
 */
static __inline void
VMCLEAR(struct vmcs *vmcs)
{
	int err;

	err = vmclear(vmcs);
	if (err != 0)
		panic("%s: vmclear(%p) error %d", __func__, vmcs, err);

	critical_exit();
}

static __inline void
VMPTRLD(struct vmcs *vmcs)
{
	int err;

	critical_enter();

	err = vmptrld(vmcs);
	if (err != 0)
		panic("%s: vmptrld(%p) error %d", __func__, vmcs, err);
}

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

/* INVVPID descriptor: VPID in bits 15:0, linear address in bits 127:64. */
struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof(struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	__asm __volatile("invvpid %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invvpid error %d", error);
}

#define	INVEPT_TYPE_SINGLE_CONTEXT	1UL
#define	INVEPT_TYPE_ALL_CONTEXTS	2UL

/* INVEPT descriptor: EPT pointer in bits 63:0, bits 127:64 reserved. */
struct invept_desc {
	uint64_t	eptp;
	uint64_t	_res;
};
CTASSERT(sizeof(struct invept_desc) == 16);

static __inline void
invept(uint64_t type, struct invept_desc desc)
{
	int error;

	__asm __volatile("invept %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invept error %d", error);
}
#endif	/* _VMX_CPUFUNC_H_ */