/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VMCB_H_
#define _VMCB_H_

#define BIT(n)			(1ULL << n)

/*
 * Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15
 * Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B
 */

/* vmcb_ctrl->intercept[] array indices */
#define VMCB_CR_INTCPT		0
#define VMCB_DR_INTCPT		1
#define VMCB_EXC_INTCPT		2
#define VMCB_CTRL1_INTCPT	3
#define VMCB_CTRL2_INTCPT	4

/* intercept[VMCB_CTRL1_INTCPT] fields */
#define VMCB_INTCPT_INTR		BIT(0)
#define VMCB_INTCPT_NMI			BIT(1)
#define VMCB_INTCPT_SMI			BIT(2)
#define VMCB_INTCPT_INIT		BIT(3)
#define VMCB_INTCPT_VINTR		BIT(4)
#define VMCB_INTCPT_CR0_WRITE		BIT(5)
#define VMCB_INTCPT_IDTR_READ		BIT(6)
#define VMCB_INTCPT_GDTR_READ		BIT(7)
#define VMCB_INTCPT_LDTR_READ		BIT(8)
#define VMCB_INTCPT_TR_READ		BIT(9)
#define VMCB_INTCPT_IDTR_WRITE		BIT(10)
#define VMCB_INTCPT_GDTR_WRITE		BIT(11)
#define VMCB_INTCPT_LDTR_WRITE		BIT(12)
#define VMCB_INTCPT_TR_WRITE		BIT(13)
#define VMCB_INTCPT_RDTSC		BIT(14)
#define VMCB_INTCPT_RDPMC		BIT(15)
#define VMCB_INTCPT_PUSHF		BIT(16)
#define VMCB_INTCPT_POPF		BIT(17)
#define VMCB_INTCPT_CPUID		BIT(18)
#define VMCB_INTCPT_RSM			BIT(19)
#define VMCB_INTCPT_IRET		BIT(20)
#define VMCB_INTCPT_INTn		BIT(21)
#define VMCB_INTCPT_INVD		BIT(22)
#define VMCB_INTCPT_PAUSE		BIT(23)
#define VMCB_INTCPT_HLT			BIT(24)
#define VMCB_INTCPT_INVLPG		BIT(25)
#define VMCB_INTCPT_INVLPGA		BIT(26)
#define VMCB_INTCPT_IO			BIT(27)
#define VMCB_INTCPT_MSR			BIT(28)
#define VMCB_INTCPT_TASK_SWITCH		BIT(29)
#define VMCB_INTCPT_FERR_FREEZE		BIT(30)
#define VMCB_INTCPT_SHUTDOWN		BIT(31)

/* intercept[VMCB_CTRL2_INTCPT] fields */
#define VMCB_INTCPT_VMRUN		BIT(0)
#define VMCB_INTCPT_VMMCALL		BIT(1)
#define VMCB_INTCPT_VMLOAD		BIT(2)
#define VMCB_INTCPT_VMSAVE		BIT(3)
#define VMCB_INTCPT_STGI		BIT(4)
#define VMCB_INTCPT_CLGI		BIT(5)
#define VMCB_INTCPT_SKINIT		BIT(6)
#define VMCB_INTCPT_RDTSCP		BIT(7)
#define VMCB_INTCPT_ICEBP		BIT(8)
#define VMCB_INTCPT_WBINVD		BIT(9)
#define VMCB_INTCPT_MONITOR		BIT(10)
#define VMCB_INTCPT_MWAIT		BIT(11)
#define VMCB_INTCPT_MWAIT_ARMED		BIT(12)
#define VMCB_INTCPT_XSETBV		BIT(13)
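
/*
 * Illustrative sketch (not part of the VMCB definitions): assuming 'ctrl'
 * points at the struct vmcb_ctrl declared below, a hypervisor enables an
 * intercept by or-ing its mask into the matching intercept[] word, e.g.
 *
 *	ctrl->intercept[VMCB_CTRL1_INTCPT] |= VMCB_INTCPT_HLT | VMCB_INTCPT_IO;
 *	ctrl->intercept[VMCB_CTRL2_INTCPT] |= VMCB_INTCPT_VMRUN;
 */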

/* VMCB TLB control */
#define VMCB_TLB_FLUSH_NOTHING		0	/* Flush nothing */
#define VMCB_TLB_FLUSH_ALL		1	/* Flush entire TLB */
#define VMCB_TLB_FLUSH_GUEST		3	/* Flush all guest entries */
#define VMCB_TLB_FLUSH_GUEST_NONGLOBAL	7	/* Flush guest non-PG entries */

/* VMCB state caching */
#define VMCB_CACHE_NONE		0	/* No caching */
#define VMCB_CACHE_I		BIT(0)	/* Intercept, TSC off, Pause filter */
#define VMCB_CACHE_IOPM		BIT(1)	/* I/O and MSR permission */
#define VMCB_CACHE_ASID		BIT(2)	/* ASID */
#define VMCB_CACHE_TPR		BIT(3)	/* V_TPR to V_INTR_VECTOR */
#define VMCB_CACHE_NP		BIT(4)	/* Nested Paging */
#define VMCB_CACHE_CR		BIT(5)	/* CR0, CR3, CR4 & EFER */
#define VMCB_CACHE_DR		BIT(6)	/* Debug registers */
#define VMCB_CACHE_DT		BIT(7)	/* GDT/IDT */
#define VMCB_CACHE_SEG		BIT(8)	/* User segments, CPL */
#define VMCB_CACHE_CR2		BIT(9)	/* page fault address */
#define VMCB_CACHE_LBR		BIT(10)	/* Last branch */

/* VMCB control event injection */
#define VMCB_EVENTINJ_EC_VALID		BIT(11)	/* Error Code valid */
#define VMCB_EVENTINJ_VALID		BIT(31)	/* Event valid */

/* Event types that can be injected */
#define VMCB_EVENTINJ_TYPE_INTR		0
#define VMCB_EVENTINJ_TYPE_NMI		2
#define VMCB_EVENTINJ_TYPE_EXCEPTION	3
#define VMCB_EVENTINJ_TYPE_INTn		4

/* VMCB exit code, APM vol2 Appendix C */
#define VMCB_EXIT_MC			0x52
#define VMCB_EXIT_INTR			0x60
#define VMCB_EXIT_NMI			0x61
#define VMCB_EXIT_VINTR			0x64
#define VMCB_EXIT_PUSHF			0x70
#define VMCB_EXIT_POPF			0x71
#define VMCB_EXIT_CPUID			0x72
#define VMCB_EXIT_IRET			0x74
#define VMCB_EXIT_INVD			0x76
#define VMCB_EXIT_PAUSE			0x77
#define VMCB_EXIT_HLT			0x78
#define VMCB_EXIT_INVLPGA		0x7A
#define VMCB_EXIT_IO			0x7B
#define VMCB_EXIT_MSR			0x7C
#define VMCB_EXIT_SHUTDOWN		0x7F
#define VMCB_EXIT_VMRUN			0x80
#define VMCB_EXIT_VMMCALL		0x81
#define VMCB_EXIT_VMLOAD		0x82
#define VMCB_EXIT_VMSAVE		0x83
#define VMCB_EXIT_STGI			0x84
#define VMCB_EXIT_CLGI			0x85
#define VMCB_EXIT_SKINIT		0x86
#define VMCB_EXIT_ICEBP			0x88
#define VMCB_EXIT_WBINVD		0x89
#define VMCB_EXIT_MONITOR		0x8A
#define VMCB_EXIT_MWAIT			0x8B
#define VMCB_EXIT_NPF			0x400
#define VMCB_EXIT_INVALID		-1

/*
 * Nested page fault.
 * Bit definitions to decode EXITINFO1.
 */
#define VMCB_NPF_INFO1_P	BIT(0)	/* Nested page present. */
#define VMCB_NPF_INFO1_W	BIT(1)	/* Access was write. */
#define VMCB_NPF_INFO1_U	BIT(2)	/* Access was user access. */
#define VMCB_NPF_INFO1_RSV	BIT(3)	/* Reserved bits present. */
#define VMCB_NPF_INFO1_ID	BIT(4)	/* Code read. */

#define VMCB_NPF_INFO1_GPA	BIT(32)	/* Guest physical address. */
#define VMCB_NPF_INFO1_GPT	BIT(33)	/* Guest page table. */

/*
 * EXITINTINFO, Interrupt exit info for all intercepts.
 * Section 15.7.2, Intercepts during IDT Interrupt Delivery.
 */
#define VMCB_EXITINTINFO_VECTOR(x)	((x) & 0xFF)
#define VMCB_EXITINTINFO_TYPE(x)	(((x) >> 8) & 0x7)
#define VMCB_EXITINTINFO_EC_VALID(x)	(((x) & BIT(11)) ? 1 : 0)
#define VMCB_EXITINTINFO_VALID(x)	(((x) & BIT(31)) ? 1 : 0)
#define VMCB_EXITINTINFO_EC(x)		(((x) >> 32) & 0xFFFFFFFF)
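
/*
 * Illustrative sketch (not part of the layout): EXITINTINFO uses the same
 * encoding as the eventinj field, so a hypervisor that wants to re-deliver
 * an event whose delivery was interrupted by an intercept can typically do
 *
 *	if (VMCB_EXITINTINFO_VALID(ctrl->exitintinfo))
 *		ctrl->eventinj = ctrl->exitintinfo;
 *
 * where 'ctrl' points at the struct vmcb_ctrl declared below.
 */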

/* Offset of various VMCB fields. */
#define VMCB_OFF_CTRL(x)		(x)
#define VMCB_OFF_STATE(x)		((x) + 0x400)

#define VMCB_OFF_CR_INTERCEPT		VMCB_OFF_CTRL(0x0)
#define VMCB_OFF_DR_INTERCEPT		VMCB_OFF_CTRL(0x4)
#define VMCB_OFF_EXC_INTERCEPT		VMCB_OFF_CTRL(0x8)
#define VMCB_OFF_INST1_INTERCEPT	VMCB_OFF_CTRL(0xC)
#define VMCB_OFF_INST2_INTERCEPT	VMCB_OFF_CTRL(0x10)
#define VMCB_OFF_PAUSE_FILTHRESH	VMCB_OFF_CTRL(0x3C)
#define VMCB_OFF_PAUSE_FILCNT		VMCB_OFF_CTRL(0x3E)
#define VMCB_OFF_IO_PERM		VMCB_OFF_CTRL(0x40)
#define VMCB_OFF_MSR_PERM		VMCB_OFF_CTRL(0x48)
#define VMCB_OFF_TSC_OFFSET		VMCB_OFF_CTRL(0x50)
#define VMCB_OFF_ASID			VMCB_OFF_CTRL(0x58)
#define VMCB_OFF_TLB_CTRL		VMCB_OFF_CTRL(0x5C)
#define VMCB_OFF_VIRQ			VMCB_OFF_CTRL(0x60)
#define VMCB_OFF_EXIT_REASON		VMCB_OFF_CTRL(0x70)
#define VMCB_OFF_EXITINFO1		VMCB_OFF_CTRL(0x78)
#define VMCB_OFF_EXITINFO2		VMCB_OFF_CTRL(0x80)
#define VMCB_OFF_EXITINTINFO		VMCB_OFF_CTRL(0x88)
#define VMCB_OFF_NP_ENABLE		VMCB_OFF_CTRL(0x90)
#define VMCB_OFF_AVIC_BAR		VMCB_OFF_CTRL(0x98)
#define VMCB_OFF_NPT_BASE		VMCB_OFF_CTRL(0xB0)
#define VMCB_OFF_AVIC_PAGE		VMCB_OFF_CTRL(0xE0)
#define VMCB_OFF_AVIC_LT		VMCB_OFF_CTRL(0xF0)
#define VMCB_OFF_AVIC_PT		VMCB_OFF_CTRL(0xF8)

#define VMCB_OFF_CPL			VMCB_OFF_STATE(0xCB)
#define VMCB_OFF_STAR			VMCB_OFF_STATE(0x200)
#define VMCB_OFF_LSTAR			VMCB_OFF_STATE(0x208)
#define VMCB_OFF_CSTAR			VMCB_OFF_STATE(0x210)
#define VMCB_OFF_SFMASK			VMCB_OFF_STATE(0x218)
#define VMCB_OFF_KERNELGBASE		VMCB_OFF_STATE(0x220)
#define VMCB_OFF_SYSENTER_CS		VMCB_OFF_STATE(0x228)
#define VMCB_OFF_SYSENTER_ESP		VMCB_OFF_STATE(0x230)
#define VMCB_OFF_SYSENTER_EIP		VMCB_OFF_STATE(0x238)
#define VMCB_OFF_GUEST_PAT		VMCB_OFF_STATE(0x268)
#define VMCB_OFF_DBGCTL			VMCB_OFF_STATE(0x270)
#define VMCB_OFF_BR_FROM		VMCB_OFF_STATE(0x278)
#define VMCB_OFF_BR_TO			VMCB_OFF_STATE(0x280)
#define VMCB_OFF_INT_FROM		VMCB_OFF_STATE(0x288)
#define VMCB_OFF_INT_TO			VMCB_OFF_STATE(0x290)

/*
 * Encode the VMCB offset and bytes that we want to read from VMCB.
 */
#define VMCB_ACCESS(o, w)		(0x80000000 | (((w) & 0xF) << 16) | \
					((o) & 0xFFF))
#define VMCB_ACCESS_OK(v)		((v) & 0x80000000)
#define VMCB_ACCESS_BYTES(v)		(((v) >> 16) & 0xF)
#define VMCB_ACCESS_OFFSET(v)		((v) & 0xFFF)
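
/*
 * For example, VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8) encodes an 8-byte access
 * at control-area offset 0x78 as 0x80080078; VMCB_ACCESS_OK(),
 * VMCB_ACCESS_BYTES() and VMCB_ACCESS_OFFSET() recover the marker bit, the
 * width and the offset from such a value.
 */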

#ifdef _KERNEL

struct svm_softc;
struct svm_vcpu;
struct vm_snapshot_meta;

/* VMCB save state area segment format */
struct vmcb_segment {
	uint16_t	selector;
	uint16_t	attrib;
	uint32_t	limit;
	uint64_t	base;
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_segment) == 16);

/* Code segment descriptor attribute in 12 bit format as saved by VMCB. */
#define VMCB_CS_ATTRIB_L	BIT(9)	/* Long mode. */
#define VMCB_CS_ATTRIB_D	BIT(10)	/* Operand size bit. */

/*
 * The VMCB is divided into two areas - the first one contains various
 * control bits including the intercept vector and the second one contains
 * the guest state.
 */

/* VMCB control area - padded up to 1024 bytes */
struct vmcb_ctrl {
	uint32_t intercept[5];	/* all intercepts */
	uint8_t	 pad1[0x28];	/* Offsets 0x14-0x3B are reserved. */
	uint16_t pause_filthresh; /* Offset 0x3C, PAUSE filter threshold */
	uint16_t pause_filcnt;	/* Offset 0x3E, PAUSE filter count */
	uint64_t iopm_base_pa;	/* 0x40: IOPM_BASE_PA */
	uint64_t msrpm_base_pa;	/* 0x48: MSRPM_BASE_PA */
	uint64_t tsc_offset;	/* 0x50: TSC_OFFSET */
	uint32_t asid;		/* 0x58: Guest ASID */
	uint8_t	 tlb_ctrl;	/* 0x5C: TLB_CONTROL */
	uint8_t	 pad2[3];	/* 0x5D-0x5F: Reserved. */
	uint8_t	 v_tpr;		/* 0x60: V_TPR, guest CR8 */
	uint8_t	 v_irq:1;	/* Is virtual interrupt pending? */
	uint8_t	 :7;		/* Padding */
	uint8_t	 v_intr_prio:4;	/* 0x62: Priority for virtual interrupt. */
	uint8_t	 v_ign_tpr:1;
	uint8_t	 :3;
	uint8_t	 v_intr_masking:1; /* Guest and host sharing of RFLAGS. */
	uint8_t	 :7;
	uint8_t	 v_intr_vector;	/* 0x64: Vector for virtual interrupt. */
	uint8_t	 pad3[3];	/* 0x65-0x67 Reserved. */
	uint64_t intr_shadow:1;	/* 0x68: Interrupt shadow, section 15.2.1 APM2 */
	uint64_t :63;
	uint64_t exitcode;	/* 0x70, Exitcode */
	uint64_t exitinfo1;	/* 0x78, EXITINFO1 */
	uint64_t exitinfo2;	/* 0x80, EXITINFO2 */
	uint64_t exitintinfo;	/* 0x88, Interrupt exit value. */
	uint64_t np_enable:1;	/* 0x90, Nested paging enable. */
	uint64_t :63;
	uint8_t	 pad4[0x10];	/* 0x98-0xA7 reserved. */
	uint64_t eventinj;	/* 0xA8, Event injection. */
	uint64_t n_cr3;		/* 0xB0, Nested page table. */
	uint64_t lbr_virt_en:1;	/* Enable LBR virtualization. */
	uint64_t :63;
	uint32_t vmcb_clean;	/* 0xC0: VMCB clean bits for caching */
	uint32_t :32;		/* 0xC4: Reserved */
	uint64_t nrip;		/* 0xC8: Guest next nRIP. */
	uint8_t	 inst_len;	/* 0xD0: #NPF decode assist */
	uint8_t	 inst_bytes[15];
	uint8_t	 padd6[0x320];
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_state {
	struct vmcb_segment es;
	struct vmcb_segment cs;
	struct vmcb_segment ss;
	struct vmcb_segment ds;
	struct vmcb_segment fs;
	struct vmcb_segment gs;
	struct vmcb_segment gdt;
	struct vmcb_segment ldt;
	struct vmcb_segment idt;
	struct vmcb_segment tr;
	uint8_t	 pad1[0x2b];	/* Reserved: 0xA0-0xCA */
	uint8_t	 cpl;
	uint8_t	 pad2[4];
	uint64_t efer;
	uint8_t	 pad3[0x70];	/* Reserved: 0xd8-0x147 */
	uint64_t cr4;
	uint64_t cr3;		/* Guest CR3 */
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t	 pad4[0x58];	/* Reserved: 0x180-0x1D7 */
	uint64_t rsp;
	uint8_t	 pad5[0x18];	/* Reserved 0x1E0-0x1F7 */
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t	 pad6[0x20];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t	 pad7[0x968];	/* Reserved up to end of VMCB */
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
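
/*
 * Consistency note (a sketch, relying on the CTASSERTs above): the raw
 * VMCB_OFF_* offsets earlier in this file line up with the layout of these
 * structures, e.g. VMCB_OFF_EXIT_REASON (0x70) is
 * offsetof(struct vmcb, ctrl.exitcode) and VMCB_OFF_CPL (0x4CB) is
 * offsetof(struct vmcb, state.cpl).
 */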

int	vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval);
int	vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val);
int	vmcb_setdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
int	vmcb_getdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
int	vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
#ifdef BHYVE_SNAPSHOT
int	vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val);
int	vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val);
int	vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
	    struct vm_snapshot_meta *meta);
int	vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
	    struct vm_snapshot_meta *meta);
#endif

#endif /* _KERNEL */
#endif /* _VMCB_H_ */