/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _VMCB_H_
#define _VMCB_H_

#define BIT(n)				(1ULL << n)

/*
 * Secure Virtual Machine: AMD64 Programmer's Manual Vol2, Chapter 15
 * Layout of VMCB: AMD64 Programmer's Manual Vol2, Appendix B
 */

/* vmcb_ctrl->intercept[] array indices */
#define VMCB_CR_INTCPT			0
#define VMCB_DR_INTCPT			1
#define VMCB_EXC_INTCPT			2
#define VMCB_CTRL1_INTCPT		3
#define VMCB_CTRL2_INTCPT		4

/* intercept[VMCB_CTRL1_INTCPT] fields */
#define VMCB_INTCPT_INTR		BIT(0)
#define VMCB_INTCPT_NMI			BIT(1)
#define VMCB_INTCPT_SMI			BIT(2)
#define VMCB_INTCPT_INIT		BIT(3)
#define VMCB_INTCPT_VINTR		BIT(4)
#define VMCB_INTCPT_CR0_WRITE		BIT(5)
#define VMCB_INTCPT_IDTR_READ		BIT(6)
#define VMCB_INTCPT_GDTR_READ		BIT(7)
#define VMCB_INTCPT_LDTR_READ		BIT(8)
#define VMCB_INTCPT_TR_READ		BIT(9)
#define VMCB_INTCPT_IDTR_WRITE		BIT(10)
#define VMCB_INTCPT_GDTR_WRITE		BIT(11)
#define VMCB_INTCPT_LDTR_WRITE		BIT(12)
#define VMCB_INTCPT_TR_WRITE		BIT(13)
#define VMCB_INTCPT_RDTSC		BIT(14)
#define VMCB_INTCPT_RDPMC		BIT(15)
#define VMCB_INTCPT_PUSHF		BIT(16)
#define VMCB_INTCPT_POPF		BIT(17)
#define VMCB_INTCPT_CPUID		BIT(18)
#define VMCB_INTCPT_RSM			BIT(19)
#define VMCB_INTCPT_IRET		BIT(20)
#define VMCB_INTCPT_INTn		BIT(21)
#define VMCB_INTCPT_INVD		BIT(22)
#define VMCB_INTCPT_PAUSE		BIT(23)
#define VMCB_INTCPT_HLT			BIT(24)
#define VMCB_INTCPT_INVLPG		BIT(25)
#define VMCB_INTCPT_INVLPGA		BIT(26)
#define VMCB_INTCPT_IO			BIT(27)
#define VMCB_INTCPT_MSR			BIT(28)
#define VMCB_INTCPT_TASK_SWITCH		BIT(29)
#define VMCB_INTCPT_FERR_FREEZE		BIT(30)
#define VMCB_INTCPT_SHUTDOWN		BIT(31)
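
/*
 * Usage sketch (illustrative only, not part of this header's interface):
 * an intercept is enabled by OR-ing the relevant VMCB_INTCPT_* bit into the
 * matching word of the intercept[] array in the control area, e.g.:
 *
 *	struct vmcb_ctrl *ctrl;		// hypothetical pointer to a control area
 *	ctrl->intercept[VMCB_CTRL1_INTCPT] |= VMCB_INTCPT_CPUID | VMCB_INTCPT_HLT;
 */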

/* intercept[VMCB_CTRL2_INTCPT] fields */
#define VMCB_INTCPT_VMRUN		BIT(0)
#define VMCB_INTCPT_VMMCALL		BIT(1)
#define VMCB_INTCPT_VMLOAD		BIT(2)
#define VMCB_INTCPT_VMSAVE		BIT(3)
#define VMCB_INTCPT_STGI		BIT(4)
#define VMCB_INTCPT_CLGI		BIT(5)
#define VMCB_INTCPT_SKINIT		BIT(6)
#define VMCB_INTCPT_RDTSCP		BIT(7)
#define VMCB_INTCPT_ICEBP		BIT(8)
#define VMCB_INTCPT_WBINVD		BIT(9)
#define VMCB_INTCPT_MONITOR		BIT(10)
#define VMCB_INTCPT_MWAIT		BIT(11)
#define VMCB_INTCPT_MWAIT_ARMED		BIT(12)
#define VMCB_INTCPT_XSETBV		BIT(13)

/* VMCB TLB control */
#define VMCB_TLB_FLUSH_NOTHING		0	/* Flush nothing */
#define VMCB_TLB_FLUSH_ALL		1	/* Flush entire TLB */
#define VMCB_TLB_FLUSH_GUEST		3	/* Flush all guest entries */
#define VMCB_TLB_FLUSH_GUEST_NONGLOBAL	7	/* Flush guest non-PG entries */

/* VMCB state caching */
#define VMCB_CACHE_NONE			0	/* No caching */
#define VMCB_CACHE_I			BIT(0)	/* Intercept, TSC off, Pause filter */
#define VMCB_CACHE_IOPM			BIT(1)	/* I/O and MSR permission */
#define VMCB_CACHE_ASID			BIT(2)	/* ASID */
#define VMCB_CACHE_TPR			BIT(3)	/* V_TPR to V_INTR_VECTOR */
#define VMCB_CACHE_NP			BIT(4)	/* Nested Paging */
#define VMCB_CACHE_CR			BIT(5)	/* CR0, CR3, CR4 & EFER */
#define VMCB_CACHE_DR			BIT(6)	/* Debug registers */
#define VMCB_CACHE_DT			BIT(7)	/* GDT/IDT */
#define VMCB_CACHE_SEG			BIT(8)	/* User segments, CPL */
#define VMCB_CACHE_CR2			BIT(9)	/* Page fault address */
#define VMCB_CACHE_LBR			BIT(10)	/* Last branch */

/* VMCB control event injection */
#define VMCB_EVENTINJ_EC_VALID		BIT(11)	/* Error Code valid */
#define VMCB_EVENTINJ_VALID		BIT(31)	/* Event valid */

/* Event types that can be injected */
#define VMCB_EVENTINJ_TYPE_INTR		0
#define VMCB_EVENTINJ_TYPE_NMI		2
#define VMCB_EVENTINJ_TYPE_EXCEPTION	3
#define VMCB_EVENTINJ_TYPE_INTn		4

/* VMCB exit code, APM vol2 Appendix C */
#define VMCB_EXIT_MC			0x52
#define VMCB_EXIT_INTR			0x60
#define VMCB_EXIT_NMI			0x61
#define VMCB_EXIT_VINTR			0x64
#define VMCB_EXIT_PUSHF			0x70
#define VMCB_EXIT_POPF			0x71
#define VMCB_EXIT_CPUID			0x72
#define VMCB_EXIT_IRET			0x74
#define VMCB_EXIT_INVD			0x76
#define VMCB_EXIT_PAUSE			0x77
#define VMCB_EXIT_HLT			0x78
#define VMCB_EXIT_INVLPGA		0x7A
#define VMCB_EXIT_IO			0x7B
#define VMCB_EXIT_MSR			0x7C
#define VMCB_EXIT_SHUTDOWN		0x7F
#define VMCB_EXIT_VMRUN			0x80
#define VMCB_EXIT_VMMCALL		0x81
#define VMCB_EXIT_VMLOAD		0x82
#define VMCB_EXIT_VMSAVE		0x83
#define VMCB_EXIT_STGI			0x84
#define VMCB_EXIT_CLGI			0x85
#define VMCB_EXIT_SKINIT		0x86
#define VMCB_EXIT_ICEBP			0x88
#define VMCB_EXIT_WBINVD		0x89
#define VMCB_EXIT_MONITOR		0x8A
#define VMCB_EXIT_MWAIT			0x8B
#define VMCB_EXIT_NPF			0x400
#define VMCB_EXIT_INVALID		-1

/*
 * Nested page fault.
 * Bit definitions to decode EXITINFO1.
 */
#define VMCB_NPF_INFO1_P		BIT(0)	/* Nested page present. */
#define VMCB_NPF_INFO1_W		BIT(1)	/* Access was write. */
#define VMCB_NPF_INFO1_U		BIT(2)	/* Access was user access. */
#define VMCB_NPF_INFO1_RSV		BIT(3)	/* Reserved bits present. */
#define VMCB_NPF_INFO1_ID		BIT(4)	/* Code read. */

#define VMCB_NPF_INFO1_GPA		BIT(32)	/* Guest physical address. */
#define VMCB_NPF_INFO1_GPT		BIT(33)	/* Guest page table. */

/*
 * EXITINTINFO, Interrupt exit info for all intercepts.
 * Section 15.7.2, Intercepts during IDT Interrupt Delivery.
 */
#define VMCB_EXITINTINFO_VECTOR(x)	((x) & 0xFF)
#define VMCB_EXITINTINFO_TYPE(x)	(((x) >> 8) & 0x7)
#define VMCB_EXITINTINFO_EC_VALID(x)	(((x) & BIT(11)) ? 1 : 0)
#define VMCB_EXITINTINFO_VALID(x)	(((x) & BIT(31)) ? 1 : 0)
#define VMCB_EXITINTINFO_EC(x)		(((x) >> 32) & 0xFFFFFFFF)
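
/*
 * Decoding sketch (illustrative only): a raw EXITINTINFO value is unpacked
 * with the accessors above; the type field uses the same encoding as the
 * VMCB_EVENTINJ_TYPE_* values.
 *
 *	uint64_t intinfo;	// hypothetical value read from the VMCB
 *	if (VMCB_EXITINTINFO_VALID(intinfo)) {
 *		int vector = VMCB_EXITINTINFO_VECTOR(intinfo);
 *		int type = VMCB_EXITINTINFO_TYPE(intinfo);
 *		uint32_t errcode = VMCB_EXITINTINFO_EC_VALID(intinfo) ?
 *		    VMCB_EXITINTINFO_EC(intinfo) : 0;
 *	}
 */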

/* Offset of various VMCB fields. */
#define VMCB_OFF_CTRL(x)		(x)
#define VMCB_OFF_STATE(x)		((x) + 0x400)

#define VMCB_OFF_CR_INTERCEPT		VMCB_OFF_CTRL(0x0)
#define VMCB_OFF_DR_INTERCEPT		VMCB_OFF_CTRL(0x4)
#define VMCB_OFF_EXC_INTERCEPT		VMCB_OFF_CTRL(0x8)
#define VMCB_OFF_INST1_INTERCEPT	VMCB_OFF_CTRL(0xC)
#define VMCB_OFF_INST2_INTERCEPT	VMCB_OFF_CTRL(0x10)
#define VMCB_OFF_PAUSE_FILTHRESH	VMCB_OFF_CTRL(0x3C)
#define VMCB_OFF_PAUSE_FILCNT		VMCB_OFF_CTRL(0x3E)
#define VMCB_OFF_IO_PERM		VMCB_OFF_CTRL(0x40)
#define VMCB_OFF_MSR_PERM		VMCB_OFF_CTRL(0x48)
#define VMCB_OFF_TSC_OFFSET		VMCB_OFF_CTRL(0x50)
#define VMCB_OFF_ASID			VMCB_OFF_CTRL(0x58)
#define VMCB_OFF_TLB_CTRL		VMCB_OFF_CTRL(0x5C)
#define VMCB_OFF_VIRQ			VMCB_OFF_CTRL(0x60)
#define VMCB_OFF_EXIT_REASON		VMCB_OFF_CTRL(0x70)
#define VMCB_OFF_EXITINFO1		VMCB_OFF_CTRL(0x78)
#define VMCB_OFF_EXITINFO2		VMCB_OFF_CTRL(0x80)
#define VMCB_OFF_EXITINTINFO		VMCB_OFF_CTRL(0x88)
#define VMCB_OFF_NP_ENABLE		VMCB_OFF_CTRL(0x90)
#define VMCB_OFF_AVIC_BAR		VMCB_OFF_CTRL(0x98)
#define VMCB_OFF_NPT_BASE		VMCB_OFF_CTRL(0xB0)
#define VMCB_OFF_AVIC_PAGE		VMCB_OFF_CTRL(0xE0)
#define VMCB_OFF_AVIC_LT		VMCB_OFF_CTRL(0xF0)
#define VMCB_OFF_AVIC_PT		VMCB_OFF_CTRL(0xF8)

#define VMCB_OFF_CPL			VMCB_OFF_STATE(0xCB)
#define VMCB_OFF_STAR			VMCB_OFF_STATE(0x200)
#define VMCB_OFF_LSTAR			VMCB_OFF_STATE(0x208)
#define VMCB_OFF_CSTAR			VMCB_OFF_STATE(0x210)
#define VMCB_OFF_SFMASK			VMCB_OFF_STATE(0x218)
#define VMCB_OFF_KERNELGBASE		VMCB_OFF_STATE(0x220)
#define VMCB_OFF_SYSENTER_CS		VMCB_OFF_STATE(0x228)
#define VMCB_OFF_SYSENTER_ESP		VMCB_OFF_STATE(0x230)
#define VMCB_OFF_SYSENTER_EIP		VMCB_OFF_STATE(0x238)
#define VMCB_OFF_GUEST_PAT		VMCB_OFF_STATE(0x268)
#define VMCB_OFF_DBGCTL			VMCB_OFF_STATE(0x270)
#define VMCB_OFF_BR_FROM		VMCB_OFF_STATE(0x278)
#define VMCB_OFF_BR_TO			VMCB_OFF_STATE(0x280)
#define VMCB_OFF_INT_FROM		VMCB_OFF_STATE(0x288)
#define VMCB_OFF_INT_TO			VMCB_OFF_STATE(0x290)

/*
 * Encode the VMCB offset and bytes that we want to read from VMCB.
 */
#define VMCB_ACCESS(o, w)		(0x80000000 | (((w) & 0xF) << 16) | \
					((o) & 0xFFF))
#define VMCB_ACCESS_OK(v)		((v) & 0x80000000)
#define VMCB_ACCESS_BYTES(v)		(((v) >> 16) & 0xF)
#define VMCB_ACCESS_OFFSET(v)		((v) & 0xFFF)
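
/*
 * Encoding sketch (illustrative only): VMCB_ACCESS() packs a VMCB byte
 * offset and an access width into a single identifier, presumably meant
 * for the 'ident' argument of the accessors declared below; the encoded
 * value can be recognized and decoded again with the other macros, e.g.:
 *
 *	int ident = VMCB_ACCESS(VMCB_OFF_TSC_OFFSET, 8);	// 8-byte field
 *	if (VMCB_ACCESS_OK(ident)) {
 *		int off = VMCB_ACCESS_OFFSET(ident);		// 0x50
 *		int bytes = VMCB_ACCESS_BYTES(ident);		// 8
 *	}
 */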

#ifdef _KERNEL

struct svm_softc;
struct svm_vcpu;
struct vm_snapshot_meta;

/* VMCB save state area segment format */
struct vmcb_segment {
	uint16_t	selector;
	uint16_t	attrib;
	uint32_t	limit;
	uint64_t	base;
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_segment) == 16);

/* Code segment descriptor attribute in 12 bit format as saved by VMCB. */
#define VMCB_CS_ATTRIB_L		BIT(9)	/* Long mode. */
#define VMCB_CS_ATTRIB_D		BIT(10)	/* Operand size bit. */

/*
 * The VMCB is divided into two areas - the first one contains various
 * control bits including the intercept vector and the second one contains
 * the guest state.
 */

/* VMCB control area - padded up to 1024 bytes */
struct vmcb_ctrl {
	uint32_t intercept[5];	/* all intercepts */
	uint8_t	 pad1[0x28];	/* Offsets 0x14-0x3B are reserved. */
	uint16_t pause_filthresh; /* Offset 0x3C, PAUSE filter threshold */
	uint16_t pause_filcnt;	/* Offset 0x3E, PAUSE filter count */
	uint64_t iopm_base_pa;	/* 0x40: IOPM_BASE_PA */
	uint64_t msrpm_base_pa;	/* 0x48: MSRPM_BASE_PA */
	uint64_t tsc_offset;	/* 0x50: TSC_OFFSET */
	uint32_t asid;		/* 0x58: Guest ASID */
	uint8_t	 tlb_ctrl;	/* 0x5C: TLB_CONTROL */
	uint8_t	 pad2[3];	/* 0x5D-0x5F: Reserved. */
	uint8_t	 v_tpr;		/* 0x60: V_TPR, guest CR8 */
	uint8_t	 v_irq:1;	/* Is virtual interrupt pending? */
	uint8_t	 :7;		/* Padding */
	uint8_t	 v_intr_prio:4;	/* 0x62: Priority for virtual interrupt. */
	uint8_t	 v_ign_tpr:1;
	uint8_t	 :3;
	uint8_t	 v_intr_masking:1; /* Guest and host sharing of RFLAGS. */
	uint8_t	 :7;
	uint8_t	 v_intr_vector;	/* 0x64: Vector for virtual interrupt. */
	uint8_t	 pad3[3];	/* 0x65-0x67: Reserved. */
	uint64_t intr_shadow:1;	/* 0x68: Interrupt shadow, Section 15.2.1, APM2 */
	uint64_t :63;
	uint64_t exitcode;	/* 0x70, Exitcode */
	uint64_t exitinfo1;	/* 0x78, EXITINFO1 */
	uint64_t exitinfo2;	/* 0x80, EXITINFO2 */
	uint64_t exitintinfo;	/* 0x88, Interrupt exit value. */
	uint64_t np_enable:1;	/* 0x90, Nested paging enable. */
	uint64_t :63;
	uint8_t	 pad4[0x10];	/* 0x98-0xA7: Reserved. */
	uint64_t eventinj;	/* 0xA8, Event injection. */
	uint64_t n_cr3;		/* 0xB0, Nested page table. */
	uint64_t lbr_virt_en:1;	/* Enable LBR virtualization. */
	uint64_t :63;
	uint32_t vmcb_clean;	/* 0xC0: VMCB clean bits for caching */
	uint32_t :32;		/* 0xC4: Reserved */
	uint64_t nrip;		/* 0xC8: Guest next nRIP. */
	uint8_t	 inst_len;	/* 0xD0: #NPF decode assist */
	uint8_t	 inst_bytes[15];
	uint8_t	 padd6[0x320];
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_state {
	struct vmcb_segment es;
	struct vmcb_segment cs;
	struct vmcb_segment ss;
	struct vmcb_segment ds;
	struct vmcb_segment fs;
	struct vmcb_segment gs;
	struct vmcb_segment gdt;
	struct vmcb_segment ldt;
	struct vmcb_segment idt;
	struct vmcb_segment tr;
	uint8_t	 pad1[0x2b];	/* Reserved: 0xA0-0xCA */
	uint8_t	 cpl;
	uint8_t	 pad2[4];
	uint64_t efer;
	uint8_t	 pad3[0x70];	/* Reserved: 0xD8-0x147 */
	uint64_t cr4;
	uint64_t cr3;		/* Guest CR3 */
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t	 pad4[0x58];	/* Reserved: 0x180-0x1D7 */
	uint64_t rsp;
	uint8_t	 pad5[0x18];	/* Reserved: 0x1E0-0x1F7 */
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t	 pad6[0x20];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t	 pad7[0x968];	/* Reserved up to end of VMCB */
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __attribute__ ((__packed__));
CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
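
/*
 * Layout sketch (illustrative only): the VMCB_OFF_* byte offsets defined
 * earlier line up with the packed structure fields above, so a hypothetical
 * consumer could cross-check them, e.g.:
 *
 *	CTASSERT(offsetof(struct vmcb, ctrl.exitcode) == VMCB_OFF_EXIT_REASON);
 *	CTASSERT(offsetof(struct vmcb, state.star) == VMCB_OFF_STAR);
 */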

int	vmcb_read(struct svm_vcpu *vcpu, int ident, uint64_t *retval);
int	vmcb_write(struct svm_vcpu *vcpu, int ident, uint64_t val);
int	vmcb_setdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
int	vmcb_getdesc(struct svm_vcpu *vcpu, int ident, struct seg_desc *desc);
int	vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
#ifdef BHYVE_SNAPSHOT
int	vmcb_getany(struct svm_vcpu *vcpu, int ident, uint64_t *val);
int	vmcb_setany(struct svm_vcpu *vcpu, int ident, uint64_t val);
int	vmcb_snapshot_desc(struct svm_vcpu *vcpu, int reg,
	    struct vm_snapshot_meta *meta);
int	vmcb_snapshot_any(struct svm_vcpu *vcpu, int ident,
	    struct vm_snapshot_meta *meta);
#endif

#endif /* _KERNEL */
#endif /* _VMCB_H_ */