1366f6083SPeter Grehan/*- 2366f6083SPeter Grehan * Copyright (c) 2011 NetApp, Inc. 30492757cSNeel Natu * Copyright (c) 2013 Neel Natu <neel@freebsd.org> 4366f6083SPeter Grehan * All rights reserved. 5366f6083SPeter Grehan * 6366f6083SPeter Grehan * Redistribution and use in source and binary forms, with or without 7366f6083SPeter Grehan * modification, are permitted provided that the following conditions 8366f6083SPeter Grehan * are met: 9366f6083SPeter Grehan * 1. Redistributions of source code must retain the above copyright 10366f6083SPeter Grehan * notice, this list of conditions and the following disclaimer. 11366f6083SPeter Grehan * 2. Redistributions in binary form must reproduce the above copyright 12366f6083SPeter Grehan * notice, this list of conditions and the following disclaimer in the 13366f6083SPeter Grehan * documentation and/or other materials provided with the distribution. 14366f6083SPeter Grehan * 15366f6083SPeter Grehan * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 16366f6083SPeter Grehan * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17366f6083SPeter Grehan * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18366f6083SPeter Grehan * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 19366f6083SPeter Grehan * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20366f6083SPeter Grehan * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21366f6083SPeter Grehan * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22366f6083SPeter Grehan * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23366f6083SPeter Grehan * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24366f6083SPeter Grehan * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25366f6083SPeter Grehan * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.h"

/*
 * On SMP kernels the pm_active bitmap is updated by multiple cpus
 * concurrently, so the bit set/clear must carry a lock prefix.  On UP
 * kernels the prefix is unnecessary overhead.
 */
#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define	VENTER	push %rbp ; mov %rsp,%rbp
#define	VLEAVE	pop %rbp

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	/* %cr2 cannot take a memory operand so bounce through %rsi */	\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */

/*
 * Save and restore the host context.
 *
 * Only the callee-saved registers of the SysV AMD64 ABI need to be
 * preserved here; the C caller of vmx_enter_guest() expects everything
 * else to be clobbered.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE							\
	movq	%r15, VMXCTX_HOST_R15(%rdi);				\
	movq	%r14, VMXCTX_HOST_R14(%rdi);				\
	movq	%r13, VMXCTX_HOST_R13(%rdi);				\
	movq	%r12, VMXCTX_HOST_R12(%rdi);				\
	movq	%rbp, VMXCTX_HOST_RBP(%rdi);				\
	movq	%rsp, VMXCTX_HOST_RSP(%rdi);				\
	movq	%rbx, VMXCTX_HOST_RBX(%rdi);				\

#define	VMX_HOST_RESTORE						\
	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;				\

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

	/*
	 * Activate guest pmap on this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %eax
	/* Mark this cpu active in the guest pmap (atomic on SMP) */
	LK btsl	%eax, PM_ACTIVE(%r11)

	/*
	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
	 * then we must invalidate all mappings associated with this EPTP.
	 */
	movq	PM_EPTGEN(%r11), %r10
	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
	je	guest_restore

	/* Refresh 'vmx->eptgen[curcpu]' */
	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)

	/*
	 * Setup the invept descriptor on the host stack: a 128-bit
	 * structure of { EPTP, reserved-zero }, built in the red zone
	 * below %rsp.
	 */
	mov	%rsp, %r11
	movq	VMX_EPTP(%rsi), %rax
	movq	%rax, -16(%r11)
	movq	$0x0, -8(%r11)
	mov	$0x1, %eax		/* Single context invalidate */
	invept	-16(%r11), %rax
	/* invept failure is reported in RFLAGS (CF or ZF set) */
	jbe	invept_error		/* Check invept instruction error */

guest_restore:
	/* %edx != 0 means the VMCS was launched before: resume it */
	cmpl	$0, %edx
	je	do_launch

	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

invept_error:
	movl	$VMX_INVEPT_ERROR, %eax
	jmp	decode_inst_error

decode_inst_error:
	/*
	 * The RFLAGS set by the failed VMX instruction are still intact
	 * ('movl' and 'jmp' do not modify flags): ZF set means
	 * VMfailValid, otherwise VMfailInvalid.
	 */
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE
	VLEAVE
	ret

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	/* guest %rdi was saved above, so it is free as scratch for %cr2 */
	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 *
 * Builds the same stack frame that the CPU would push for an interrupt
 * through a gate (%ss, %rsp, %rflags, %cs, and %rip via the call), so the
 * handler's 'iretq' returns here with interrupts re-enabled from the
 * pushed %rflags.
 */
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11		/* save %rsp */
	and	$~0xf, %rsp		/* align on 16-byte boundary */
	pushq	$KERNEL_SS		/* %ss */
	pushq	%r11			/* %rsp */
	pushfq				/* %rflags */
	pushq	$KERNEL_CS		/* %cs */
	cli				/* disable interrupts */
	callq	*%rdi			/* push %rip and call isr */
	VLEAVE
	ret
END(vmx_call_isr)