/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.h"

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define	VENTER	push %rbp ; mov %rsp,%rbp
#define	VLEAVE	pop %rbp

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE                               \
        movq    %rdi,%rsp;                              \
        movq    VMXCTX_GUEST_CR2(%rdi),%rsi;            \
        movq    %rsi,%cr2;                              \
        movq    VMXCTX_GUEST_RSI(%rdi),%rsi;            \
        movq    VMXCTX_GUEST_RDX(%rdi),%rdx;            \
        movq    VMXCTX_GUEST_RCX(%rdi),%rcx;            \
        movq    VMXCTX_GUEST_R8(%rdi),%r8;              \
        movq    VMXCTX_GUEST_R9(%rdi),%r9;              \
        movq    VMXCTX_GUEST_RAX(%rdi),%rax;            \
        movq    VMXCTX_GUEST_RBX(%rdi),%rbx;            \
        movq    VMXCTX_GUEST_RBP(%rdi),%rbp;            \
        movq    VMXCTX_GUEST_R10(%rdi),%r10;            \
        movq    VMXCTX_GUEST_R11(%rdi),%r11;            \
        movq    VMXCTX_GUEST_R12(%rdi),%r12;            \
        movq    VMXCTX_GUEST_R13(%rdi),%r13;            \
        movq    VMXCTX_GUEST_R14(%rdi),%r14;            \
        movq    VMXCTX_GUEST_R15(%rdi),%r15;            \
        movq    VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

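/*
 * Note that VMX_HOST_SAVE/VMX_HOST_RESTORE below only handle %rbx, %rbp,
 * %rsp and %r12-%r15: vmx_enter_guest() is reached by an ordinary C
 * function call, so the amd64 calling convention leaves the caller-saved
 * registers for the caller to preserve.
 */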
/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE                                   \
        movq    %r15, VMXCTX_HOST_R15(%rdi);            \
        movq    %r14, VMXCTX_HOST_R14(%rdi);            \
        movq    %r13, VMXCTX_HOST_R13(%rdi);            \
        movq    %r12, VMXCTX_HOST_R12(%rdi);            \
        movq    %rbp, VMXCTX_HOST_RBP(%rdi);            \
        movq    %rsp, VMXCTX_HOST_RSP(%rdi);            \
        movq    %rbx, VMXCTX_HOST_RBX(%rdi)

#define	VMX_HOST_RESTORE                                \
        movq    VMXCTX_HOST_R15(%rdi), %r15;            \
        movq    VMXCTX_HOST_R14(%rdi), %r14;            \
        movq    VMXCTX_HOST_R13(%rdi), %r13;            \
        movq    VMXCTX_HOST_R12(%rdi), %r12;            \
        movq    VMXCTX_HOST_RBP(%rdi), %rbp;            \
        movq    VMXCTX_HOST_RSP(%rdi), %rsp;            \
        movq    VMXCTX_HOST_RBX(%rdi), %rbx

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
        VENTER
        /*
         * Save host state before doing anything else.
         */
        VMX_HOST_SAVE

        /*
         * Activate guest pmap on this cpu.
         */
        movq    VMXCTX_PMAP(%rdi), %r11
        movl    PCPU(CPUID), %eax
        LK btsl %eax, PM_ACTIVE(%r11)

        /*
         * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
         * then we must invalidate all mappings associated with this EPTP.
         */
        movq    PM_EPTGEN(%r11), %r10
        cmpq    %r10, VMX_EPTGEN(%rsi, %rax, 8)
        je      guest_restore

        /* Refresh 'vmx->eptgen[curcpu]' */
        movq    %r10, VMX_EPTGEN(%rsi, %rax, 8)

        /*
         * Set up the 128-bit invept descriptor on the host stack: the EPTP
         * in the low quadword, and the reserved high quadword, which must
         * be zero.
         */
        mov     %rsp, %r11
        movq    VMX_EPTP(%rsi), %rax
        movq    %rax, -16(%r11)
        movq    $0x0, -8(%r11)
        mov     $0x1, %eax              /* Single context invalidate */
        invept  -16(%r11), %rax
        jbe     invept_error            /* Check invept instruction error */

guest_restore:
        cmpl    $0, %edx
        je      do_launch

        VMX_GUEST_RESTORE
        vmresume
        /*
         * In the common case 'vmresume' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMRESUME_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMRESUME_ERROR, %eax
        jmp     decode_inst_error

do_launch:
        VMX_GUEST_RESTORE
        vmlaunch
        /*
         * In the common case 'vmlaunch' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMLAUNCH_ERROR, %eax
        jmp     decode_inst_error

invept_error:
        movl    $VMX_INVEPT_ERROR, %eax
        jmp     decode_inst_error

decode_inst_error:
        /*
         * Neither 'movl' nor 'jmp' modify the flags, so ZF still reflects
         * the outcome of the failed instruction: set for VMfailValid (an
         * error number is available in the VMCS) and clear for
         * VMfailInvalid.
         */
        movl    $VM_FAIL_VALID, %r11d
        jz      inst_error
        movl    $VM_FAIL_INVALID, %r11d
inst_error:
        movl    %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

        /*
         * The return value is already populated in %eax so we cannot use
         * it as a scratch register beyond this point.
         */

        /*
         * Deactivate guest pmap from this cpu.
         */
        movq    VMXCTX_PMAP(%rdi), %r11
        movl    PCPU(CPUID), %r10d
        LK btrl %r10d, PM_ACTIVE(%r11)

        VMX_HOST_RESTORE
        VLEAVE
        ret

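/*
 * For reference, a rough sketch of the C-side call pattern that the entry
 * and exit paths above and below assume. This is illustrative only: the
 * real caller lives in vmx.c and the variable names here are made up.
 *
 *	disable_intr();
 *	rc = vmx_enter_guest(vmxctx, vmx, launched);
 *	switch (rc) {
 *	case VMX_GUEST_VMEXIT:		// normal exit via vmx_exit_guest
 *	case VMX_VMLAUNCH_ERROR:	// vmlaunch failed; see the
 *	case VMX_VMRESUME_ERROR:	// VMXCTX_INST_FAIL_STATUS field
 *	case VMX_INVEPT_ERROR:		// invept failed
 *		...
 *	}
 */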
/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
        ALIGN_TEXT
        .globl  vmx_exit_guest
vmx_exit_guest:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        movq    %rdi,VMXCTX_GUEST_RDI(%rsp)
        movq    %rsi,VMXCTX_GUEST_RSI(%rsp)
        movq    %rdx,VMXCTX_GUEST_RDX(%rsp)
        movq    %rcx,VMXCTX_GUEST_RCX(%rsp)
        movq    %r8,VMXCTX_GUEST_R8(%rsp)
        movq    %r9,VMXCTX_GUEST_R9(%rsp)
        movq    %rax,VMXCTX_GUEST_RAX(%rsp)
        movq    %rbx,VMXCTX_GUEST_RBX(%rsp)
        movq    %rbp,VMXCTX_GUEST_RBP(%rsp)
        movq    %r10,VMXCTX_GUEST_R10(%rsp)
        movq    %r11,VMXCTX_GUEST_R11(%rsp)
        movq    %r12,VMXCTX_GUEST_R12(%rsp)
        movq    %r13,VMXCTX_GUEST_R13(%rsp)
        movq    %r14,VMXCTX_GUEST_R14(%rsp)
        movq    %r15,VMXCTX_GUEST_R15(%rsp)

        movq    %cr2,%rdi
        movq    %rdi,VMXCTX_GUEST_CR2(%rsp)

        movq    %rsp,%rdi

        /*
         * Deactivate guest pmap from this cpu.
         */
        movq    VMXCTX_PMAP(%rdi), %r11
        movl    PCPU(CPUID), %r10d
        LK btrl %r10d, PM_ACTIVE(%r11)

        VMX_HOST_RESTORE

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a
         * return value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Build the stack frame that an 'int' instruction would have built, so
 * that the handler's 'iretq' returns control here. The calling sequence
 * is described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
ENTRY(vmx_call_isr)
        VENTER
        mov     %rsp, %r11              /* save %rsp */
        and     $~0xf, %rsp             /* align on 16-byte boundary */
        pushq   $KERNEL_SS              /* %ss */
        pushq   %r11                    /* %rsp */
        pushfq                          /* %rflags */
        pushq   $KERNEL_CS              /* %cs */
        cli                             /* disable interrupts, as an
                                           interrupt gate would */
        callq   *%rdi                   /* push %rip and call isr */
        VLEAVE
        ret
END(vmx_call_isr)