/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define	VENTER	push %rbp ; mov %rsp,%rbp
#define	VLEAVE	pop %rbp

/*
 * Save the guest context.
 */
#define	VMX_GUEST_SAVE							\
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp);				\
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp);				\
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp);				\
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp);				\
	movq	%r8,VMXCTX_GUEST_R8(%rsp);				\
	movq	%r9,VMXCTX_GUEST_R9(%rsp);				\
	movq	%rax,VMXCTX_GUEST_RAX(%rsp);				\
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp);				\
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp);				\
	movq	%r10,VMXCTX_GUEST_R10(%rsp);				\
	movq	%r11,VMXCTX_GUEST_R11(%rsp);				\
	movq	%r12,VMXCTX_GUEST_R12(%rsp);				\
	movq	%r13,VMXCTX_GUEST_R13(%rsp);				\
	movq	%r14,VMXCTX_GUEST_R14(%rsp);				\
	movq	%r15,VMXCTX_GUEST_R15(%rsp);				\
	movq	%cr2,%rdi;						\
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp);				\
	movq	%rsp,%rdi;

/*
 * Restore the guest context.  Assumes that %rdi holds a pointer to
 * the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state.  The two
 * exceptions are %rip and %rsp.  These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore %rdi last */
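/*
 * Note on the VMXCTX_* constants used above: they are byte offsets into
 * the C-side 'struct vmxctx', generated into vmx_assym.h by the assym
 * machinery so that this file never hard-codes the structure layout.
 * A rough, illustrative sketch of how such an offset is produced (field
 * names and ASSYM usage here are an approximation, not the authoritative
 * definition):
 *
 *	struct vmxctx {
 *		register_t	guest_rdi;	// -> VMXCTX_GUEST_RDI
 *		register_t	guest_rsi;	// -> VMXCTX_GUEST_RSI
 *		...
 *		register_t	guest_cr2;	// -> VMXCTX_GUEST_CR2
 *		register_t	host_r15;	// -> VMXCTX_HOST_R15
 *		...
 *	};
 *	ASSYM(VMXCTX_GUEST_RDI, offsetof(struct vmxctx, guest_rdi));
 *
 * If the structure changes, regenerating vmx_assym.h keeps these macros
 * correct without editing this file.
 */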
/*
 * Clobber the remaining registers that still hold guest contents so
 * they can't be misused after the VM-exit.
 */
#define	VMX_GUEST_CLOBBER						\
	xor	%rax, %rax;						\
	xor	%rcx, %rcx;						\
	xor	%rdx, %rdx;						\
	xor	%rsi, %rsi;						\
	xor	%r8, %r8;						\
	xor	%r9, %r9;						\
	xor	%r10, %r10;						\
	xor	%r11, %r11;

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE							\
	movq	%r15, VMXCTX_HOST_R15(%rdi);				\
	movq	%r14, VMXCTX_HOST_R14(%rdi);				\
	movq	%r13, VMXCTX_HOST_R13(%rdi);				\
	movq	%r12, VMXCTX_HOST_R12(%rdi);				\
	movq	%rbp, VMXCTX_HOST_RBP(%rdi);				\
	movq	%rsp, VMXCTX_HOST_RSP(%rdi);				\
	movq	%rbx, VMXCTX_HOST_RBX(%rdi);

#define	VMX_HOST_RESTORE						\
	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

	/*
	 * Activate guest pmap on this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %eax
	LK btsl	%eax, PM_ACTIVE(%r11)

	/*
	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
	 * then we must invalidate all mappings associated with this EPTP.
	 */
	movq	PM_EPTGEN(%r11), %r10
	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
	je	guest_restore

	/* Refresh 'vmx->eptgen[curcpu]' */
	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)

	/* Setup the invept descriptor on the host stack */
	mov	%rsp, %r11
	movq	VMX_EPTP(%rsi), %rax
	movq	%rax, -16(%r11)
	movq	$0x0, -8(%r11)
	mov	$0x1, %eax		/* Single context invalidate */
	invept	-16(%r11), %rax
	jbe	invept_error		/* Check invept instruction error */
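	/*
	 * For reference, the 16-byte descriptor built just above is the
	 * INVEPT descriptor described in the Intel SDM.  In C terms it is
	 * roughly the following (an illustrative sketch, not a definition
	 * used anywhere else):
	 *
	 *	struct invept_desc {
	 *		uint64_t	eptp;		// EPT pointer to invalidate
	 *		uint64_t	reserved;	// must be zero
	 *	};
	 *
	 * Type 1 in %eax requests a single-context invalidation, i.e. only
	 * guest-physical mappings tagged with this EPTP are flushed.
	 */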
guest_restore:
	movl	%edx, %r8d	/* save the 'launched' argument across flush_l1d_sw */
	cmpb	$0, guest_l1d_flush_sw(%rip)
	je	after_l1d
	call	flush_l1d_sw
after_l1d:
	cmpl	$0, %r8d
	je	do_launch
	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

invept_error:
	movl	$VMX_INVEPT_ERROR, %eax
	jmp	decode_inst_error

decode_inst_error:
	/*
	 * A failed VMX instruction sets ZF for VMfailValid and CF for
	 * VMfailInvalid.  The flags are still intact here because the
	 * mov and jmp instructions above do not modify them.
	 */
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE
	VLEAVE
	ret

/*
 * Non-error VM-exit from the guest.  Make this a label so its address can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * To prevent malicious branch target predictions from
	 * affecting the host, overwrite all entries in the RSB upon
	 * exiting a guest.
	 */
	mov	$16, %ecx	/* 16 iterations, two calls per loop */
	mov	%rsp, %rax
0:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	sub	$1, %ecx
	jnz	0b
	mov	%rax, %rsp	/* discard the return addresses pushed above */

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret

	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the
 * "INT" instruction in Intel SDM, Vol 2.
 */
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11		/* save %rsp */
	and	$~0xf, %rsp		/* align on 16-byte boundary */
	pushq	$KERNEL_SS		/* %ss */
	pushq	%r11			/* %rsp */
	pushfq				/* %rflags */
	pushq	$KERNEL_CS		/* %cs */
	cli				/* disable interrupts */
	callq	*%rdi			/* push %rip and call isr */
	/*
	 * The handler returns with iretq, which pops the frame built above,
	 * restoring the saved %rsp and the saved %rflags (re-enabling
	 * interrupts).
	 */
	VLEAVE
	ret
END(vmx_call_isr)
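/*
 * For reference, the C-side view of the entry points in this file is
 * roughly the following (an illustrative sketch; the authoritative
 * prototypes live in the VMX headers and may differ in detail):
 *
 *	int	vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx,
 *		    int launched);
 *	void	vmx_call_isr(uintptr_t entry);
 *
 * 'vmx_enter_guest' returns VMX_GUEST_VMEXIT on a normal VM-exit (taken
 * through 'vmx_exit_guest' or 'vmx_exit_guest_flush_rsb', whichever was
 * programmed as the VMCS host %rip), or one of the VMX_*_ERROR values
 * when vmlaunch/vmresume/invept fail.
 */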