/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching. */
#define	VENTER	push %rbp ; mov %rsp,%rbp
#define	VLEAVE	pop %rbp

/*
 * Save the guest context.
 *
 * Assumes that %rsp points to the 'vmxctx' (the host %rsp loaded from the
 * VMCS on VM-exit).  %rdi is saved first so that it can be reused as a
 * scratch register to save %cr2, and it is left pointing at the 'vmxctx'
 * when the macro is done.
 */
#define	VMX_GUEST_SAVE						\
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp);			\
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp);			\
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp);			\
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp);			\
	movq	%r8,VMXCTX_GUEST_R8(%rsp);			\
	movq	%r9,VMXCTX_GUEST_R9(%rsp);			\
	movq	%rax,VMXCTX_GUEST_RAX(%rsp);			\
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp);			\
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp);			\
	movq	%r10,VMXCTX_GUEST_R10(%rsp);			\
	movq	%r11,VMXCTX_GUEST_R11(%rsp);			\
	movq	%r12,VMXCTX_GUEST_R12(%rsp);			\
	movq	%r13,VMXCTX_GUEST_R13(%rsp);			\
	movq	%r14,VMXCTX_GUEST_R14(%rsp);			\
	movq	%r15,VMXCTX_GUEST_R15(%rsp);			\
	movq	%cr2,%rdi;					\
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp);			\
	movq	%rsp,%rdi;
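
/*
 * For orientation only: the VMXCTX_* offsets used in this file come from
 * "vmx_assym.h", which is generated from the real 'struct vmxctx' in the C
 * headers of the vmm code.  A rough, non-authoritative sketch of the fields
 * this file touches (names, types and ordering are illustrative; the real
 * structure also carries additional state):
 *
 *	struct vmxctx {
 *		register_t	guest_rdi;	// guest GPRs saved/restored
 *		register_t	guest_rsi;	//   by VMX_GUEST_SAVE/RESTORE
 *		register_t	guest_rdx;
 *		...
 *		register_t	guest_r15;
 *		register_t	guest_cr2;
 *		register_t	host_r15;	// host callee-saved registers,
 *		...				//   plus %rsp and %rbp
 *		register_t	host_rbx;
 *		int		inst_fail_status;
 *	};
 */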

/*
 * Restore the guest context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state.  The two
 * exceptions are %rip and %rsp.  These registers are atomically switched
 * by hardware from the guest area of the VMCS.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE					\
	movq	%rdi,%rsp;					\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;			\
	movq	%rsi,%cr2;					\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;			\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;			\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;			\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;			\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;			\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;			\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;			\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;			\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;			\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;			\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;			\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;			\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;			\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;			\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore %rdi last */

/*
 * Clobber (zero) the remaining registers that still hold guest contents
 * after a VM-exit so they can't be misused.
 */
#define	VMX_GUEST_CLOBBER					\
	xor	%rax, %rax;					\
	xor	%rcx, %rcx;					\
	xor	%rdx, %rdx;					\
	xor	%rsi, %rsi;					\
	xor	%r8, %r8;					\
	xor	%r9, %r9;					\
	xor	%r10, %r10;					\
	xor	%r11, %r11;

/*
 * Save and restore the host (callee-saved) context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE						\
	movq	%r15, VMXCTX_HOST_R15(%rdi);			\
	movq	%r14, VMXCTX_HOST_R14(%rdi);			\
	movq	%r13, VMXCTX_HOST_R13(%rdi);			\
	movq	%r12, VMXCTX_HOST_R12(%rdi);			\
	movq	%rbp, VMXCTX_HOST_RBP(%rdi);			\
	movq	%rsp, VMXCTX_HOST_RSP(%rdi);			\
	movq	%rbx, VMXCTX_HOST_RBX(%rdi);

#define	VMX_HOST_RESTORE					\
	movq	VMXCTX_HOST_R15(%rdi), %r15;			\
	movq	VMXCTX_HOST_R14(%rdi), %r14;			\
	movq	VMXCTX_HOST_R13(%rdi), %r13;			\
	movq	VMXCTX_HOST_R12(%rdi), %r12;			\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;			\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;			\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 *	%rdi: pointer to the 'vmxctx'
 *	%rsi: pointer to the 'vmx'
 *	%edx: launch state of the VMCS
 *
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

guest_restore:
	movl	%edx, %r8d	/* keep the launch state in %r8d across the L1D flush */
	cmpb	$0, guest_l1d_flush_sw(%rip)
	je	after_l1d
	call	flush_l1d_sw	/* software L1D cache flush (L1TF mitigation) */
after_l1d:
	cmpl	$0, %r8d
	je	do_launch
	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns to the host through
	 * 'vmx_exit_guest' (or 'vmx_exit_guest_flush_rsb') with %rsp
	 * pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns to the host through
	 * 'vmx_exit_guest' (or 'vmx_exit_guest_flush_rsb') with %rsp
	 * pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

decode_inst_error:
	/*
	 * None of the instructions between the failed 'vmlaunch'/'vmresume'
	 * and the 'jz' below modify %rflags, so ZF still reflects the type
	 * of failure: ZF set means VMfailValid, otherwise VMfailInvalid
	 * (see the VMX instruction conventions in the Intel SDM).
	 */
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */
	VMX_HOST_RESTORE
	VLEAVE
	ret
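
/*
 * For reference only: a minimal, illustrative sketch of how the C side is
 * expected to drive this entry point (the real loop lives in the vmm C code;
 * everything below other than the constants used above is an assumption,
 * not the authoritative interface):
 *
 *	int rc = vmx_enter_guest(vmxctx, vmx, launched);
 *	if (rc == VMX_GUEST_VMEXIT) {
 *		// Normal VM-exit: guest GPRs and %cr2 are in 'vmxctx';
 *		// the exit reason and guest %rip/%rsp come from the VMCS.
 *	} else {
 *		// rc is VMX_VMLAUNCH_ERROR or VMX_VMRESUME_ERROR and
 *		// vmxctx->inst_fail_status holds VM_FAIL_VALID or
 *		// VM_FAIL_INVALID.
 *	}
 */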

/*
 * Non-error VM-exit from the guest.  Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the 'struct vmxctx'.
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
	/*
	 * Save guest state that is not automatically saved in the VMCS.
	 */
	VMX_GUEST_SAVE

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * To prevent malicious branch target predictions from
	 * affecting the host, overwrite all entries in the RSB upon
	 * exiting a guest.
	 */
	mov	$16, %ecx	/* 16 iterations, two calls per loop */
	mov	%rsp, %rax
0:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	sub	$1, %ecx
	jnz	0b
	mov	%rax, %rsp	/* restore %rsp; the calls above never return */

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret

/*
 * Non-error VM-exit path without the RSB overwrite; the C code decides which
 * exit label to install as the VMCS host %rip.
 */
	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the VMCS.
	 */
	VMX_GUEST_SAVE

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Build the stack frame that interrupt delivery would have pushed (%ss,
 * %rsp, %rflags, %cs; the 'callq' below supplies %rip) and invoke the
 * handler, whose 'iretq' unwinds the frame and returns here.  The calling
 * sequence is described in the "Instruction Set Reference" for the "INT"
 * instruction in the Intel SDM, Vol. 2.
 */
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11	/* save %rsp */
	and	$~0xf, %rsp	/* align on 16-byte boundary */
	pushq	$KERNEL_SS	/* %ss */
	pushq	%r11		/* %rsp */
	pushfq			/* %rflags */
	pushq	$KERNEL_CS	/* %cs */
	cli			/* disable interrupts */
	callq	*%rdi		/* push %rip and call isr */
	VLEAVE
	ret
END(vmx_call_isr)
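
/*
 * For reference only: a sketch of the C-side view of vmx_call_isr().  In the
 * vmm code it is used to hand a pending NMI or external interrupt that
 * caused a VM-exit to the regular host handler.  The exact declaration and
 * the names below are assumptions, not the authoritative interface:
 *
 *	void vmx_call_isr(uintptr_t entry);
 *
 *	// 'entry' is the handler address taken from the host IDT entry for
 *	// the vector reported by the VM-exit; the handler returns here via
 *	// its 'iretq'.
 *	vmx_call_isr(isr_entry_addr);
 */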