/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Save the guest context.
 */
#define VMX_GUEST_SAVE                                          \
        movq    %rdi,VMXCTX_GUEST_RDI(%rsp);                    \
        movq    %rsi,VMXCTX_GUEST_RSI(%rsp);                    \
        movq    %rdx,VMXCTX_GUEST_RDX(%rsp);                    \
        movq    %rcx,VMXCTX_GUEST_RCX(%rsp);                    \
        movq    %r8,VMXCTX_GUEST_R8(%rsp);                      \
        movq    %r9,VMXCTX_GUEST_R9(%rsp);                      \
        movq    %rax,VMXCTX_GUEST_RAX(%rsp);                    \
        movq    %rbx,VMXCTX_GUEST_RBX(%rsp);                    \
        movq    %rbp,VMXCTX_GUEST_RBP(%rsp);                    \
        movq    %r10,VMXCTX_GUEST_R10(%rsp);                    \
        movq    %r11,VMXCTX_GUEST_R11(%rsp);                    \
        movq    %r12,VMXCTX_GUEST_R12(%rsp);                    \
        movq    %r13,VMXCTX_GUEST_R13(%rsp);                    \
        movq    %r14,VMXCTX_GUEST_R14(%rsp);                    \
        movq    %r15,VMXCTX_GUEST_R15(%rsp);                    \
        movq    %cr2,%rdi;                                      \
        movq    %rdi,VMXCTX_GUEST_CR2(%rsp);                    \
        movq    %rsp,%rdi;
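
/*
 * For reference: the VMXCTX_GUEST_* and VMXCTX_HOST_* symbols used by the
 * save/restore macros in this file are byte offsets pulled in from
 * vmx_assym.h, which is generated from the C definition of 'struct vmxctx'.
 * The sketch below is illustrative only; the field names are assumptions
 * derived from the offset symbols, and the authoritative layout is the C
 * structure used to generate vmx_assym.h:
 *
 *      struct vmxctx {
 *              register_t      guest_rdi, guest_rsi, guest_rdx, guest_rcx;
 *              register_t      guest_r8, guest_r9, guest_r10, guest_r11;
 *              register_t      guest_r12, guest_r13, guest_r14, guest_r15;
 *              register_t      guest_rax, guest_rbx, guest_rbp, guest_cr2;
 *              register_t      host_r12, host_r13, host_r14, host_r15;
 *              register_t      host_rbp, host_rsp, host_rbx;
 *              int             inst_fail_status;
 *      };
 */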

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define VMX_GUEST_RESTORE                                       \
        movq    %rdi,%rsp;                                      \
        movq    VMXCTX_GUEST_CR2(%rdi),%rsi;                    \
        movq    %rsi,%cr2;                                      \
        movq    VMXCTX_GUEST_RSI(%rdi),%rsi;                    \
        movq    VMXCTX_GUEST_RDX(%rdi),%rdx;                    \
        movq    VMXCTX_GUEST_RCX(%rdi),%rcx;                    \
        movq    VMXCTX_GUEST_R8(%rdi),%r8;                      \
        movq    VMXCTX_GUEST_R9(%rdi),%r9;                      \
        movq    VMXCTX_GUEST_RAX(%rdi),%rax;                    \
        movq    VMXCTX_GUEST_RBX(%rdi),%rbx;                    \
        movq    VMXCTX_GUEST_RBP(%rdi),%rbp;                    \
        movq    VMXCTX_GUEST_R10(%rdi),%r10;                    \
        movq    VMXCTX_GUEST_R11(%rdi),%r11;                    \
        movq    VMXCTX_GUEST_R12(%rdi),%r12;                    \
        movq    VMXCTX_GUEST_R13(%rdi),%r13;                    \
        movq    VMXCTX_GUEST_R14(%rdi),%r14;                    \
        movq    VMXCTX_GUEST_R15(%rdi),%r15;                    \
        movq    VMXCTX_GUEST_RDI(%rdi),%rdi;    /* restore %rdi last */

/*
 * Zero the remaining registers, which still hold guest contents, so they
 * can't be misused.
 */
#define VMX_GUEST_CLOBBER                                       \
        xor     %rax, %rax;                                     \
        xor     %rcx, %rcx;                                     \
        xor     %rdx, %rdx;                                     \
        xor     %rsi, %rsi;                                     \
        xor     %r8, %r8;                                       \
        xor     %r9, %r9;                                       \
        xor     %r10, %r10;                                     \
        xor     %r11, %r11;

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define VMX_HOST_SAVE                                           \
        movq    %r15, VMXCTX_HOST_R15(%rdi);                    \
        movq    %r14, VMXCTX_HOST_R14(%rdi);                    \
        movq    %r13, VMXCTX_HOST_R13(%rdi);                    \
        movq    %r12, VMXCTX_HOST_R12(%rdi);                    \
        movq    %rbp, VMXCTX_HOST_RBP(%rdi);                    \
        movq    %rsp, VMXCTX_HOST_RSP(%rdi);                    \
        movq    %rbx, VMXCTX_HOST_RBX(%rdi);

#define VMX_HOST_RESTORE                                        \
        movq    VMXCTX_HOST_R15(%rdi), %r15;                    \
        movq    VMXCTX_HOST_R14(%rdi), %r14;                    \
        movq    VMXCTX_HOST_R13(%rdi), %r13;                    \
        movq    VMXCTX_HOST_R12(%rdi), %r12;                    \
        movq    VMXCTX_HOST_RBP(%rdi), %rbp;                    \
        movq    VMXCTX_HOST_RSP(%rdi), %rsp;                    \
        movq    VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 *      %rdi: pointer to the 'vmxctx'
 *      %rsi: pointer to the 'vmx'
 *      %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
        VENTER
        /*
         * Save host state before doing anything else.
         */
        VMX_HOST_SAVE

guest_restore:
        movl    %edx, %r8d
        cmpb    $0, guest_l1d_flush_sw(%rip)
        je      after_l1d
        call    flush_l1d_sw
after_l1d:
        cmpl    $0, %r8d
        je      do_launch
        VMX_GUEST_RESTORE
        vmresume
        /*
         * In the common case 'vmresume' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMRESUME_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMRESUME_ERROR, %eax
        jmp     decode_inst_error

do_launch:
        VMX_GUEST_RESTORE
        vmlaunch
        /*
         * In the common case 'vmlaunch' returns back to the host through
         * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
         *
         * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
         */
        movq    %rsp, %rdi              /* point %rdi back to 'vmxctx' */
        movl    $VMX_VMLAUNCH_ERROR, %eax
        jmp     decode_inst_error

decode_inst_error:
        movl    $VM_FAIL_VALID, %r11d
        jz      inst_error
        movl    $VM_FAIL_INVALID, %r11d
inst_error:
        movl    %r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

        /*
         * The return value is already populated in %eax so we cannot use
         * it as a scratch register beyond this point.
         */

        VMX_HOST_RESTORE
        VLEAVE
        ret
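
/*
 * A minimal sketch of how a C caller is expected to drive vmx_enter_guest()
 * above.  Apart from vmx_enter_guest() itself and the VMX_ and VM_FAIL_
 * constants already used in this file, every identifier below is an
 * illustrative assumption rather than a reference to the surrounding code:
 *
 *      disable_intr();
 *      rc = vmx_enter_guest(vmxctx, vmx, launched);
 *      if (rc == VMX_GUEST_VMEXIT) {
 *              // Normal VM-exit: the guest registers were written back
 *              // into the vmxctx by VMX_GUEST_SAVE before returning.
 *      } else {
 *              // rc is VMX_VMLAUNCH_ERROR or VMX_VMRESUME_ERROR and the
 *              // field at VMXCTX_INST_FAIL_STATUS holds VM_FAIL_VALID
 *              // (VMfailValid, i.e. ZF was set by vmlaunch/vmresume) or
 *              // VM_FAIL_INVALID (VMfailInvalid).
 *      }
 */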

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
        ALIGN_TEXT
        .globl  vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * To prevent malicious branch target predictions from
         * affecting the host, overwrite all entries in the RSB upon
         * exiting a guest.
         */
        mov     $16, %ecx       /* 16 iterations, two calls per loop */
        mov     %rsp, %rax
0:      call    2f              /* create an RSB entry. */
1:      pause
        call    1b              /* capture rogue speculation. */
2:      call    2f              /* create an RSB entry. */
1:      pause
        call    1b              /* capture rogue speculation. */
2:      sub     $1, %ecx
        jnz     0b
        mov     %rax, %rsp

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret

        .globl  vmx_exit_guest
vmx_exit_guest:
        /*
         * Save guest state that is not automatically saved in the vmcs.
         */
        VMX_GUEST_SAVE

        VMX_HOST_RESTORE

        VMX_GUEST_CLOBBER

        /*
         * This will return to the caller of 'vmx_enter_guest()' with a return
         * value of VMX_GUEST_VMEXIT.
         */
        movl    $VMX_GUEST_VMEXIT, %eax
        VLEAVE
        ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
ENTRY(vmx_call_isr)
        VENTER
        mov     %rsp, %r11              /* save %rsp */
        and     $~0xf, %rsp             /* align on 16-byte boundary */
        pushq   $KERNEL_SS              /* %ss */
        pushq   %r11                    /* %rsp */
        pushfq                          /* %rflags */
        pushq   $KERNEL_CS              /* %cs */
        cli                             /* disable interrupts */
        callq   *%rdi                   /* push %rip and call isr */
        VLEAVE
        ret
END(vmx_call_isr)
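
/*
 * For reference, the frame that vmx_call_isr() synthesizes above is the
 * 64-bit interrupt stack frame that the handler's 'iretq' unwinds (see the
 * Intel SDM, Vol. 3, on interrupt and exception handling in 64-bit mode):
 *
 *      +-----------+  higher addresses
 *      | %ss       |  KERNEL_SS
 *      | %rsp      |  %r11, the unaligned stack pointer saved on entry
 *      | %rflags   |  pushfq result, captured before the cli, so iretq
 *      |           |  restores the caller's interrupt-enable state
 *      | %cs       |  KERNEL_CS
 *      | %rip      |  pushed by 'callq *%rdi'
 *      +-----------+  <- %rsp when the handler starts executing
 *
 * When the handler finishes with 'iretq', execution resumes at the
 * instruction after the callq with the original %rsp restored.
 */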