/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * Copyright 2013 Pluribus Networks Inc.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 MNX Cloud, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/segments.h>

/* Porting note: This is named 'vmx_support.S' upstream. */

#include "vmx_assym.h"
#include "vmcs.h"

/*
 * VMX_GUEST_RESTORE -- load guest GPR state prior to vmlaunch/vmresume.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * %cr2 is not part of the VMCS guest-state area, so it is restored by
 * hand first, using %rsi as a scratch register (%rsi is then reloaded
 * with its guest value).
 *
 * %rdi is restored last since it is the base register used to address
 * the 'vmxctx' for every other load.
 */
/* BEGIN CSTYLED */
#define	VMX_GUEST_RESTORE						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;	/* %cr2 via scratch */	\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */

/*
 * VMX_GUEST_SAVE -- store guest GPR state into the 'vmxctx' after vmexit.
 *
 * Entered with %rsp pointing at the vmx_enter_guest() stack frame (the
 * value written to VMCS_HOST_RSP before guest entry).  The guest's %rdi
 * is stashed in the VMXSTK_TMPRDI scratch slot so that the 'vmxctx'
 * pointer can be reloaded from VMXSTK_RDI and used as the base register
 * for saving every other guest register.  The host frame pointer is
 * re-established from VMXSTK_FP.
 *
 * %cr2 is not part of the VMCS guest-state area, so it is saved by hand
 * (via %rbx, whose guest value has already been stored by that point).
 */
#define	VMX_GUEST_SAVE							\
	movq	%rdi, VMXSTK_TMPRDI(%rsp);	/* stash guest %rdi */	\
	movq	VMXSTK_RDI(%rsp), %rdi;		/* %rdi = vmxctx */	\
	movq	%rbp, VMXCTX_GUEST_RBP(%rdi);				\
	leaq	VMXSTK_FP(%rsp), %rbp;		/* restore host %rbp */	\
	movq	%rsi, VMXCTX_GUEST_RSI(%rdi);				\
	movq	%rdx, VMXCTX_GUEST_RDX(%rdi);				\
	movq	%rcx, VMXCTX_GUEST_RCX(%rdi);				\
	movq	%r8, VMXCTX_GUEST_R8(%rdi);				\
	movq	%r9, VMXCTX_GUEST_R9(%rdi);				\
	movq	%rax, VMXCTX_GUEST_RAX(%rdi);				\
	movq	%rbx, VMXCTX_GUEST_RBX(%rdi);				\
	movq	%r10, VMXCTX_GUEST_R10(%rdi);				\
	movq	%r11, VMXCTX_GUEST_R11(%rdi);				\
	movq	%r12, VMXCTX_GUEST_R12(%rdi);				\
	movq	%r13, VMXCTX_GUEST_R13(%rdi);				\
	movq	%r14, VMXCTX_GUEST_R14(%rdi);				\
	movq	%r15, VMXCTX_GUEST_R15(%rdi);				\
	movq	%cr2, %rbx;			/* %cr2 via scratch */	\
	movq	%rbx, VMXCTX_GUEST_CR2(%rdi);				\
	movq	VMXSTK_TMPRDI(%rsp), %rdx;				\
	movq	%rdx, VMXCTX_GUEST_RDI(%rdi);	/* guest %rdi last */
/* END CSTYLED */


/*
 * VMX_GUEST_FLUSH_SCRATCH -- zero the caller-saved scratch registers.
 *
 * Flush scratch registers to avoid lingering guest state being used for
 * Spectre v1 attacks when returning from guest entry.  The 32-bit xor
 * forms zero the full 64-bit registers.  (Callee-saved registers are
 * instead reloaded with host values from the stack frame.)
 */
#define	VMX_GUEST_FLUSH_SCRATCH						\
	xorl	%edi, %edi;						\
	xorl	%esi, %esi;						\
	xorl	%edx, %edx;						\
	xorl	%ecx, %ecx;						\
	xorl	%r8d, %r8d;						\
	xorl	%r9d, %r9d;						\
	xorl	%r10d, %r10d;						\
	xorl	%r11d, %r11d;


/*
 * Stack layout (offset from %rsp) for vmx_enter_guest.
 *
 * These offsets are also relied upon by VMX_GUEST_SAVE and
 * vmx_exit_guest, which find %rsp pointing at this frame after a
 * vmexit (it was written to VMCS_HOST_RSP before guest entry).
 */
#define	VMXSTK_TMPRDI	0x00	/* temp store %rdi on vmexit		*/
#define	VMXSTK_R15	0x08	/* callee saved %r15			*/
#define	VMXSTK_R14	0x10	/* callee saved %r14			*/
#define	VMXSTK_R13	0x18	/* callee saved %r13			*/
#define	VMXSTK_R12	0x20	/* callee saved %r12			*/
#define	VMXSTK_RBX	0x28	/* callee saved %rbx			*/
#define	VMXSTK_RDX	0x30	/* save-args %rdx (int launched)	*/
#define	VMXSTK_RSI	0x38	/* save-args %rsi (struct vmx *vmx)	*/
#define	VMXSTK_RDI	0x40	/* save-args %rdi (struct vmxctx *ctx)	*/
#define	VMXSTK_FP	0x48	/* frame pointer %rbp			*/
#define	VMXSTKSIZE	VMXSTK_FP	/* size below the saved %rbp	*/

/*
 * vmx_enter_guest(struct vmxctx *ctx, struct vmx *vmx, int launched)
 * Interrupts must be disabled on entry.
 *
 * Enters the guest via 'vmresume' (if 'launched' is non-zero) or
 * 'vmlaunch'.  In the common case control returns to the caller through
 * vmx_exit_guest with %eax = VMX_GUEST_VMEXIT.  If the vmwrite,
 * vmlaunch, or vmresume instruction itself fails, the corresponding
 * VMX_*_ERROR code is returned in %eax and the fail status
 * (VM_FAIL_VALID/VM_FAIL_INVALID) is recorded in the vmxctx.
 */
ENTRY_NP(vmx_enter_guest)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$VMXSTKSIZE, %rsp
	/* Preserve callee-saved registers and the incoming arguments */
	movq	%r15, VMXSTK_R15(%rsp)
	movq	%r14, VMXSTK_R14(%rsp)
	movq	%r13, VMXSTK_R13(%rsp)
	movq	%r12, VMXSTK_R12(%rsp)
	movq	%rbx, VMXSTK_RBX(%rsp)
	movq	%rdx, VMXSTK_RDX(%rsp)
	movq	%rsi, VMXSTK_RSI(%rsp)
	movq	%rdi, VMXSTK_RDI(%rsp)

	movq	%rdi, %r12	/* vmxctx */
	movq	%rsi, %r13	/* vmx */
	movl	%edx, %r14d	/* launch state */

	/* Write the current %rsp into the VMCS to be restored on vmexit */
	movl	$VMCS_HOST_RSP, %eax
	vmwrite	%rsp, %rax
	jbe	vmwrite_error	/* CF or ZF set => VMfail */

	/* Check if vmresume is adequate or a full vmlaunch is required */
	cmpl	$0, %r14d
	je	do_launch

	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case, 'vmresume' returns back to the host through
	 * 'vmx_exit_guest'. If there is an error we return VMX_VMRESUME_ERROR
	 * to the caller.
	 */
	leaq	VMXSTK_FP(%rsp), %rbp
	movq	VMXSTK_RDI(%rsp), %rdi
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case, 'vmlaunch' returns back to the host through
	 * 'vmx_exit_guest'. If there is an error we return VMX_VMLAUNCH_ERROR
	 * to the caller.
	 */
	leaq	VMXSTK_FP(%rsp), %rbp
	movq	VMXSTK_RDI(%rsp), %rdi
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

vmwrite_error:
	movl	$VMX_VMWRITE_ERROR, %eax
	jmp	decode_inst_error
decode_inst_error:
	/*
	 * The flags set by the failed VMX instruction are still intact
	 * here: the intervening movl/leaq/movq/jmp instructions do not
	 * modify rflags.  ZF set indicates VMfailValid (an error number
	 * is available in the VMCS); otherwise it was VMfailInvalid.
	 */
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/* Restore callee-saved host register state */
	movq	VMXSTK_RBX(%rsp), %rbx
	movq	VMXSTK_R12(%rsp), %r12
	movq	VMXSTK_R13(%rsp), %r13
	movq	VMXSTK_R14(%rsp), %r14
	movq	VMXSTK_R15(%rsp), %r15

	VMX_GUEST_FLUSH_SCRATCH

	addq	$VMXSTKSIZE, %rsp
	popq	%rbp
	ret
SET_SIZE(vmx_enter_guest)

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points at the vmx_enter_guest() stack frame
 * (it was written to VMCS_HOST_RSP before entry); the 'vmxctx' pointer
 * is reloaded from that frame by VMX_GUEST_SAVE.
 */
.align	ASM_ENTRY_ALIGN;
ENTRY_NP(vmx_exit_guest)
	/* Save guest state that is not automatically saved in the vmcs. */
	VMX_GUEST_SAVE

	/* Scrub the scratch registers of lingering guest values */
	VMX_GUEST_FLUSH_SCRATCH

	/*
	 * To prevent malicious branch target predictions from affecting the
	 * host, overwrite all entries in the RSB upon exiting a guest.
	 *
	 * NOTE: If RSB mitigations are disabled (see cpuid.c), this call is
	 * entirely a NOP.
	 */
	call	x86_rsb_stuff

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	/* Restore callee-saved host register state */
	movq	VMXSTK_RBX(%rsp), %rbx
	movq	VMXSTK_R12(%rsp), %r12
	movq	VMXSTK_R13(%rsp), %r13
	movq	VMXSTK_R14(%rsp), %r14
	movq	VMXSTK_R15(%rsp), %r15

	addq	$VMXSTKSIZE, %rsp
	popq	%rbp
	ret
SET_SIZE(vmx_exit_guest)

/*
 * %rdi = trapno
 *
 * We need to do enough to convince cmnint - and its iretting tail - that we're
 * a legit interrupt stack frame.
 *
 * A hardware-style interrupt frame (%ss, %rsp, %rflags, %cs, %rip,
 * error code, trap number) is built on a 16-byte-aligned stack and
 * control jumps to 'cmnint'; its iret restores the pushed state and
 * resumes execution at '.iret_dest'.
 */
ENTRY_NP(vmx_call_isr)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rsp, %r11	/* pre-alignment %rsp, pushed into the frame */
	andq	$~0xf, %rsp	/* align stack */
	pushq	$KDS_SEL	/* %ss */
	pushq	%r11		/* %rsp */
	pushfq			/* %rflags (captured before cli below) */
	pushq	$KCS_SEL	/* %cs */
	leaq	.iret_dest(%rip), %rcx
	pushq	%rcx		/* %rip */
	pushq	$0		/* err */
	pushq	%rdi		/* trapno */
	cli			/* iret will restore the pushed %rflags */
	jmp	cmnint		/* %rip (and call) */
.iret_dest:
	popq	%rbp
	ret
SET_SIZE(vmx_call_isr)
