/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/asm_linkage.h>

#include "svm_assym.h"

/* Porting note: This is named 'svm_support.S' upstream. */


/*
 * Flush scratch registers to avoid lingering guest state being used for
 * Spectre v1 attacks when returning from guest entry.
 */
#define	SVM_GUEST_FLUSH_SCRATCH						\
	xorl	%edi, %edi;						\
	xorl	%esi, %esi;						\
	xorl	%edx, %edx;						\
	xorl	%ecx, %ecx;						\
	xorl	%r8d, %r8d;						\
	xorl	%r9d, %r9d;						\
	xorl	%r10d, %r10d;						\
	xorl	%r11d, %r11d;
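/*
 * Note that %rax is not zeroed here: at the macro's sole use below it no
 * longer holds guest state (it is overwritten during the GSBASE fixup),
 * and the callee-saved registers (%rbx, %rbp, %r12-%r15) are reloaded
 * with host values before svm_launch returns.
 */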

/* Stack layout (offset from %rsp) for svm_launch */
#define	SVMSTK_R15	0x00	/* callee saved %r15			*/
#define	SVMSTK_R14	0x08	/* callee saved %r14			*/
#define	SVMSTK_R13	0x10	/* callee saved %r13			*/
#define	SVMSTK_R12	0x18	/* callee saved %r12			*/
#define	SVMSTK_RBX	0x20	/* callee saved %rbx			*/
#define	SVMSTK_RDX	0x28	/* save-args %rdx (struct cpu *)	*/
#define	SVMSTK_RSI	0x30	/* save-args %rsi (struct svm_regctx *)	*/
#define	SVMSTK_RDI	0x38	/* save-args %rdi (uint64_t vmcb_pa)	*/
#define	SVMSTK_FP	0x40	/* frame pointer %rbp			*/
#define	SVMSTKSIZE	SVMSTK_FP
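/*
 * SVMSTKSIZE deliberately equals SVMSTK_FP: %rbp is pushed before the
 * save area is carved out, so after the subq below it sits at
 * SVMSTK_FP(%rsp), just above the saved registers and arguments.
 */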

/*
 * svm_launch(uint64_t vmcb_pa, struct svm_regctx *gctx, struct cpu *cpu)
 * %rdi: physical address of the VMCB
 * %rsi: pointer to the guest register context
 * %rdx: pointer to this CPU's 'struct cpu'
 */
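/*
 * A sketch of the C-side call (the exact expressions live in svm.c):
 *
 *	svm_launch(vmcb_pa, gctx, CPU);
 */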
ENTRY_NP(svm_launch)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$SVMSTKSIZE, %rsp
	movq	%r15, SVMSTK_R15(%rsp)
	movq	%r14, SVMSTK_R14(%rsp)
	movq	%r13, SVMSTK_R13(%rsp)
	movq	%r12, SVMSTK_R12(%rsp)
	movq	%rbx, SVMSTK_RBX(%rsp)
	movq	%rdx, SVMSTK_RDX(%rsp)
	movq	%rsi, SVMSTK_RSI(%rsp)
	movq	%rdi, SVMSTK_RDI(%rsp)

	/* Save the physical address of the VMCB in %rax */
	movq	%rdi, %rax

	/* Restore guest state. */
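	/* Guest %rax, %rsp and %rip live in the VMCB and are handled by vmrun. */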
	movq	SCTX_R8(%rsi), %r8
	movq	SCTX_R9(%rsi), %r9
	movq	SCTX_R10(%rsi), %r10
	movq	SCTX_R11(%rsi), %r11
	movq	SCTX_R12(%rsi), %r12
	movq	SCTX_R13(%rsi), %r13
	movq	SCTX_R14(%rsi), %r14
	movq	SCTX_R15(%rsi), %r15
	movq	SCTX_RBP(%rsi), %rbp
	movq	SCTX_RBX(%rsi), %rbx
	movq	SCTX_RCX(%rsi), %rcx
	movq	SCTX_RDX(%rsi), %rdx
	movq	SCTX_RDI(%rsi), %rdi
	movq	SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */

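	/*
	 * vmload, vmrun, and vmsave all take the physical address of the
	 * VMCB as an implicit operand in %rax.  vmload pulls in the guest
	 * state that vmrun does not load itself (FS/GS and their hidden
	 * bases, TR, LDTR, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK, and the
	 * SYSENTER MSRs), vmrun enters the guest until the next #VMEXIT,
	 * and vmsave writes that same state back out to the VMCB.
	 */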
	vmload	%rax
	vmrun	%rax
	vmsave	%rax

	/* Grab the svm_regctx pointer */
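	/*
	 * Aside from %rsp (reloaded from the host save area on #VMEXIT),
	 * every register still holds guest values here, so %rax, which
	 * held the VMCB PA, is the one register free to serve as the base.
	 */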
	movq	SVMSTK_RSI(%rsp), %rax

	/* Save guest state. */
	movq	%r8, SCTX_R8(%rax)
	movq	%r9, SCTX_R9(%rax)
	movq	%r10, SCTX_R10(%rax)
	movq	%r11, SCTX_R11(%rax)
	movq	%r12, SCTX_R12(%rax)
	movq	%r13, SCTX_R13(%rax)
	movq	%r14, SCTX_R14(%rax)
	movq	%r15, SCTX_R15(%rax)
	movq	%rbp, SCTX_RBP(%rax)
	movq	%rbx, SCTX_RBX(%rax)
	movq	%rcx, SCTX_RCX(%rax)
	movq	%rdx, SCTX_RDX(%rax)
	movq	%rdi, SCTX_RDI(%rax)
	movq	%rsi, SCTX_RSI(%rax)

	/* Restore callee-saved registers */
	movq	SVMSTK_R15(%rsp), %r15
	movq	SVMSTK_R14(%rsp), %r14
	movq	SVMSTK_R13(%rsp), %r13
	movq	SVMSTK_R12(%rsp), %r12
	movq	SVMSTK_RBX(%rsp), %rbx

	/* Fix %gsbase to point back to the correct 'struct cpu *' */
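	/* wrmsr takes the MSR index in %ecx and the value in %edx:%eax */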
	movq	SVMSTK_RDX(%rsp), %rdx
	movl	%edx, %eax
	shrq	$32, %rdx
	movl	$MSR_GSBASE, %ecx
	wrmsr

	/*
	 * While SVM will save/restore the GDTR and IDTR, the TR does not enjoy
	 * such treatment.  Reload the KTSS immediately, since it is used by
	 * dtrace and other fault/trap handlers.
	 */
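	/*
	 * The busy bit must be cleared before the reload: ltr raises #GP
	 * when pointed at a TSS descriptor already marked busy.
	 */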
	movq	SVMSTK_RDX(%rsp), %rdi		/* %rdi = CPU */
	movq	CPU_GDT(%rdi), %rdi		/* %rdi = cpu->cpu_gdt */
	leaq	GDT_KTSS_OFF(%rdi), %rdi	/* %rdi = &cpu_gdt[GDT_KTSS] */
	andb	$0xfd, SSD_TYPE(%rdi)		/* ssd_type.busy = 0 */
	movw	$KTSS_SEL, %ax			/* reload kernel TSS */
	ltr	%ax

	SVM_GUEST_FLUSH_SCRATCH

	addq	$SVMSTKSIZE, %rsp
	popq	%rbp
	ret
SET_SIZE(svm_launch)