/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <machine/asmacros.h>

#include "svm_assym.h"

/*
 * Be friendly to DTrace FBT's prologue/epilogue pattern matching.
 *
 * They are also responsible for saving/restoring the host %rbp across VMRUN.
 */
#define	VENTER  push %rbp ; mov %rsp,%rbp
#define	VLEAVE  pop %rbp

/*
 * The SVM instructions are emitted as raw opcode bytes, presumably so the
 * file assembles with toolchains that do not know the mnemonics — confirm
 * before replacing with vmload/vmrun/vmsave mnemonics.  All three take the
 * VMCB physical address implicitly in %rax.
 */
#define	VMLOAD	.byte 0x0f, 0x01, 0xda
#define	VMRUN	.byte 0x0f, 0x01, 0xd8
#define	VMSAVE	.byte 0x0f, 0x01, 0xdb
/*
 * svm_launch(uint64_t vmcb, struct svm_regctx *gctx, struct pcpu *pcpu)
 * %rdi: physical address of VMCB
 * %rsi: pointer to guest context
 * %rdx: pointer to the pcpu data
 *
 * Runs the guest via VMLOAD/VMRUN/VMSAVE, then saves the guest's GPRs into
 * *gctx, stuffs the RSB, restores the host's callee-saved registers and
 * %GS.base (pcpu pointer), and scrubs the remaining caller-saved registers.
 */
ENTRY(svm_launch)
	VENTER

	/* save pointer to the pcpu data */
	push %rdx

	/*
	 * Host register state saved across a VMRUN.
	 *
	 * All "callee saved registers" except:
	 * %rsp: because it is preserved by the processor across VMRUN.
	 * %rbp: because it is saved/restored by the function prologue/epilogue.
	 */
	push %rbx
	push %r12
	push %r13
	push %r14
	push %r15

	/* Save the physical address of the VMCB in %rax */
	movq %rdi, %rax

	push %rsi		/* push guest context pointer on the stack */

	/*
	 * Restore guest state.
	 */
	movq SCTX_R8(%rsi), %r8
	movq SCTX_R9(%rsi), %r9
	movq SCTX_R10(%rsi), %r10
	movq SCTX_R11(%rsi), %r11
	movq SCTX_R12(%rsi), %r12
	movq SCTX_R13(%rsi), %r13
	movq SCTX_R14(%rsi), %r14
	movq SCTX_R15(%rsi), %r15
	movq SCTX_RBP(%rsi), %rbp
	movq SCTX_RBX(%rsi), %rbx
	movq SCTX_RCX(%rsi), %rcx
	movq SCTX_RDX(%rsi), %rdx
	movq SCTX_RDI(%rsi), %rdi
	movq SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */

	/* All three use the VMCB physical address in %rax. */
	VMLOAD
	VMRUN
	VMSAVE

	pop %rax		/* pop guest context pointer from the stack */

	/*
	 * Save guest state.
	 */
	movq %r8, SCTX_R8(%rax)
	movq %r9, SCTX_R9(%rax)
	movq %r10, SCTX_R10(%rax)
	movq %r11, SCTX_R11(%rax)
	movq %r12, SCTX_R12(%rax)
	movq %r13, SCTX_R13(%rax)
	movq %r14, SCTX_R14(%rax)
	movq %r15, SCTX_R15(%rax)
	movq %rbp, SCTX_RBP(%rax)
	movq %rbx, SCTX_RBX(%rax)
	movq %rcx, SCTX_RCX(%rax)
	movq %rdx, SCTX_RDX(%rax)
	movq %rdi, SCTX_RDI(%rax)
	movq %rsi, SCTX_RSI(%rax)

	/*
	 * To prevent malicious branch target predictions from
	 * affecting the host, overwrite all entries in the RSB upon
	 * exiting a guest.
	 *
	 * GAS numeric local labels (0:/1:/2:) may be defined repeatedly;
	 * each "1b"/"2f" reference binds to the nearest definition in the
	 * given direction, so the duplicated 1:/2: labels below are
	 * intentional.  %rax is free here (the guest context pointer was
	 * already consumed above) and is used to stash %rsp while the
	 * stuffing calls push return addresses.
	 */
	mov $16, %ecx	/* 16 iterations, two calls per loop */
	mov %rsp, %rax
0:	call 2f		/* create an RSB entry. */
1:	pause
	call 1b		/* capture rogue speculation. */
2:	call 2f		/* create an RSB entry. */
1:	pause
	call 1b		/* capture rogue speculation. */
2:	sub $1, %ecx
	jnz 0b
	mov %rax, %rsp	/* discard the stuffed return addresses */

	/* Restore host state */
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbx

	/*
	 * Restore %GS.base to point to the host's pcpu data.
	 * wrmsr takes the MSR index in %ecx and the value in %edx:%eax,
	 * so split the saved 64-bit pcpu pointer across %edx/%eax.
	 */
	pop %rdx
	mov %edx, %eax		/* low 32 bits of the pcpu pointer */
	shr $32, %rdx		/* high 32 bits */
	mov $MSR_GSBASE, %rcx
	wrmsr

	/*
	 * Clobber the remaining registers with guest contents so they
	 * can't be misused.
	 */
	xor %rbp, %rbp
	xor %rdi, %rdi
	xor %rsi, %rsi
	xor %r8, %r8
	xor %r9, %r9
	xor %r10, %r10
	xor %r11, %r11

	VLEAVE
	ret
END(svm_launch)