/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <machine/asmacros.h>

#include "svm_assym.h"

/*
 * The VENTER/VLEAVE macros below keep the function prologue/epilogue
 * friendly to DTrace FBT's pattern matching (the expanded entry/exit
 * sequence is sketched after the macro definitions).
 *
 * They are also responsible for saving/restoring the host %rbp across
 * VMRUN.
 */
#define	VENTER  push %rbp ; mov %rsp,%rbp
#define	VLEAVE  pop %rbp

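/*
 * For reference, a sketch of how svm_launch expands once the macros above
 * are applied.  This is derived from this file alone, not from FBT's
 * internals, and only illustrates the conventional frame sequence that the
 * macros preserve:
 *
 *	svm_launch:
 *		push %rbp		; VENTER
 *		mov  %rsp, %rbp
 *		...			; save host regs, VMRUN, save guest regs
 *		pop  %rbp		; VLEAVE
 *		ret
 */
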
/*
 * svm_launch(uint64_t vmcb, struct svm_regctx *gctx)
 * %rdi: physical address of VMCB
 * %rsi: pointer to guest context
 */
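/*
 * For reference, a minimal sketch of the C-side view of this entry point,
 * assuming the SysV amd64 calling convention.  The void return type and
 * the parameter names are illustrative assumptions, not taken from this
 * file:
 *
 *	void svm_launch(uint64_t vmcb_pa, struct svm_regctx *gctx);
 *
 * The first argument arrives in %rdi (VMCB physical address) and the
 * second in %rsi (guest register context), matching the comment above.
 */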
ENTRY(svm_launch)
	VENTER

	/*
	 * Host register state saved across a VMRUN.
	 *
	 * All "callee saved registers" except:
	 * %rsp: because it is preserved by the processor across VMRUN.
	 * %rbp: because it is saved/restored by the function prologue/epilogue.
	 */
	push %rbx
	push %r12
	push %r13
	push %r14
	push %r15

	/* Save the physical address of the VMCB in %rax */
	movq %rdi, %rax

	push %rsi		/* push guest context pointer on the stack */

	/*
	 * Restore guest state.
	 */
	movq SCTX_R8(%rsi), %r8
	movq SCTX_R9(%rsi), %r9
	movq SCTX_R10(%rsi), %r10
	movq SCTX_R11(%rsi), %r11
	movq SCTX_R12(%rsi), %r12
	movq SCTX_R13(%rsi), %r13
	movq SCTX_R14(%rsi), %r14
	movq SCTX_R15(%rsi), %r15
	movq SCTX_RBP(%rsi), %rbp
	movq SCTX_RBX(%rsi), %rbx
	movq SCTX_RCX(%rsi), %rcx
	movq SCTX_RDX(%rsi), %rdx
	movq SCTX_RDI(%rsi), %rdi
	movq SCTX_RSI(%rsi), %rsi	/* %rsi is the context pointer, so it must be restored last */

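	/*
	 * %rax holds the physical address of the VMCB (loaded from %rdi
	 * above).  vmload pulls in the guest state that vmrun does not
	 * restore itself (FS/GS/TR/LDTR and the syscall/sysenter MSRs),
	 * vmrun enters the guest and returns here on the next #VMEXIT,
	 * and vmsave writes that same additional state back to the VMCB.
	 */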
	vmload %rax
	vmrun %rax
	vmsave %rax

	pop %rax		/* pop guest context pointer from the stack */

	/*
	 * Save guest state.
	 */
	movq %r8, SCTX_R8(%rax)
	movq %r9, SCTX_R9(%rax)
	movq %r10, SCTX_R10(%rax)
	movq %r11, SCTX_R11(%rax)
	movq %r12, SCTX_R12(%rax)
	movq %r13, SCTX_R13(%rax)
	movq %r14, SCTX_R14(%rax)
	movq %r15, SCTX_R15(%rax)
	movq %rbp, SCTX_RBP(%rax)
	movq %rbx, SCTX_RBX(%rax)
	movq %rcx, SCTX_RCX(%rax)
	movq %rdx, SCTX_RDX(%rax)
	movq %rdi, SCTX_RDI(%rax)
	movq %rsi, SCTX_RSI(%rax)

	/* Restore host state */
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbx

	VLEAVE
	ret
END(svm_launch)