xref: /linux/arch/x86/entry/vdso/vsgx.S (revision f9bff0e31881d03badf191d3b0005839391f5f2b)
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"

/* Relative to %rbp. */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16

/* The offsets relative to struct sgx_enclave_run. */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256
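
/*
 * For reference, a sketch of the C layout these offsets assume (field names
 * follow the macros above; the authoritative struct sgx_enclave_run lives in
 * the SGX UAPI header, not here):
 *
 *	struct sgx_enclave_run {
 *		__u64 tcs;			   offset  0
 *		__u32 function;			   offset  8  (the "leaf")
 *		__u16 exception_vector;		   offset 12
 *		__u16 exception_error_code;	   offset 14
 *		__u64 exception_addr;		   offset 16
 *		__u64 user_handler;		   offset 24
 *		__u64 user_data;		   offset 32  (not used here)
 *		__u8  reserved[256 - 40];	   offset 40, must be all zeros
 *	};
 */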

.code64
.section .text, "ax"

SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx
	.cfi_rel_offset		%rbx, -8

	mov	%ecx, %eax
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input
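
	/*
	 * Roughly equivalent C, for reference ("function" is the value that
	 * arrived in %ecx; EENTER and ERESUME come from asm/enclu.h):
	 *
	 *	if (function < EENTER || function > ERESUME)
	 *		return -EINVAL;
	 */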

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/* Validate that the reserved area contains only zeros. */
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input
	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b
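
	/*
	 * Roughly equivalent C, for reference ("run" stands for the
	 * struct sgx_enclave_run pointer held in %rcx):
	 *
	 *	for (offset = SGX_ENCLAVE_RUN_RESERVED_START;
	 *	     offset < SGX_ENCLAVE_RUN_RESERVED_END; offset += 8)
	 *		if (*(const __u64 *)((const char *)run + offset))
	 *			return -EINVAL;
	 */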

	/* Load TCS and AEP */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx
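
	/*
	 * The EENTER/ERESUME register convention is now satisfied: %eax holds
	 * the leaf, %rbx the TCS address and %rcx the AEP.  A fault raised by
	 * the ENCLU below is redirected to .Lhandle_exception through the
	 * fixup entry emitted at the end of this file.
	 */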

	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Record EEXIT as the exit leaf (SGX_ENCLAVE_RUN_LEAF). */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	RET

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

.Lhandle_exception:
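	/*
	 * Reached via the vDSO exception fixup registered below for the ENCLU
	 * above: this code expects the kernel to have placed the exception
	 * vector in %di, the error code in %si and the fault address in %rdx
	 * before redirecting execution here.
	 */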
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit

.Linvoke_userspace_handler:
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the sgx_enclave_run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax
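	/*
	 * After the "and" above %rsp is 16-byte aligned; the bonus push plus
	 * the parameter push below subtract 16 bytes in total, so %rsp is
	 * again 16-byte aligned at the call site, as the psABI requires.
	 */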

	/* Push the sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld
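
	/*
	 * The callback runs with %rdi, %rsi, %rdx, %r8 and %r9 still holding
	 * whatever the enclave exit (or the exception fixup) left in them,
	 * %rcx set to the untrusted %rsp at exit, and the sgx_enclave_run
	 * pointer as the lone stack argument.  Its int return value is acted
	 * on after the call.
	 */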

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/*
	 * Undo the post-exit %rsp adjustment: drop the two 8-byte pushes and
	 * restore the alignment offset saved in %rbx.
	 */
	lea	0x10(%rsp, %rbx), %rsp

	/*
	 * If the return from callback is zero or negative, return immediately,
	 * else re-execute ENCLU with the positive return value interpreted as
	 * the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc

_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)
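
/*
 * Caller-side view implied by the code above (an illustrative sketch, not the
 * canonical UAPI prototype): the ENCLU leaf arrives in the 4th argument
 * register (%rcx, copied to %eax in the prolog) and the sgx_enclave_run
 * pointer is the 7th argument, i.e. the first stack argument at 16(%rbp).
 * In C terms, roughly:
 *
 *	typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi,
 *						unsigned long rsi,
 *						unsigned long rdx,
 *						unsigned int function,
 *						unsigned long r8,
 *						unsigned long r9,
 *						struct sgx_enclave_run *run);
 *
 *	struct sgx_enclave_run run = { .tcs = tcs_addr };
 *	int ret = enter_enclave(rdi, rsi, rdx, EENTER, r8, r9, &run);
 *
 * where enter_enclave is the __vdso_sgx_enter_enclave symbol resolved from
 * the process' vDSO mapping (resolution elided).  The return value is 0 when
 * ENCLU was attempted, -EINVAL on bad input, or the non-positive value
 * returned by the user handler; run reports the exit leaf and any exception
 * details.
 */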