/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"

/* Relative to %rbp. */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16

/* The offsets relative to struct sgx_enclave_run. */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256
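
/*
 * For reference, these offsets mirror struct sgx_enclave_run as declared
 * in arch/x86/include/uapi/asm/sgx.h:
 *
 *	struct sgx_enclave_run {
 *		__u64 tcs;			// offset  0
 *		__u32 function;			// offset  8 (the ENCLU leaf)
 *		__u16 exception_vector;		// offset 12
 *		__u16 exception_error_code;	// offset 14
 *		__u64 exception_addr;		// offset 16
 *		__u64 user_handler;		// offset 24
 *		__u64 user_data;		// offset 32
 *		__u8  reserved[216];		// offsets 40..255
 *	};
 */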

.code64
.section .text, "ax"

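/*
 * C prototype of this entry point, per the vdso_sgx_enter_enclave_t typedef
 * in arch/x86/include/uapi/asm/sgx.h:
 *
 *	int __vdso_sgx_enter_enclave(unsigned long rdi, unsigned long rsi,
 *				     unsigned long rdx, unsigned int function,
 *				     unsigned long r8,  unsigned long r9,
 *				     struct sgx_enclave_run *run);
 *
 * "run" is the seventh argument and is thus passed on the stack: after the
 * prolog pushes %rbp, the return address sits at 8(%rbp) and "run" at
 * 16(%rbp), which is what SGX_ENCLAVE_OFFSET_OF_RUN encodes.
 */
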
SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx
	.cfi_rel_offset		%rbx, -8

	/* ENCLU takes its leaf function in %eax; it arrived as arg 4 in %ecx. */
	mov	%ecx, %eax
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/* Validate that the reserved area contains only zeros. */
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input
	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b
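
	/*
	 * Equivalent C sketch of the loop above (illustrative only):
	 *
	 *	for (offs = SGX_ENCLAVE_RUN_RESERVED_START;
	 *	     offs != SGX_ENCLAVE_RUN_RESERVED_END; offs += 8)
	 *		if (*(u64 *)((char *)run + offs))
	 *			return -EINVAL;
	 */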

	/* Load TCS and AEP */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx

	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu
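
	/*
	 * Note: on an asynchronous enclave exit (AEX), the CPU vectors to the
	 * AEP, i.e. back to the ENCLU above, with %eax set to ERESUME, so an
	 * interrupted enclave is resumed transparently.  Exceptions, which
	 * the hardware reports at the AEP, are instead redirected to
	 * .Lhandle_exception via the _ASM_VDSO_EXTABLE_HANDLE entry at the
	 * bottom of this file.
	 */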

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set exit_reason. */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)
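
	/*
	 * Userspace thus sees run->function == EEXIT after a normal return,
	 * vs. EENTER/ERESUME plus exception info after a handled fault.
	 */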

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	RET

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

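	/*
	 * Reached via the vDSO extable fixup (fixup_vdso_exception() in the
	 * kernel; see _ASM_VDSO_EXTABLE_HANDLE below): the kernel redirects
	 * RIP here with the trap number in %rdi, the error code in %rsi and
	 * the fault address in %rdx, while %eax still holds the last
	 * attempted ENCLU leaf.
	 */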
.Lhandle_exception:
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit

.Linvoke_userspace_handler:
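	/*
	 * The callback follows the sgx_enclave_user_handler_t signature from
	 * arch/x86/include/uapi/asm/sgx.h:
	 *
	 *	int handler(long rdi, long rsi, long rdx, long ursp,
	 *		    long r8, long r9, struct sgx_enclave_run *run);
	 *
	 * so the untrusted RSP travels in the fourth GPR slot (%rcx) and the
	 * run pointer is pushed as the lone stack argument below.
	 */
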
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the struct sgx_enclave_run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax

	/* Push the struct sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/* Undo the post-exit %rsp adjustment. */
	lea	0x10(%rsp, %rbx), %rsp
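
	/*
	 * Worked example: if the post-exit %rsp was 0x...38, then %rbx = 8
	 * and the and/push sequence above left %rsp at 0x...20;
	 * 0x20 + 0x10 + 0x8 restores the original 0x...38.
	 */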

	/*
	 * If the return from callback is zero or negative, return immediately,
	 * else re-execute ENCLU with the positive return value interpreted as
	 * the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc

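/*
 * Register .Lhandle_exception as the vDSO fixup target for faults taken at
 * the ENCLU above; the kernel consults this table in fixup_vdso_exception()
 * before resorting to signal delivery.
 */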
_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)