xref: /linux/arch/x86/entry/vdso/vdso64/vsgx.S (revision 6f7e6393d1ce636bb7ec77a7fe7b77458fddf701)
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/enclu.h>

#include "extable.h"

/* Relative to %rbp. */
#define SGX_ENCLAVE_OFFSET_OF_RUN		16
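/*
 * The run struct is the 7th C argument and thus arrives on the stack: after
 * "push %rbp; mov %rsp, %rbp" the saved %rbp is at 0(%rbp), the return
 * address at 8(%rbp) and the first stack-passed argument at 16(%rbp).
 */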

/* The offsets relative to struct sgx_enclave_run. */
#define SGX_ENCLAVE_RUN_TCS			0
#define SGX_ENCLAVE_RUN_LEAF			8
#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR	12
#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE	14
#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR		16
#define SGX_ENCLAVE_RUN_USER_HANDLER		24
#define SGX_ENCLAVE_RUN_USER_DATA		32	/* not used */
#define SGX_ENCLAVE_RUN_RESERVED_START		40
#define SGX_ENCLAVE_RUN_RESERVED_END		256

.code64
.section .text, "ax"

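/*
 * Pseudo-prototype, per the vdso_sgx_enter_enclave_t typedef in the SGX
 * uapi header:
 *
 * int __vdso_sgx_enter_enclave(unsigned long rdi, unsigned long rsi,
 *				unsigned long rdx, unsigned int function,
 *				unsigned long r8,  unsigned long r9,
 *				struct sgx_enclave_run *run);
 *
 * The ENCLU function (EENTER or ERESUME) is taken from %ecx and the run
 * struct from the stack; %rdi, %rsi, %rdx, %r8 and %r9 are passed through
 * to the enclave untouched.
 */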
SYM_FUNC_START(__vdso_sgx_enter_enclave)
	/* Prolog */
	.cfi_startproc
	push	%rbp
	.cfi_adjust_cfa_offset	8
	.cfi_rel_offset		%rbp, 0
	mov	%rsp, %rbp
	.cfi_def_cfa_register	%rbp
	push	%rbx
	.cfi_rel_offset		%rbx, -8

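	/* The requested ENCLU function is the 4th C argument, i.e. %ecx. */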
	mov	%ecx, %eax
.Lenter_enclave:
	/* EENTER <= function <= ERESUME */
	cmp	$EENTER, %eax
	jb	.Linvalid_input
	cmp	$ERESUME, %eax
	ja	.Linvalid_input

	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx

	/* Validate that the reserved area contains only zeros. */
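	/*
	 * (Presumably so the reserved bytes can be given meaning later
	 * without breaking existing callers.)
	 */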
	mov	$SGX_ENCLAVE_RUN_RESERVED_START, %rbx
1:
	cmpq	$0, (%rcx, %rbx)
	jne	.Linvalid_input
	add	$8, %rbx
	cmpq	$SGX_ENCLAVE_RUN_RESERVED_END, %rbx
	jne	1b

	/* Load TCS and AEP */
	mov	SGX_ENCLAVE_RUN_TCS(%rcx), %rbx
	lea	.Lasync_exit_pointer(%rip), %rcx

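	/*
	 * On an asynchronous exit (AEX) the CPU lands on the AEP with a
	 * synthetic state: ERESUME in %eax, the TCS in %rbx and the AEP
	 * itself in %rcx, so re-executing this single ENCLU resumes the
	 * enclave.
	 */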
	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
.Lasync_exit_pointer:
.Lenclu_eenter_eresume:
	enclu

	/* EEXIT jumps here unless the enclave is doing something fancy. */
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set exit_reason. */
	movl	$EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx)

	/* Invoke userspace's exit handler if one was provided. */
.Lhandle_exit:
	cmpq	$0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx)
	jne	.Linvoke_userspace_handler

	/* Success, in the sense that ENCLU was attempted. */
	xor	%eax, %eax

.Lout:
	pop	%rbx
	leave
	.cfi_def_cfa		%rsp, 8
	RET

	/* The out-of-line code runs with the pre-leave stack frame. */
	.cfi_def_cfa		%rbp, 16

.Linvalid_input:
	mov	$(-EINVAL), %eax
	jmp	.Lout

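	/*
	 * Faults on ENCLU are redirected here by the vDSO exception fixup
	 * (see the _ASM_VDSO_EXTABLE_HANDLE entry below), which is expected
	 * to supply the trap number in %di, the error code in %si and the
	 * fault address in %rdx, while %eax still holds the attempted ENCLU
	 * function.
	 */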
.Lhandle_exception:
	mov	SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx

	/* Set the exception info. */
	mov	%eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx)
	mov	%di,  (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx)
	mov	%si,  (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx)
	mov	%rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx)
	jmp	.Lhandle_exit

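	/*
	 * The handler (see the sgx_enclave_user_handler_t typedef in the SGX
	 * uapi header) receives the exit-time %rdi/%rsi/%rdx/%r8/%r9, the
	 * untrusted %rsp as its 4th argument and the run struct as its 7th;
	 * its return value decides whether to bail out or re-enter the
	 * enclave (see the check after the call).
	 */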
.Linvoke_userspace_handler:
	/* Pass the untrusted RSP (at exit) to the callback via %rcx. */
	mov	%rsp, %rcx

	/* Save the sgx_enclave_run pointer; %rbx is about to be clobbered. */
	mov	%rbx, %rax

	/* Save the untrusted RSP offset in %rbx (non-volatile register). */
	mov	%rsp, %rbx
	and	$0xf, %rbx

	/*
	 * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned
	 * _after_ pushing the parameters on the stack, hence the bonus push.
	 */
	and	$-0x10, %rsp
	push	%rax

	/* Push the sgx_enclave_run pointer as a param to the callback. */
	push	%rax

	/* Clear RFLAGS.DF per x86_64 ABI */
	cld

	/*
	 * Load the callback pointer to %rax and lfence for LVI (load value
	 * injection) protection before making the call.
	 */
	mov	SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax
	lfence
	call	*%rax

	/*
	 * Undo the post-exit %rsp adjustment: pop the two 8-byte pushes and
	 * restore the pre-alignment offset saved in %rbx.
	 */
	lea	0x10(%rsp, %rbx), %rsp

	/*
	 * If the return from callback is zero or negative, return immediately,
	 * else re-execute ENCLU with the positive return value interpreted as
	 * the requested ENCLU function.
	 */
	cmp	$0, %eax
	jle	.Lout
	jmp	.Lenter_enclave

	.cfi_endproc

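/*
 * Register the ENCLU address in the vDSO exception table so that most faults
 * on EENTER/ERESUME are fixed up into .Lhandle_exception and reported via the
 * run struct rather than delivered as signals.
 */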
_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception)

SYM_FUNC_END(__vdso_sgx_enter_enclave)