xref: /freebsd/sys/amd64/sgx/sgx.c (revision 2164af29a083d67122fdf9c294a792c258c7a14d)
1*2164af29SRuslan Bukin /*-
2*2164af29SRuslan Bukin  * Copyright (c) 2017 Ruslan Bukin <br@bsdpad.com>
3*2164af29SRuslan Bukin  * All rights reserved.
4*2164af29SRuslan Bukin  *
5*2164af29SRuslan Bukin  * This software was developed by BAE Systems, the University of Cambridge
6*2164af29SRuslan Bukin  * Computer Laboratory, and Memorial University under DARPA/AFRL contract
7*2164af29SRuslan Bukin  * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
8*2164af29SRuslan Bukin  * (TC) research program.
9*2164af29SRuslan Bukin  *
10*2164af29SRuslan Bukin  * Redistribution and use in source and binary forms, with or without
11*2164af29SRuslan Bukin  * modification, are permitted provided that the following conditions
12*2164af29SRuslan Bukin  * are met:
13*2164af29SRuslan Bukin  * 1. Redistributions of source code must retain the above copyright
14*2164af29SRuslan Bukin  *    notice, this list of conditions and the following disclaimer.
15*2164af29SRuslan Bukin  * 2. Redistributions in binary form must reproduce the above copyright
16*2164af29SRuslan Bukin  *    notice, this list of conditions and the following disclaimer in the
17*2164af29SRuslan Bukin  *    documentation and/or other materials provided with the distribution.
18*2164af29SRuslan Bukin  *
19*2164af29SRuslan Bukin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20*2164af29SRuslan Bukin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21*2164af29SRuslan Bukin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22*2164af29SRuslan Bukin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23*2164af29SRuslan Bukin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24*2164af29SRuslan Bukin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25*2164af29SRuslan Bukin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26*2164af29SRuslan Bukin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27*2164af29SRuslan Bukin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28*2164af29SRuslan Bukin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29*2164af29SRuslan Bukin  * SUCH DAMAGE.
30*2164af29SRuslan Bukin  */
31*2164af29SRuslan Bukin 
32*2164af29SRuslan Bukin /*
33*2164af29SRuslan Bukin  * Design overview.
34*2164af29SRuslan Bukin  *
35*2164af29SRuslan Bukin  * The driver provides character device for mmap(2) and ioctl(2) system calls
36*2164af29SRuslan Bukin  * allowing user to manage isolated compartments ("enclaves") in user VA space.
37*2164af29SRuslan Bukin  *
 38*2164af29SRuslan Bukin  * The driver's duties are EPC page management, enclave management, and
 39*2164af29SRuslan Bukin  * user data validation.
40*2164af29SRuslan Bukin  *
41*2164af29SRuslan Bukin  * This driver requires Intel SGX support from hardware.
42*2164af29SRuslan Bukin  *
43*2164af29SRuslan Bukin  * /dev/sgx:
44*2164af29SRuslan Bukin  *    .mmap:
45*2164af29SRuslan Bukin  *        sgx_mmap_single() allocates VM object with following pager
46*2164af29SRuslan Bukin  *        operations:
47*2164af29SRuslan Bukin  *              a) sgx_pg_ctor():
48*2164af29SRuslan Bukin  *                  VM object constructor does nothing
49*2164af29SRuslan Bukin  *              b) sgx_pg_dtor():
50*2164af29SRuslan Bukin  *                  VM object destructor destroys the SGX enclave associated
51*2164af29SRuslan Bukin  *                  with the object: it frees all the EPC pages allocated for
52*2164af29SRuslan Bukin  *                  enclave and removes the enclave.
53*2164af29SRuslan Bukin  *              c) sgx_pg_fault():
54*2164af29SRuslan Bukin  *                  VM object fault handler does nothing
55*2164af29SRuslan Bukin  *
56*2164af29SRuslan Bukin  *    .ioctl:
57*2164af29SRuslan Bukin  *        sgx_ioctl():
58*2164af29SRuslan Bukin  *               a) SGX_IOC_ENCLAVE_CREATE
59*2164af29SRuslan Bukin  *                   Adds Enclave SECS page: initial step of enclave creation.
60*2164af29SRuslan Bukin  *               b) SGX_IOC_ENCLAVE_ADD_PAGE
61*2164af29SRuslan Bukin  *                   Adds TCS, REG pages to the enclave.
62*2164af29SRuslan Bukin  *               c) SGX_IOC_ENCLAVE_INIT
63*2164af29SRuslan Bukin  *                   Finalizes enclave creation.
64*2164af29SRuslan Bukin  *
65*2164af29SRuslan Bukin  * Enclave lifecycle:
66*2164af29SRuslan Bukin  *          .-- ECREATE  -- Add SECS page
67*2164af29SRuslan Bukin  *   Kernel |   EADD     -- Add TCS, REG pages
68*2164af29SRuslan Bukin  *    space |   EEXTEND  -- Measure the page (take unique hash)
69*2164af29SRuslan Bukin  *    ENCLS |   EPA      -- Allocate version array page
70*2164af29SRuslan Bukin  *          '-- EINIT    -- Finalize enclave creation
71*2164af29SRuslan Bukin  *   User   .-- EENTER   -- Go to entry point of enclave
72*2164af29SRuslan Bukin  *    space |   EEXIT    -- Exit back to main application
73*2164af29SRuslan Bukin  *    ENCLU '-- ERESUME  -- Resume enclave execution (e.g. after exception)
74*2164af29SRuslan Bukin  *
75*2164af29SRuslan Bukin  * Enclave lifecycle from driver point of view:
76*2164af29SRuslan Bukin  *  1) User calls mmap() on /dev/sgx: we allocate a VM object
77*2164af29SRuslan Bukin  *  2) User calls ioctl SGX_IOC_ENCLAVE_CREATE: we look for the VM object
78*2164af29SRuslan Bukin  *     associated with user process created on step 1, create SECS physical
79*2164af29SRuslan Bukin  *     page and store it in enclave's VM object queue by special index
80*2164af29SRuslan Bukin  *     SGX_SECS_VM_OBJECT_INDEX.
81*2164af29SRuslan Bukin  *  3) User calls ioctl SGX_IOC_ENCLAVE_ADD_PAGE: we look for enclave created
82*2164af29SRuslan Bukin  *     on step 2, create TCS or REG physical page and map it to specified by
83*2164af29SRuslan Bukin  *     user address of enclave VM object.
84*2164af29SRuslan Bukin  *  4) User finalizes enclave creation with ioctl SGX_IOC_ENCLAVE_INIT call.
85*2164af29SRuslan Bukin  *  5) User can freely enter to and exit from enclave using ENCLU instructions
86*2164af29SRuslan Bukin  *     from userspace: the driver does nothing here.
 87*2164af29SRuslan Bukin  *  6) User performs the munmap(2) system call (or the process owning the
 88*2164af29SRuslan Bukin  *     enclave dies): we destroy the enclave associated with the object.
89*2164af29SRuslan Bukin  *
90*2164af29SRuslan Bukin  * EPC page types and their indexes in VM object queue:
91*2164af29SRuslan Bukin  *   - PT_SECS index is special and equals SGX_SECS_VM_OBJECT_INDEX (-1);
92*2164af29SRuslan Bukin  *   - PT_TCS and PT_REG indexes are specified by user in addr field of ioctl
93*2164af29SRuslan Bukin  *     request data and determined as follows:
94*2164af29SRuslan Bukin  *       pidx = OFF_TO_IDX(addp->addr - vmh->base);
95*2164af29SRuslan Bukin  *   - PT_VA index is special, created for PT_REG, PT_TCS and PT_SECS pages
96*2164af29SRuslan Bukin  *     and determined by formula:
97*2164af29SRuslan Bukin  *       va_page_idx = - SGX_VA_PAGES_OFFS - (page_idx / SGX_VA_PAGE_SLOTS);
98*2164af29SRuslan Bukin  *     PT_VA page can hold versions of up to 512 pages, and slot for each
99*2164af29SRuslan Bukin  *     page in PT_VA page is determined as follows:
100*2164af29SRuslan Bukin  *       va_slot_idx = page_idx % SGX_VA_PAGE_SLOTS;
101*2164af29SRuslan Bukin  *   - PT_TRIM is unused.
102*2164af29SRuslan Bukin  *
103*2164af29SRuslan Bukin  * Locking:
 104*2164af29SRuslan Bukin  *    The SGX ENCLS set of instructions has limitations on concurrency:
 105*2164af29SRuslan Bukin  *    some instructions can't be executed at the same time on different CPUs.
106*2164af29SRuslan Bukin  *    We use sc->mtx_encls lock around them to prevent concurrent execution.
107*2164af29SRuslan Bukin  *    sc->mtx lock is used to manage list of created enclaves and the state of
108*2164af29SRuslan Bukin  *    SGX driver.
109*2164af29SRuslan Bukin  *
110*2164af29SRuslan Bukin  * Eviction of EPC pages:
111*2164af29SRuslan Bukin  *    Eviction support is not implemented in this driver, however the driver
112*2164af29SRuslan Bukin  *    manages VA (version array) pages: it allocates a VA slot for each EPC
113*2164af29SRuslan Bukin  *    page. This will be required for eviction support in future.
114*2164af29SRuslan Bukin  *    VA pages and slots are currently unused.
115*2164af29SRuslan Bukin  *
116*2164af29SRuslan Bukin  * Intel® 64 and IA-32 Architectures Software Developer's Manual
117*2164af29SRuslan Bukin  * https://software.intel.com/en-us/articles/intel-sdm
118*2164af29SRuslan Bukin  */
119*2164af29SRuslan Bukin 
120*2164af29SRuslan Bukin #include <sys/cdefs.h>
121*2164af29SRuslan Bukin __FBSDID("$FreeBSD$");
122*2164af29SRuslan Bukin 
123*2164af29SRuslan Bukin #include <sys/param.h>
124*2164af29SRuslan Bukin #include <sys/systm.h>
125*2164af29SRuslan Bukin #include <sys/ioccom.h>
126*2164af29SRuslan Bukin #include <sys/malloc.h>
127*2164af29SRuslan Bukin #include <sys/kernel.h>
128*2164af29SRuslan Bukin #include <sys/lock.h>
129*2164af29SRuslan Bukin #include <sys/mutex.h>
130*2164af29SRuslan Bukin #include <sys/rwlock.h>
131*2164af29SRuslan Bukin #include <sys/conf.h>
132*2164af29SRuslan Bukin #include <sys/module.h>
133*2164af29SRuslan Bukin #include <sys/proc.h>
134*2164af29SRuslan Bukin #include <sys/vmem.h>
135*2164af29SRuslan Bukin #include <sys/vmmeter.h>
136*2164af29SRuslan Bukin 
137*2164af29SRuslan Bukin #include <vm/vm.h>
138*2164af29SRuslan Bukin #include <vm/vm_param.h>
139*2164af29SRuslan Bukin #include <vm/vm_extern.h>
140*2164af29SRuslan Bukin #include <vm/vm_kern.h>
141*2164af29SRuslan Bukin #include <vm/vm_page.h>
142*2164af29SRuslan Bukin #include <vm/vm_map.h>
143*2164af29SRuslan Bukin #include <vm/vm_object.h>
144*2164af29SRuslan Bukin #include <vm/vm_pager.h>
145*2164af29SRuslan Bukin #include <vm/vm_phys.h>
146*2164af29SRuslan Bukin #include <vm/vm_radix.h>
147*2164af29SRuslan Bukin #include <vm/pmap.h>
148*2164af29SRuslan Bukin 
149*2164af29SRuslan Bukin #include <machine/md_var.h>
150*2164af29SRuslan Bukin #include <machine/specialreg.h>
151*2164af29SRuslan Bukin #include <machine/cpufunc.h>
152*2164af29SRuslan Bukin #include <machine/sgx.h>
153*2164af29SRuslan Bukin #include <machine/sgxreg.h>
154*2164af29SRuslan Bukin 
155*2164af29SRuslan Bukin #include <amd64/sgx/sgxvar.h>
156*2164af29SRuslan Bukin 
157*2164af29SRuslan Bukin #define	DEBUG
158*2164af29SRuslan Bukin #undef	DEBUG
159*2164af29SRuslan Bukin 
160*2164af29SRuslan Bukin #ifdef	DEBUG
161*2164af29SRuslan Bukin #define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
162*2164af29SRuslan Bukin #else
163*2164af29SRuslan Bukin #define	dprintf(fmt, ...)
164*2164af29SRuslan Bukin #endif
165*2164af29SRuslan Bukin 
166*2164af29SRuslan Bukin static struct cdev_pager_ops sgx_pg_ops;
167*2164af29SRuslan Bukin struct sgx_softc sgx_sc;
168*2164af29SRuslan Bukin 
169*2164af29SRuslan Bukin static int
170*2164af29SRuslan Bukin sgx_get_epc_page(struct sgx_softc *sc, struct epc_page **epc)
171*2164af29SRuslan Bukin {
172*2164af29SRuslan Bukin 	vmem_addr_t addr;
173*2164af29SRuslan Bukin 	int i;
174*2164af29SRuslan Bukin 
175*2164af29SRuslan Bukin 	if (vmem_alloc(sc->vmem_epc, PAGE_SIZE, M_FIRSTFIT | M_NOWAIT,
176*2164af29SRuslan Bukin 	    &addr) == 0) {
177*2164af29SRuslan Bukin 		i = (addr - sc->epc_base) / PAGE_SIZE;
178*2164af29SRuslan Bukin 		*epc = &sc->epc_pages[i];
179*2164af29SRuslan Bukin 		return (0);
180*2164af29SRuslan Bukin 	}
181*2164af29SRuslan Bukin 
182*2164af29SRuslan Bukin 	return (ENOMEM);
183*2164af29SRuslan Bukin }
184*2164af29SRuslan Bukin 
185*2164af29SRuslan Bukin static void
186*2164af29SRuslan Bukin sgx_put_epc_page(struct sgx_softc *sc, struct epc_page *epc)
187*2164af29SRuslan Bukin {
188*2164af29SRuslan Bukin 	vmem_addr_t addr;
189*2164af29SRuslan Bukin 
190*2164af29SRuslan Bukin 	if (epc == NULL)
191*2164af29SRuslan Bukin 		return;
192*2164af29SRuslan Bukin 
193*2164af29SRuslan Bukin 	addr = (epc->index * PAGE_SIZE) + sc->epc_base;
194*2164af29SRuslan Bukin 	vmem_free(sc->vmem_epc, addr, PAGE_SIZE);
195*2164af29SRuslan Bukin }
196*2164af29SRuslan Bukin 
197*2164af29SRuslan Bukin static int
198*2164af29SRuslan Bukin sgx_va_slot_init_by_index(struct sgx_softc *sc, vm_object_t object,
199*2164af29SRuslan Bukin     uint64_t idx)
200*2164af29SRuslan Bukin {
201*2164af29SRuslan Bukin 	struct epc_page *epc;
202*2164af29SRuslan Bukin 	vm_page_t page;
203*2164af29SRuslan Bukin 	vm_page_t p;
204*2164af29SRuslan Bukin 	int ret;
205*2164af29SRuslan Bukin 
206*2164af29SRuslan Bukin 	VM_OBJECT_ASSERT_WLOCKED(object);
207*2164af29SRuslan Bukin 
208*2164af29SRuslan Bukin 	p = vm_page_lookup(object, idx);
209*2164af29SRuslan Bukin 	if (p == NULL) {
210*2164af29SRuslan Bukin 		ret = sgx_get_epc_page(sc, &epc);
211*2164af29SRuslan Bukin 		if (ret) {
212*2164af29SRuslan Bukin 			dprintf("%s: No free EPC pages available.\n",
213*2164af29SRuslan Bukin 			    __func__);
214*2164af29SRuslan Bukin 			return (ret);
215*2164af29SRuslan Bukin 		}
216*2164af29SRuslan Bukin 
217*2164af29SRuslan Bukin 		mtx_lock(&sc->mtx_encls);
218*2164af29SRuslan Bukin 		sgx_epa((void *)epc->base);
219*2164af29SRuslan Bukin 		mtx_unlock(&sc->mtx_encls);
220*2164af29SRuslan Bukin 
221*2164af29SRuslan Bukin 		page = PHYS_TO_VM_PAGE(epc->phys);
222*2164af29SRuslan Bukin 
223*2164af29SRuslan Bukin 		vm_page_insert(page, object, idx);
224*2164af29SRuslan Bukin 		page->valid = VM_PAGE_BITS_ALL;
225*2164af29SRuslan Bukin 	}
226*2164af29SRuslan Bukin 
227*2164af29SRuslan Bukin 	return (0);
228*2164af29SRuslan Bukin }
229*2164af29SRuslan Bukin 
230*2164af29SRuslan Bukin static int
231*2164af29SRuslan Bukin sgx_va_slot_init(struct sgx_softc *sc,
232*2164af29SRuslan Bukin     struct sgx_enclave *enclave,
233*2164af29SRuslan Bukin     uint64_t addr)
234*2164af29SRuslan Bukin {
235*2164af29SRuslan Bukin 	vm_pindex_t pidx;
236*2164af29SRuslan Bukin 	uint64_t va_page_idx;
237*2164af29SRuslan Bukin 	uint64_t idx;
238*2164af29SRuslan Bukin 	vm_object_t object;
239*2164af29SRuslan Bukin 	int va_slot;
240*2164af29SRuslan Bukin 	int ret;
241*2164af29SRuslan Bukin 
242*2164af29SRuslan Bukin 	object = enclave->object;
243*2164af29SRuslan Bukin 
244*2164af29SRuslan Bukin 	VM_OBJECT_ASSERT_WLOCKED(object);
245*2164af29SRuslan Bukin 
246*2164af29SRuslan Bukin 	pidx = OFF_TO_IDX(addr);
247*2164af29SRuslan Bukin 
248*2164af29SRuslan Bukin 	va_slot = pidx % SGX_VA_PAGE_SLOTS;
249*2164af29SRuslan Bukin 	va_page_idx = pidx / SGX_VA_PAGE_SLOTS;
250*2164af29SRuslan Bukin 	idx = - SGX_VA_PAGES_OFFS - va_page_idx;
251*2164af29SRuslan Bukin 
252*2164af29SRuslan Bukin 	ret = sgx_va_slot_init_by_index(sc, object, idx);
253*2164af29SRuslan Bukin 
254*2164af29SRuslan Bukin 	return (ret);
255*2164af29SRuslan Bukin }
256*2164af29SRuslan Bukin 
257*2164af29SRuslan Bukin static int
258*2164af29SRuslan Bukin sgx_mem_find(struct sgx_softc *sc, uint64_t addr,
259*2164af29SRuslan Bukin     vm_map_entry_t *entry0, vm_object_t *object0)
260*2164af29SRuslan Bukin {
261*2164af29SRuslan Bukin 	vm_map_t map;
262*2164af29SRuslan Bukin 	vm_map_entry_t entry;
263*2164af29SRuslan Bukin 	vm_object_t object;
264*2164af29SRuslan Bukin 
265*2164af29SRuslan Bukin 	map = &curproc->p_vmspace->vm_map;
266*2164af29SRuslan Bukin 
267*2164af29SRuslan Bukin 	vm_map_lock_read(map);
268*2164af29SRuslan Bukin 	if (!vm_map_lookup_entry(map, addr, &entry)) {
269*2164af29SRuslan Bukin 		vm_map_unlock_read(map);
270*2164af29SRuslan Bukin 		dprintf("%s: Can't find enclave.\n", __func__);
271*2164af29SRuslan Bukin 		return (EINVAL);
272*2164af29SRuslan Bukin 	}
273*2164af29SRuslan Bukin 
274*2164af29SRuslan Bukin 	object = entry->object.vm_object;
275*2164af29SRuslan Bukin 	if (object == NULL || object->handle == NULL) {
276*2164af29SRuslan Bukin 		vm_map_unlock_read(map);
277*2164af29SRuslan Bukin 		return (EINVAL);
278*2164af29SRuslan Bukin 	}
279*2164af29SRuslan Bukin 
280*2164af29SRuslan Bukin 	if (object->type != OBJT_MGTDEVICE ||
281*2164af29SRuslan Bukin 	    object->un_pager.devp.ops != &sgx_pg_ops) {
282*2164af29SRuslan Bukin 		vm_map_unlock_read(map);
283*2164af29SRuslan Bukin 		return (EINVAL);
284*2164af29SRuslan Bukin 	}
285*2164af29SRuslan Bukin 
286*2164af29SRuslan Bukin 	vm_object_reference(object);
287*2164af29SRuslan Bukin 
288*2164af29SRuslan Bukin 	*object0 = object;
289*2164af29SRuslan Bukin 	*entry0 = entry;
290*2164af29SRuslan Bukin 	vm_map_unlock_read(map);
291*2164af29SRuslan Bukin 
292*2164af29SRuslan Bukin 	return (0);
293*2164af29SRuslan Bukin }
294*2164af29SRuslan Bukin 
295*2164af29SRuslan Bukin static int
296*2164af29SRuslan Bukin sgx_enclave_find(struct sgx_softc *sc, uint64_t addr,
297*2164af29SRuslan Bukin     struct sgx_enclave **encl)
298*2164af29SRuslan Bukin {
299*2164af29SRuslan Bukin 	struct sgx_vm_handle *vmh;
300*2164af29SRuslan Bukin 	struct sgx_enclave *enclave;
301*2164af29SRuslan Bukin 	vm_map_entry_t entry;
302*2164af29SRuslan Bukin 	vm_object_t object;
303*2164af29SRuslan Bukin 	int ret;
304*2164af29SRuslan Bukin 
305*2164af29SRuslan Bukin 	ret = sgx_mem_find(sc, addr, &entry, &object);
306*2164af29SRuslan Bukin 	if (ret)
307*2164af29SRuslan Bukin 		return (ret);
308*2164af29SRuslan Bukin 
309*2164af29SRuslan Bukin 	vmh = object->handle;
310*2164af29SRuslan Bukin 	if (vmh == NULL) {
311*2164af29SRuslan Bukin 		vm_object_deallocate(object);
312*2164af29SRuslan Bukin 		return (EINVAL);
313*2164af29SRuslan Bukin 	}
314*2164af29SRuslan Bukin 
315*2164af29SRuslan Bukin 	enclave = vmh->enclave;
316*2164af29SRuslan Bukin 	if (enclave == NULL || enclave->object == NULL) {
317*2164af29SRuslan Bukin 		vm_object_deallocate(object);
318*2164af29SRuslan Bukin 		return (EINVAL);
319*2164af29SRuslan Bukin 	}
320*2164af29SRuslan Bukin 
321*2164af29SRuslan Bukin 	*encl = enclave;
322*2164af29SRuslan Bukin 
323*2164af29SRuslan Bukin 	return (0);
324*2164af29SRuslan Bukin }
325*2164af29SRuslan Bukin 
326*2164af29SRuslan Bukin static int
327*2164af29SRuslan Bukin sgx_enclave_alloc(struct sgx_softc *sc, struct secs *secs,
328*2164af29SRuslan Bukin     struct sgx_enclave **enclave0)
329*2164af29SRuslan Bukin {
330*2164af29SRuslan Bukin 	struct sgx_enclave *enclave;
331*2164af29SRuslan Bukin 
332*2164af29SRuslan Bukin 	enclave = malloc(sizeof(struct sgx_enclave),
333*2164af29SRuslan Bukin 	    M_SGX, M_WAITOK | M_ZERO);
334*2164af29SRuslan Bukin 
335*2164af29SRuslan Bukin 	enclave->base = secs->base;
336*2164af29SRuslan Bukin 	enclave->size = secs->size;
337*2164af29SRuslan Bukin 
338*2164af29SRuslan Bukin 	*enclave0 = enclave;
339*2164af29SRuslan Bukin 
340*2164af29SRuslan Bukin 	return (0);
341*2164af29SRuslan Bukin }
342*2164af29SRuslan Bukin 
/*
 * Execute ENCLS[EREMOVE] on an EPC page, detaching it from its
 * enclave.  The operation is serialized with other ENCLS
 * instructions via mtx_encls (see "Locking" in the file header).
 */
static void
sgx_epc_page_remove(struct sgx_softc *sc,
    struct epc_page *epc)
{

	mtx_lock(&sc->mtx_encls);
	sgx_eremove((void *)epc->base);
	mtx_unlock(&sc->mtx_encls);
}
352*2164af29SRuslan Bukin 
353*2164af29SRuslan Bukin static void
354*2164af29SRuslan Bukin sgx_page_remove(struct sgx_softc *sc, vm_page_t p)
355*2164af29SRuslan Bukin {
356*2164af29SRuslan Bukin 	struct epc_page *epc;
357*2164af29SRuslan Bukin 	vm_paddr_t pa;
358*2164af29SRuslan Bukin 	uint64_t offs;
359*2164af29SRuslan Bukin 
360*2164af29SRuslan Bukin 	vm_page_lock(p);
361*2164af29SRuslan Bukin 	vm_page_remove(p);
362*2164af29SRuslan Bukin 	vm_page_unlock(p);
363*2164af29SRuslan Bukin 
364*2164af29SRuslan Bukin 	dprintf("%s: p->pidx %ld\n", __func__, p->pindex);
365*2164af29SRuslan Bukin 
366*2164af29SRuslan Bukin 	pa = VM_PAGE_TO_PHYS(p);
367*2164af29SRuslan Bukin 	epc = &sc->epc_pages[0];
368*2164af29SRuslan Bukin 	offs = (pa - epc->phys) / PAGE_SIZE;
369*2164af29SRuslan Bukin 	epc = &sc->epc_pages[offs];
370*2164af29SRuslan Bukin 
371*2164af29SRuslan Bukin 	sgx_epc_page_remove(sc, epc);
372*2164af29SRuslan Bukin 	sgx_put_epc_page(sc, epc);
373*2164af29SRuslan Bukin }
374*2164af29SRuslan Bukin 
375*2164af29SRuslan Bukin static void
376*2164af29SRuslan Bukin sgx_enclave_remove(struct sgx_softc *sc,
377*2164af29SRuslan Bukin     struct sgx_enclave *enclave)
378*2164af29SRuslan Bukin {
379*2164af29SRuslan Bukin 	vm_object_t object;
380*2164af29SRuslan Bukin 	vm_page_t p, p_secs, p_next;
381*2164af29SRuslan Bukin 
382*2164af29SRuslan Bukin 	mtx_lock(&sc->mtx);
383*2164af29SRuslan Bukin 	TAILQ_REMOVE(&sc->enclaves, enclave, next);
384*2164af29SRuslan Bukin 	mtx_unlock(&sc->mtx);
385*2164af29SRuslan Bukin 
386*2164af29SRuslan Bukin 	object = enclave->object;
387*2164af29SRuslan Bukin 
388*2164af29SRuslan Bukin 	VM_OBJECT_WLOCK(object);
389*2164af29SRuslan Bukin 
390*2164af29SRuslan Bukin 	/*
391*2164af29SRuslan Bukin 	 * First remove all the pages except SECS,
392*2164af29SRuslan Bukin 	 * then remove SECS page.
393*2164af29SRuslan Bukin 	 */
394*2164af29SRuslan Bukin 	p_secs = NULL;
395*2164af29SRuslan Bukin 	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
396*2164af29SRuslan Bukin 		if (p->pindex == SGX_SECS_VM_OBJECT_INDEX) {
397*2164af29SRuslan Bukin 			p_secs = p;
398*2164af29SRuslan Bukin 			continue;
399*2164af29SRuslan Bukin 		}
400*2164af29SRuslan Bukin 		sgx_page_remove(sc, p);
401*2164af29SRuslan Bukin 	}
402*2164af29SRuslan Bukin 	/* Now remove SECS page */
403*2164af29SRuslan Bukin 	if (p_secs != NULL)
404*2164af29SRuslan Bukin 		sgx_page_remove(sc, p_secs);
405*2164af29SRuslan Bukin 
406*2164af29SRuslan Bukin 	KASSERT(TAILQ_EMPTY(&object->memq) == 1, ("not empty"));
407*2164af29SRuslan Bukin 	KASSERT(object->resident_page_count == 0, ("count"));
408*2164af29SRuslan Bukin 
409*2164af29SRuslan Bukin 	VM_OBJECT_WUNLOCK(object);
410*2164af29SRuslan Bukin }
411*2164af29SRuslan Bukin 
412*2164af29SRuslan Bukin static int
413*2164af29SRuslan Bukin sgx_measure_page(struct sgx_softc *sc, struct epc_page *secs,
414*2164af29SRuslan Bukin     struct epc_page *epc, uint16_t mrmask)
415*2164af29SRuslan Bukin {
416*2164af29SRuslan Bukin 	int i, j;
417*2164af29SRuslan Bukin 	int ret;
418*2164af29SRuslan Bukin 
419*2164af29SRuslan Bukin 	mtx_lock(&sc->mtx_encls);
420*2164af29SRuslan Bukin 
421*2164af29SRuslan Bukin 	for (i = 0, j = 1; i < PAGE_SIZE; i += 0x100, j <<= 1) {
422*2164af29SRuslan Bukin 		if (!(j & mrmask))
423*2164af29SRuslan Bukin 			continue;
424*2164af29SRuslan Bukin 
425*2164af29SRuslan Bukin 		ret = sgx_eextend((void *)secs->base,
426*2164af29SRuslan Bukin 		    (void *)(epc->base + i));
427*2164af29SRuslan Bukin 		if (ret == SGX_EFAULT) {
428*2164af29SRuslan Bukin 			mtx_unlock(&sc->mtx_encls);
429*2164af29SRuslan Bukin 			return (ret);
430*2164af29SRuslan Bukin 		}
431*2164af29SRuslan Bukin 	}
432*2164af29SRuslan Bukin 
433*2164af29SRuslan Bukin 	mtx_unlock(&sc->mtx_encls);
434*2164af29SRuslan Bukin 
435*2164af29SRuslan Bukin 	return (0);
436*2164af29SRuslan Bukin }
437*2164af29SRuslan Bukin 
438*2164af29SRuslan Bukin static int
439*2164af29SRuslan Bukin sgx_secs_validate(struct sgx_softc *sc, struct secs *secs)
440*2164af29SRuslan Bukin {
441*2164af29SRuslan Bukin 	struct secs_attr *attr;
442*2164af29SRuslan Bukin 	int i;
443*2164af29SRuslan Bukin 
444*2164af29SRuslan Bukin 	if (secs->size == 0)
445*2164af29SRuslan Bukin 		return (EINVAL);
446*2164af29SRuslan Bukin 
447*2164af29SRuslan Bukin 	/* BASEADDR must be naturally aligned on an SECS.SIZE boundary. */
448*2164af29SRuslan Bukin 	if (secs->base & (secs->size - 1))
449*2164af29SRuslan Bukin 		return (EINVAL);
450*2164af29SRuslan Bukin 
451*2164af29SRuslan Bukin 	/* SECS.SIZE must be at least 2 pages. */
452*2164af29SRuslan Bukin 	if (secs->size < 2 * PAGE_SIZE)
453*2164af29SRuslan Bukin 		return (EINVAL);
454*2164af29SRuslan Bukin 
455*2164af29SRuslan Bukin 	if ((secs->size & (secs->size - 1)) != 0)
456*2164af29SRuslan Bukin 		return (EINVAL);
457*2164af29SRuslan Bukin 
458*2164af29SRuslan Bukin 	attr = &secs->attributes;
459*2164af29SRuslan Bukin 
460*2164af29SRuslan Bukin 	if (attr->reserved1 != 0 ||
461*2164af29SRuslan Bukin 	    attr->reserved2 != 0 ||
462*2164af29SRuslan Bukin 	    attr->reserved3 != 0)
463*2164af29SRuslan Bukin 		return (EINVAL);
464*2164af29SRuslan Bukin 
465*2164af29SRuslan Bukin 	for (i = 0; i < SECS_ATTR_RSV4_SIZE; i++)
466*2164af29SRuslan Bukin 		if (attr->reserved4[i])
467*2164af29SRuslan Bukin 			return (EINVAL);
468*2164af29SRuslan Bukin 
469*2164af29SRuslan Bukin 	/*
470*2164af29SRuslan Bukin 	 * Intel® Software Guard Extensions Programming Reference
471*2164af29SRuslan Bukin 	 * 6.7.2 Relevant Fields in Various Data Structures
472*2164af29SRuslan Bukin 	 * 6.7.2.1 SECS.ATTRIBUTES.XFRM
473*2164af29SRuslan Bukin 	 * XFRM[1:0] must be set to 0x3.
474*2164af29SRuslan Bukin 	 */
475*2164af29SRuslan Bukin 	if ((attr->xfrm & 0x3) != 0x3)
476*2164af29SRuslan Bukin 		return (EINVAL);
477*2164af29SRuslan Bukin 
478*2164af29SRuslan Bukin 	if (!attr->mode64bit)
479*2164af29SRuslan Bukin 		return (EINVAL);
480*2164af29SRuslan Bukin 
481*2164af29SRuslan Bukin 	if (secs->size > sc->enclave_size_max)
482*2164af29SRuslan Bukin 		return (EINVAL);
483*2164af29SRuslan Bukin 
484*2164af29SRuslan Bukin 	for (i = 0; i < SECS_RSV1_SIZE; i++)
485*2164af29SRuslan Bukin 		if (secs->reserved1[i])
486*2164af29SRuslan Bukin 			return (EINVAL);
487*2164af29SRuslan Bukin 
488*2164af29SRuslan Bukin 	for (i = 0; i < SECS_RSV2_SIZE; i++)
489*2164af29SRuslan Bukin 		if (secs->reserved2[i])
490*2164af29SRuslan Bukin 			return (EINVAL);
491*2164af29SRuslan Bukin 
492*2164af29SRuslan Bukin 	for (i = 0; i < SECS_RSV3_SIZE; i++)
493*2164af29SRuslan Bukin 		if (secs->reserved3[i])
494*2164af29SRuslan Bukin 			return (EINVAL);
495*2164af29SRuslan Bukin 
496*2164af29SRuslan Bukin 	for (i = 0; i < SECS_RSV4_SIZE; i++)
497*2164af29SRuslan Bukin 		if (secs->reserved4[i])
498*2164af29SRuslan Bukin 			return (EINVAL);
499*2164af29SRuslan Bukin 
500*2164af29SRuslan Bukin 	return (0);
501*2164af29SRuslan Bukin }
502*2164af29SRuslan Bukin 
503*2164af29SRuslan Bukin static int
504*2164af29SRuslan Bukin sgx_tcs_validate(struct tcs *tcs)
505*2164af29SRuslan Bukin {
506*2164af29SRuslan Bukin 	int i;
507*2164af29SRuslan Bukin 
508*2164af29SRuslan Bukin 	if ((tcs->flags) ||
509*2164af29SRuslan Bukin 	    (tcs->ossa & (PAGE_SIZE - 1)) ||
510*2164af29SRuslan Bukin 	    (tcs->ofsbasgx & (PAGE_SIZE - 1)) ||
511*2164af29SRuslan Bukin 	    (tcs->ogsbasgx & (PAGE_SIZE - 1)) ||
512*2164af29SRuslan Bukin 	    ((tcs->fslimit & 0xfff) != 0xfff) ||
513*2164af29SRuslan Bukin 	    ((tcs->gslimit & 0xfff) != 0xfff))
514*2164af29SRuslan Bukin 		return (EINVAL);
515*2164af29SRuslan Bukin 
516*2164af29SRuslan Bukin 	for (i = 0; i < nitems(tcs->reserved3); i++)
517*2164af29SRuslan Bukin 		if (tcs->reserved3[i])
518*2164af29SRuslan Bukin 			return (EINVAL);
519*2164af29SRuslan Bukin 
520*2164af29SRuslan Bukin 	return (0);
521*2164af29SRuslan Bukin }
522*2164af29SRuslan Bukin 
/*
 * Dump the fields of a TCS page for debugging.  dprintf() compiles
 * to nothing unless DEBUG is defined, so this is a no-op in normal
 * builds.
 */
static void
sgx_tcs_dump(struct sgx_softc *sc, struct tcs *t)
{

	dprintf("t->flags %lx\n", t->flags);
	dprintf("t->ossa %lx\n", t->ossa);
	dprintf("t->cssa %x\n", t->cssa);
	dprintf("t->nssa %x\n", t->nssa);
	dprintf("t->oentry %lx\n", t->oentry);
	dprintf("t->ofsbasgx %lx\n", t->ofsbasgx);
	dprintf("t->ogsbasgx %lx\n", t->ogsbasgx);
	dprintf("t->fslimit %x\n", t->fslimit);
	dprintf("t->gslimit %x\n", t->gslimit);
}
537*2164af29SRuslan Bukin 
538*2164af29SRuslan Bukin static int
539*2164af29SRuslan Bukin sgx_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
540*2164af29SRuslan Bukin     vm_ooffset_t foff, struct ucred *cred, u_short *color)
541*2164af29SRuslan Bukin {
542*2164af29SRuslan Bukin 	struct sgx_vm_handle *vmh;
543*2164af29SRuslan Bukin 
544*2164af29SRuslan Bukin 	vmh = handle;
545*2164af29SRuslan Bukin 	if (vmh == NULL) {
546*2164af29SRuslan Bukin 		dprintf("%s: vmh not found.\n", __func__);
547*2164af29SRuslan Bukin 		return (0);
548*2164af29SRuslan Bukin 	}
549*2164af29SRuslan Bukin 
550*2164af29SRuslan Bukin 	dprintf("%s: vmh->base %lx foff 0x%lx size 0x%lx\n",
551*2164af29SRuslan Bukin 	    __func__, vmh->base, foff, size);
552*2164af29SRuslan Bukin 
553*2164af29SRuslan Bukin 	return (0);
554*2164af29SRuslan Bukin }
555*2164af29SRuslan Bukin 
556*2164af29SRuslan Bukin static void
557*2164af29SRuslan Bukin sgx_pg_dtor(void *handle)
558*2164af29SRuslan Bukin {
559*2164af29SRuslan Bukin 	struct sgx_vm_handle *vmh;
560*2164af29SRuslan Bukin 	struct sgx_softc *sc;
561*2164af29SRuslan Bukin 
562*2164af29SRuslan Bukin 	vmh = handle;
563*2164af29SRuslan Bukin 	if (vmh == NULL) {
564*2164af29SRuslan Bukin 		dprintf("%s: vmh not found.\n", __func__);
565*2164af29SRuslan Bukin 		return;
566*2164af29SRuslan Bukin 	}
567*2164af29SRuslan Bukin 
568*2164af29SRuslan Bukin 	sc = vmh->sc;
569*2164af29SRuslan Bukin 	if (sc == NULL) {
570*2164af29SRuslan Bukin 		dprintf("%s: sc is NULL\n", __func__);
571*2164af29SRuslan Bukin 		return;
572*2164af29SRuslan Bukin 	}
573*2164af29SRuslan Bukin 
574*2164af29SRuslan Bukin 	if (vmh->enclave == NULL) {
575*2164af29SRuslan Bukin 		dprintf("%s: Enclave not found.\n", __func__);
576*2164af29SRuslan Bukin 		return;
577*2164af29SRuslan Bukin 	}
578*2164af29SRuslan Bukin 
579*2164af29SRuslan Bukin 	sgx_enclave_remove(sc, vmh->enclave);
580*2164af29SRuslan Bukin 
581*2164af29SRuslan Bukin 	free(vmh->enclave, M_SGX);
582*2164af29SRuslan Bukin 	free(vmh, M_SGX);
583*2164af29SRuslan Bukin }
584*2164af29SRuslan Bukin 
/*
 * Pager fault handler for enclave VM objects.  Always fails the
 * fault: pages are inserted into the object explicitly by the ioctl
 * path, never on demand.
 */
static int
sgx_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	/*
	 * The purpose of this trivial handler is to handle the race
	 * when user tries to access mmaped region before or during
	 * enclave creation ioctl calls.
	 */

	dprintf("%s: offset 0x%lx\n", __func__, offset);

	return (VM_PAGER_FAIL);
}
600*2164af29SRuslan Bukin 
/*
 * Pager operations installed on enclave VM objects (OBJT_MGTDEVICE).
 * Also used by sgx_mem_find() to recognize objects created by this
 * driver.
 */
static struct cdev_pager_ops sgx_pg_ops = {
	.cdev_pg_ctor = sgx_pg_ctor,
	.cdev_pg_dtor = sgx_pg_dtor,
	.cdev_pg_fault = sgx_pg_fault,
};
606*2164af29SRuslan Bukin 
607*2164af29SRuslan Bukin 
/*
 * Insert the vm_page backing an EPC page into a VM object at the
 * given index and mark it fully valid.  The object write lock must
 * be held.
 */
static void
sgx_insert_epc_page_by_index(vm_page_t page, vm_object_t object,
    vm_pindex_t pidx)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	vm_page_insert(page, object, pidx);
	page->valid = VM_PAGE_BITS_ALL;
}
618*2164af29SRuslan Bukin 
619*2164af29SRuslan Bukin static void
620*2164af29SRuslan Bukin sgx_insert_epc_page(struct sgx_enclave *enclave,
621*2164af29SRuslan Bukin     struct epc_page *epc, uint64_t addr)
622*2164af29SRuslan Bukin {
623*2164af29SRuslan Bukin 	vm_pindex_t pidx;
624*2164af29SRuslan Bukin 	vm_page_t page;
625*2164af29SRuslan Bukin 
626*2164af29SRuslan Bukin 	VM_OBJECT_ASSERT_WLOCKED(enclave->object);
627*2164af29SRuslan Bukin 
628*2164af29SRuslan Bukin 	pidx = OFF_TO_IDX(addr);
629*2164af29SRuslan Bukin 	page = PHYS_TO_VM_PAGE(epc->phys);
630*2164af29SRuslan Bukin 
631*2164af29SRuslan Bukin 	sgx_insert_epc_page_by_index(page, enclave->object, pidx);
632*2164af29SRuslan Bukin }
633*2164af29SRuslan Bukin 
634*2164af29SRuslan Bukin static int
635*2164af29SRuslan Bukin sgx_ioctl_create(struct sgx_softc *sc, struct sgx_enclave_create *param)
636*2164af29SRuslan Bukin {
637*2164af29SRuslan Bukin 	struct sgx_vm_handle *vmh;
638*2164af29SRuslan Bukin 	vm_map_entry_t entry;
639*2164af29SRuslan Bukin 	vm_page_t p;
640*2164af29SRuslan Bukin 	struct page_info pginfo;
641*2164af29SRuslan Bukin 	struct secinfo secinfo;
642*2164af29SRuslan Bukin 	struct sgx_enclave *enclave;
643*2164af29SRuslan Bukin 	struct epc_page *epc;
644*2164af29SRuslan Bukin 	struct secs *secs;
645*2164af29SRuslan Bukin 	vm_object_t object;
646*2164af29SRuslan Bukin 	vm_page_t page;
647*2164af29SRuslan Bukin 	int ret;
648*2164af29SRuslan Bukin 
649*2164af29SRuslan Bukin 	epc = NULL;
650*2164af29SRuslan Bukin 	secs = NULL;
651*2164af29SRuslan Bukin 	enclave = NULL;
652*2164af29SRuslan Bukin 	object = NULL;
653*2164af29SRuslan Bukin 
654*2164af29SRuslan Bukin 	/* SGX Enclave Control Structure (SECS) */
655*2164af29SRuslan Bukin 	secs = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
656*2164af29SRuslan Bukin 	ret = copyin((void *)param->src, secs, sizeof(struct secs));
657*2164af29SRuslan Bukin 	if (ret) {
658*2164af29SRuslan Bukin 		dprintf("%s: Can't copy SECS.\n", __func__);
659*2164af29SRuslan Bukin 		goto error;
660*2164af29SRuslan Bukin 	}
661*2164af29SRuslan Bukin 
662*2164af29SRuslan Bukin 	ret = sgx_secs_validate(sc, secs);
663*2164af29SRuslan Bukin 	if (ret) {
664*2164af29SRuslan Bukin 		dprintf("%s: SECS validation failed.\n", __func__);
665*2164af29SRuslan Bukin 		goto error;
666*2164af29SRuslan Bukin 	}
667*2164af29SRuslan Bukin 
668*2164af29SRuslan Bukin 	ret = sgx_mem_find(sc, secs->base, &entry, &object);
669*2164af29SRuslan Bukin 	if (ret) {
670*2164af29SRuslan Bukin 		dprintf("%s: Can't find vm_map.\n", __func__);
671*2164af29SRuslan Bukin 		goto error;
672*2164af29SRuslan Bukin 	}
673*2164af29SRuslan Bukin 
674*2164af29SRuslan Bukin 	vmh = object->handle;
675*2164af29SRuslan Bukin 	if (!vmh) {
676*2164af29SRuslan Bukin 		dprintf("%s: Can't find vmh.\n", __func__);
677*2164af29SRuslan Bukin 		ret = ENXIO;
678*2164af29SRuslan Bukin 		goto error;
679*2164af29SRuslan Bukin 	}
680*2164af29SRuslan Bukin 
681*2164af29SRuslan Bukin 	dprintf("%s: entry start %lx offset %lx\n",
682*2164af29SRuslan Bukin 	    __func__, entry->start, entry->offset);
683*2164af29SRuslan Bukin 	vmh->base = (entry->start - entry->offset);
684*2164af29SRuslan Bukin 
685*2164af29SRuslan Bukin 	ret = sgx_enclave_alloc(sc, secs, &enclave);
686*2164af29SRuslan Bukin 	if (ret) {
687*2164af29SRuslan Bukin 		dprintf("%s: Can't alloc enclave.\n", __func__);
688*2164af29SRuslan Bukin 		goto error;
689*2164af29SRuslan Bukin 	}
690*2164af29SRuslan Bukin 	enclave->object = object;
691*2164af29SRuslan Bukin 	enclave->vmh = vmh;
692*2164af29SRuslan Bukin 
693*2164af29SRuslan Bukin 	memset(&secinfo, 0, sizeof(struct secinfo));
694*2164af29SRuslan Bukin 	memset(&pginfo, 0, sizeof(struct page_info));
695*2164af29SRuslan Bukin 	pginfo.linaddr = 0;
696*2164af29SRuslan Bukin 	pginfo.srcpge = (uint64_t)secs;
697*2164af29SRuslan Bukin 	pginfo.secinfo = &secinfo;
698*2164af29SRuslan Bukin 	pginfo.secs = 0;
699*2164af29SRuslan Bukin 
700*2164af29SRuslan Bukin 	ret = sgx_get_epc_page(sc, &epc);
701*2164af29SRuslan Bukin 	if (ret) {
702*2164af29SRuslan Bukin 		dprintf("%s: Failed to get free epc page.\n", __func__);
703*2164af29SRuslan Bukin 		goto error;
704*2164af29SRuslan Bukin 	}
705*2164af29SRuslan Bukin 	enclave->secs_epc_page = epc;
706*2164af29SRuslan Bukin 
707*2164af29SRuslan Bukin 	VM_OBJECT_WLOCK(object);
708*2164af29SRuslan Bukin 	p = vm_page_lookup(object, SGX_SECS_VM_OBJECT_INDEX);
709*2164af29SRuslan Bukin 	if (p) {
710*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
711*2164af29SRuslan Bukin 		/* SECS page already added. */
712*2164af29SRuslan Bukin 		ret = ENXIO;
713*2164af29SRuslan Bukin 		goto error;
714*2164af29SRuslan Bukin 	}
715*2164af29SRuslan Bukin 
716*2164af29SRuslan Bukin 	ret = sgx_va_slot_init_by_index(sc, object,
717*2164af29SRuslan Bukin 	    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
718*2164af29SRuslan Bukin 	if (ret) {
719*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
720*2164af29SRuslan Bukin 		dprintf("%s: Can't init va slot.\n", __func__);
721*2164af29SRuslan Bukin 		goto error;
722*2164af29SRuslan Bukin 	}
723*2164af29SRuslan Bukin 
724*2164af29SRuslan Bukin 	mtx_lock(&sc->mtx);
725*2164af29SRuslan Bukin 	if ((sc->state & SGX_STATE_RUNNING) == 0) {
726*2164af29SRuslan Bukin 		mtx_unlock(&sc->mtx);
727*2164af29SRuslan Bukin 		/* Remove VA page that was just created for SECS page. */
728*2164af29SRuslan Bukin 		p = vm_page_lookup(enclave->object,
729*2164af29SRuslan Bukin 		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
730*2164af29SRuslan Bukin 		sgx_page_remove(sc, p);
731*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
732*2164af29SRuslan Bukin 		goto error;
733*2164af29SRuslan Bukin 	}
734*2164af29SRuslan Bukin 	mtx_lock(&sc->mtx_encls);
735*2164af29SRuslan Bukin 	ret = sgx_ecreate(&pginfo, (void *)epc->base);
736*2164af29SRuslan Bukin 	mtx_unlock(&sc->mtx_encls);
737*2164af29SRuslan Bukin 	if (ret == SGX_EFAULT) {
738*2164af29SRuslan Bukin 		dprintf("%s: gp fault\n", __func__);
739*2164af29SRuslan Bukin 		mtx_unlock(&sc->mtx);
740*2164af29SRuslan Bukin 		/* Remove VA page that was just created for SECS page. */
741*2164af29SRuslan Bukin 		p = vm_page_lookup(enclave->object,
742*2164af29SRuslan Bukin 		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
743*2164af29SRuslan Bukin 		sgx_page_remove(sc, p);
744*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
745*2164af29SRuslan Bukin 		goto error;
746*2164af29SRuslan Bukin 	}
747*2164af29SRuslan Bukin 
748*2164af29SRuslan Bukin 	TAILQ_INSERT_TAIL(&sc->enclaves, enclave, next);
749*2164af29SRuslan Bukin 	mtx_unlock(&sc->mtx);
750*2164af29SRuslan Bukin 
751*2164af29SRuslan Bukin 	vmh->enclave = enclave;
752*2164af29SRuslan Bukin 
753*2164af29SRuslan Bukin 	page = PHYS_TO_VM_PAGE(epc->phys);
754*2164af29SRuslan Bukin 	sgx_insert_epc_page_by_index(page, enclave->object,
755*2164af29SRuslan Bukin 	    SGX_SECS_VM_OBJECT_INDEX);
756*2164af29SRuslan Bukin 
757*2164af29SRuslan Bukin 	VM_OBJECT_WUNLOCK(object);
758*2164af29SRuslan Bukin 
759*2164af29SRuslan Bukin 	/* Release the reference. */
760*2164af29SRuslan Bukin 	vm_object_deallocate(object);
761*2164af29SRuslan Bukin 
762*2164af29SRuslan Bukin 	free(secs, M_SGX);
763*2164af29SRuslan Bukin 
764*2164af29SRuslan Bukin 	return (0);
765*2164af29SRuslan Bukin 
766*2164af29SRuslan Bukin error:
767*2164af29SRuslan Bukin 	free(secs, M_SGX);
768*2164af29SRuslan Bukin 	sgx_put_epc_page(sc, epc);
769*2164af29SRuslan Bukin 	free(enclave, M_SGX);
770*2164af29SRuslan Bukin 	vm_object_deallocate(object);
771*2164af29SRuslan Bukin 
772*2164af29SRuslan Bukin 	return (ret);
773*2164af29SRuslan Bukin }
774*2164af29SRuslan Bukin 
775*2164af29SRuslan Bukin static int
776*2164af29SRuslan Bukin sgx_ioctl_add_page(struct sgx_softc *sc,
777*2164af29SRuslan Bukin     struct sgx_enclave_add_page *addp)
778*2164af29SRuslan Bukin {
779*2164af29SRuslan Bukin 	struct epc_page *secs_epc_page;
780*2164af29SRuslan Bukin 	struct sgx_enclave *enclave;
781*2164af29SRuslan Bukin 	struct sgx_vm_handle *vmh;
782*2164af29SRuslan Bukin 	struct epc_page *epc;
783*2164af29SRuslan Bukin 	struct page_info pginfo;
784*2164af29SRuslan Bukin 	struct secinfo secinfo;
785*2164af29SRuslan Bukin 	vm_object_t object;
786*2164af29SRuslan Bukin 	void *tmp_vaddr;
787*2164af29SRuslan Bukin 	uint64_t page_type;
788*2164af29SRuslan Bukin 	struct tcs *t;
789*2164af29SRuslan Bukin 	uint64_t addr;
790*2164af29SRuslan Bukin 	uint64_t pidx;
791*2164af29SRuslan Bukin 	vm_page_t p;
792*2164af29SRuslan Bukin 	int ret;
793*2164af29SRuslan Bukin 
794*2164af29SRuslan Bukin 	tmp_vaddr = NULL;
795*2164af29SRuslan Bukin 	epc = NULL;
796*2164af29SRuslan Bukin 	object = NULL;
797*2164af29SRuslan Bukin 
798*2164af29SRuslan Bukin 	/* Find and get reference to VM object. */
799*2164af29SRuslan Bukin 	ret = sgx_enclave_find(sc, addp->addr, &enclave);
800*2164af29SRuslan Bukin 	if (ret) {
801*2164af29SRuslan Bukin 		dprintf("%s: Failed to find enclave.\n", __func__);
802*2164af29SRuslan Bukin 		goto error;
803*2164af29SRuslan Bukin 	}
804*2164af29SRuslan Bukin 
805*2164af29SRuslan Bukin 	object = enclave->object;
806*2164af29SRuslan Bukin 	KASSERT(object != NULL, ("vm object is NULL\n"));
807*2164af29SRuslan Bukin 	vmh = object->handle;
808*2164af29SRuslan Bukin 
809*2164af29SRuslan Bukin 	ret = sgx_get_epc_page(sc, &epc);
810*2164af29SRuslan Bukin 	if (ret) {
811*2164af29SRuslan Bukin 		dprintf("%s: Failed to get free epc page.\n", __func__);
812*2164af29SRuslan Bukin 		goto error;
813*2164af29SRuslan Bukin 	}
814*2164af29SRuslan Bukin 
815*2164af29SRuslan Bukin 	memset(&secinfo, 0, sizeof(struct secinfo));
816*2164af29SRuslan Bukin 	ret = copyin((void *)addp->secinfo, &secinfo,
817*2164af29SRuslan Bukin 	    sizeof(struct secinfo));
818*2164af29SRuslan Bukin 	if (ret) {
819*2164af29SRuslan Bukin 		dprintf("%s: Failed to copy secinfo.\n", __func__);
820*2164af29SRuslan Bukin 		goto error;
821*2164af29SRuslan Bukin 	}
822*2164af29SRuslan Bukin 
823*2164af29SRuslan Bukin 	tmp_vaddr = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
824*2164af29SRuslan Bukin 	ret = copyin((void *)addp->src, tmp_vaddr, PAGE_SIZE);
825*2164af29SRuslan Bukin 	if (ret) {
826*2164af29SRuslan Bukin 		dprintf("%s: Failed to copy page.\n", __func__);
827*2164af29SRuslan Bukin 		goto error;
828*2164af29SRuslan Bukin 	}
829*2164af29SRuslan Bukin 
830*2164af29SRuslan Bukin 	page_type = (secinfo.flags & SECINFO_FLAGS_PT_M) >>
831*2164af29SRuslan Bukin 	    SECINFO_FLAGS_PT_S;
832*2164af29SRuslan Bukin 	if (page_type != SGX_PT_TCS && page_type != SGX_PT_REG) {
833*2164af29SRuslan Bukin 		dprintf("%s: page can't be added.\n", __func__);
834*2164af29SRuslan Bukin 		goto error;
835*2164af29SRuslan Bukin 	}
836*2164af29SRuslan Bukin 	if (page_type == SGX_PT_TCS) {
837*2164af29SRuslan Bukin 		t = (struct tcs *)tmp_vaddr;
838*2164af29SRuslan Bukin 		ret = sgx_tcs_validate(t);
839*2164af29SRuslan Bukin 		if (ret) {
840*2164af29SRuslan Bukin 			dprintf("%s: TCS page validation failed.\n",
841*2164af29SRuslan Bukin 			    __func__);
842*2164af29SRuslan Bukin 			goto error;
843*2164af29SRuslan Bukin 		}
844*2164af29SRuslan Bukin 		sgx_tcs_dump(sc, t);
845*2164af29SRuslan Bukin 	}
846*2164af29SRuslan Bukin 
847*2164af29SRuslan Bukin 	addr = (addp->addr - vmh->base);
848*2164af29SRuslan Bukin 	pidx = OFF_TO_IDX(addr);
849*2164af29SRuslan Bukin 
850*2164af29SRuslan Bukin 	VM_OBJECT_WLOCK(object);
851*2164af29SRuslan Bukin 	p = vm_page_lookup(object, pidx);
852*2164af29SRuslan Bukin 	if (p) {
853*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
854*2164af29SRuslan Bukin 		/* Page already added. */
855*2164af29SRuslan Bukin 		ret = ENXIO;
856*2164af29SRuslan Bukin 		goto error;
857*2164af29SRuslan Bukin 	}
858*2164af29SRuslan Bukin 
859*2164af29SRuslan Bukin 	ret = sgx_va_slot_init(sc, enclave, addr);
860*2164af29SRuslan Bukin 	if (ret) {
861*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
862*2164af29SRuslan Bukin 		dprintf("%s: Can't init va slot.\n", __func__);
863*2164af29SRuslan Bukin 		goto error;
864*2164af29SRuslan Bukin 	}
865*2164af29SRuslan Bukin 
866*2164af29SRuslan Bukin 	secs_epc_page = enclave->secs_epc_page;
867*2164af29SRuslan Bukin 	memset(&pginfo, 0, sizeof(struct page_info));
868*2164af29SRuslan Bukin 	pginfo.linaddr = (uint64_t)addp->addr;
869*2164af29SRuslan Bukin 	pginfo.srcpge = (uint64_t)tmp_vaddr;
870*2164af29SRuslan Bukin 	pginfo.secinfo = &secinfo;
871*2164af29SRuslan Bukin 	pginfo.secs = (uint64_t)secs_epc_page->base;
872*2164af29SRuslan Bukin 
873*2164af29SRuslan Bukin 	mtx_lock(&sc->mtx_encls);
874*2164af29SRuslan Bukin 	ret = sgx_eadd(&pginfo, (void *)epc->base);
875*2164af29SRuslan Bukin 	if (ret == SGX_EFAULT) {
876*2164af29SRuslan Bukin 		dprintf("%s: gp fault on eadd\n", __func__);
877*2164af29SRuslan Bukin 		mtx_unlock(&sc->mtx_encls);
878*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
879*2164af29SRuslan Bukin 		goto error;
880*2164af29SRuslan Bukin 	}
881*2164af29SRuslan Bukin 	mtx_unlock(&sc->mtx_encls);
882*2164af29SRuslan Bukin 
883*2164af29SRuslan Bukin 	ret = sgx_measure_page(sc, enclave->secs_epc_page, epc, addp->mrmask);
884*2164af29SRuslan Bukin 	if (ret == SGX_EFAULT) {
885*2164af29SRuslan Bukin 		dprintf("%s: gp fault on eextend\n", __func__);
886*2164af29SRuslan Bukin 		sgx_epc_page_remove(sc, epc);
887*2164af29SRuslan Bukin 		VM_OBJECT_WUNLOCK(object);
888*2164af29SRuslan Bukin 		goto error;
889*2164af29SRuslan Bukin 	}
890*2164af29SRuslan Bukin 
891*2164af29SRuslan Bukin 	sgx_insert_epc_page(enclave, epc, addr);
892*2164af29SRuslan Bukin 
893*2164af29SRuslan Bukin 	VM_OBJECT_WUNLOCK(object);
894*2164af29SRuslan Bukin 
895*2164af29SRuslan Bukin 	/* Release the reference. */
896*2164af29SRuslan Bukin 	vm_object_deallocate(object);
897*2164af29SRuslan Bukin 
898*2164af29SRuslan Bukin 	free(tmp_vaddr, M_SGX);
899*2164af29SRuslan Bukin 
900*2164af29SRuslan Bukin 	return (0);
901*2164af29SRuslan Bukin 
902*2164af29SRuslan Bukin error:
903*2164af29SRuslan Bukin 	free(tmp_vaddr, M_SGX);
904*2164af29SRuslan Bukin 	sgx_put_epc_page(sc, epc);
905*2164af29SRuslan Bukin 	vm_object_deallocate(object);
906*2164af29SRuslan Bukin 
907*2164af29SRuslan Bukin 	return (ret);
908*2164af29SRuslan Bukin }
909*2164af29SRuslan Bukin 
910*2164af29SRuslan Bukin static int
911*2164af29SRuslan Bukin sgx_ioctl_init(struct sgx_softc *sc, struct sgx_enclave_init *initp)
912*2164af29SRuslan Bukin {
913*2164af29SRuslan Bukin 	struct epc_page *secs_epc_page;
914*2164af29SRuslan Bukin 	struct sgx_enclave *enclave;
915*2164af29SRuslan Bukin 	struct thread *td;
916*2164af29SRuslan Bukin 	void *tmp_vaddr;
917*2164af29SRuslan Bukin 	void *einittoken;
918*2164af29SRuslan Bukin 	void *sigstruct;
919*2164af29SRuslan Bukin 	vm_object_t object;
920*2164af29SRuslan Bukin 	int retry;
921*2164af29SRuslan Bukin 	int ret;
922*2164af29SRuslan Bukin 
923*2164af29SRuslan Bukin 	td = curthread;
924*2164af29SRuslan Bukin 	tmp_vaddr = NULL;
925*2164af29SRuslan Bukin 	object = NULL;
926*2164af29SRuslan Bukin 
927*2164af29SRuslan Bukin 	dprintf("%s: addr %lx, sigstruct %lx, einittoken %lx\n",
928*2164af29SRuslan Bukin 	    __func__, initp->addr, initp->sigstruct, initp->einittoken);
929*2164af29SRuslan Bukin 
930*2164af29SRuslan Bukin 	/* Find and get reference to VM object. */
931*2164af29SRuslan Bukin 	ret = sgx_enclave_find(sc, initp->addr, &enclave);
932*2164af29SRuslan Bukin 	if (ret) {
933*2164af29SRuslan Bukin 		dprintf("%s: Failed to find enclave.\n", __func__);
934*2164af29SRuslan Bukin 		goto error;
935*2164af29SRuslan Bukin 	}
936*2164af29SRuslan Bukin 
937*2164af29SRuslan Bukin 	object = enclave->object;
938*2164af29SRuslan Bukin 
939*2164af29SRuslan Bukin 	tmp_vaddr = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
940*2164af29SRuslan Bukin 	sigstruct = tmp_vaddr;
941*2164af29SRuslan Bukin 	einittoken = (void *)((uint64_t)sigstruct + PAGE_SIZE / 2);
942*2164af29SRuslan Bukin 
943*2164af29SRuslan Bukin 	ret = copyin((void *)initp->sigstruct, sigstruct,
944*2164af29SRuslan Bukin 	    SGX_SIGSTRUCT_SIZE);
945*2164af29SRuslan Bukin 	if (ret) {
946*2164af29SRuslan Bukin 		dprintf("%s: Failed to copy SIGSTRUCT page.\n", __func__);
947*2164af29SRuslan Bukin 		goto error;
948*2164af29SRuslan Bukin 	}
949*2164af29SRuslan Bukin 
950*2164af29SRuslan Bukin 	ret = copyin((void *)initp->einittoken, einittoken,
951*2164af29SRuslan Bukin 	    SGX_EINITTOKEN_SIZE);
952*2164af29SRuslan Bukin 	if (ret) {
953*2164af29SRuslan Bukin 		dprintf("%s: Failed to copy EINITTOKEN page.\n", __func__);
954*2164af29SRuslan Bukin 		goto error;
955*2164af29SRuslan Bukin 	}
956*2164af29SRuslan Bukin 
957*2164af29SRuslan Bukin 	secs_epc_page = enclave->secs_epc_page;
958*2164af29SRuslan Bukin 	retry = 16;
959*2164af29SRuslan Bukin 	do {
960*2164af29SRuslan Bukin 		mtx_lock(&sc->mtx_encls);
961*2164af29SRuslan Bukin 		ret = sgx_einit(sigstruct, (void *)secs_epc_page->base,
962*2164af29SRuslan Bukin 		    einittoken);
963*2164af29SRuslan Bukin 		mtx_unlock(&sc->mtx_encls);
964*2164af29SRuslan Bukin 		dprintf("%s: sgx_einit returned %d\n", __func__, ret);
965*2164af29SRuslan Bukin 	} while (ret == SGX_UNMASKED_EVENT && retry--);
966*2164af29SRuslan Bukin 
967*2164af29SRuslan Bukin 	if (ret) {
968*2164af29SRuslan Bukin 		dprintf("%s: Failed init enclave: %d\n", __func__, ret);
969*2164af29SRuslan Bukin 		td->td_retval[0] = ret;
970*2164af29SRuslan Bukin 		ret = 0;
971*2164af29SRuslan Bukin 	}
972*2164af29SRuslan Bukin 
973*2164af29SRuslan Bukin error:
974*2164af29SRuslan Bukin 	free(tmp_vaddr, M_SGX);
975*2164af29SRuslan Bukin 
976*2164af29SRuslan Bukin 	/* Release the reference. */
977*2164af29SRuslan Bukin 	vm_object_deallocate(object);
978*2164af29SRuslan Bukin 
979*2164af29SRuslan Bukin 	return (ret);
980*2164af29SRuslan Bukin }
981*2164af29SRuslan Bukin 
982*2164af29SRuslan Bukin static int
983*2164af29SRuslan Bukin sgx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
984*2164af29SRuslan Bukin     struct thread *td)
985*2164af29SRuslan Bukin {
986*2164af29SRuslan Bukin 	struct sgx_enclave_add_page *addp;
987*2164af29SRuslan Bukin 	struct sgx_enclave_create *param;
988*2164af29SRuslan Bukin 	struct sgx_enclave_init *initp;
989*2164af29SRuslan Bukin 	struct sgx_softc *sc;
990*2164af29SRuslan Bukin 	int ret;
991*2164af29SRuslan Bukin 	int len;
992*2164af29SRuslan Bukin 
993*2164af29SRuslan Bukin 	sc = &sgx_sc;
994*2164af29SRuslan Bukin 
995*2164af29SRuslan Bukin 	len = IOCPARM_LEN(cmd);
996*2164af29SRuslan Bukin 
997*2164af29SRuslan Bukin 	dprintf("%s: cmd %lx, addr %lx, len %d\n",
998*2164af29SRuslan Bukin 	    __func__, cmd, (uint64_t)addr, len);
999*2164af29SRuslan Bukin 
1000*2164af29SRuslan Bukin 	if (len > SGX_IOCTL_MAX_DATA_LEN)
1001*2164af29SRuslan Bukin 		return (EINVAL);
1002*2164af29SRuslan Bukin 
1003*2164af29SRuslan Bukin 	switch (cmd) {
1004*2164af29SRuslan Bukin 	case SGX_IOC_ENCLAVE_CREATE:
1005*2164af29SRuslan Bukin 		param = (struct sgx_enclave_create *)addr;
1006*2164af29SRuslan Bukin 		ret = sgx_ioctl_create(sc, param);
1007*2164af29SRuslan Bukin 		break;
1008*2164af29SRuslan Bukin 	case SGX_IOC_ENCLAVE_ADD_PAGE:
1009*2164af29SRuslan Bukin 		addp = (struct sgx_enclave_add_page *)addr;
1010*2164af29SRuslan Bukin 		ret = sgx_ioctl_add_page(sc, addp);
1011*2164af29SRuslan Bukin 		break;
1012*2164af29SRuslan Bukin 	case SGX_IOC_ENCLAVE_INIT:
1013*2164af29SRuslan Bukin 		initp = (struct sgx_enclave_init *)addr;
1014*2164af29SRuslan Bukin 		ret = sgx_ioctl_init(sc, initp);
1015*2164af29SRuslan Bukin 		break;
1016*2164af29SRuslan Bukin 	default:
1017*2164af29SRuslan Bukin 		return (EINVAL);
1018*2164af29SRuslan Bukin 	}
1019*2164af29SRuslan Bukin 
1020*2164af29SRuslan Bukin 	return (ret);
1021*2164af29SRuslan Bukin }
1022*2164af29SRuslan Bukin 
1023*2164af29SRuslan Bukin static int
1024*2164af29SRuslan Bukin sgx_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
1025*2164af29SRuslan Bukin     vm_size_t mapsize, struct vm_object **objp, int nprot)
1026*2164af29SRuslan Bukin {
1027*2164af29SRuslan Bukin 	struct sgx_vm_handle *vmh;
1028*2164af29SRuslan Bukin 	struct sgx_softc *sc;
1029*2164af29SRuslan Bukin 
1030*2164af29SRuslan Bukin 	sc = &sgx_sc;
1031*2164af29SRuslan Bukin 
1032*2164af29SRuslan Bukin 	dprintf("%s: mapsize 0x%lx, offset %lx\n",
1033*2164af29SRuslan Bukin 	    __func__, mapsize, *offset);
1034*2164af29SRuslan Bukin 
1035*2164af29SRuslan Bukin 	vmh = malloc(sizeof(struct sgx_vm_handle),
1036*2164af29SRuslan Bukin 	    M_SGX, M_WAITOK | M_ZERO);
1037*2164af29SRuslan Bukin 	vmh->sc = sc;
1038*2164af29SRuslan Bukin 	vmh->size = mapsize;
1039*2164af29SRuslan Bukin 	vmh->mem = cdev_pager_allocate(vmh, OBJT_MGTDEVICE, &sgx_pg_ops,
1040*2164af29SRuslan Bukin 	    mapsize, nprot, *offset, NULL);
1041*2164af29SRuslan Bukin 	if (vmh->mem == NULL) {
1042*2164af29SRuslan Bukin 		free(vmh, M_SGX);
1043*2164af29SRuslan Bukin 		return (ENOMEM);
1044*2164af29SRuslan Bukin 	}
1045*2164af29SRuslan Bukin 
1046*2164af29SRuslan Bukin 	VM_OBJECT_WLOCK(vmh->mem);
1047*2164af29SRuslan Bukin 	vm_object_set_flag(vmh->mem, OBJ_PG_DTOR);
1048*2164af29SRuslan Bukin 	VM_OBJECT_WUNLOCK(vmh->mem);
1049*2164af29SRuslan Bukin 
1050*2164af29SRuslan Bukin 	*objp = vmh->mem;
1051*2164af29SRuslan Bukin 
1052*2164af29SRuslan Bukin 	return (0);
1053*2164af29SRuslan Bukin }
1054*2164af29SRuslan Bukin 
1055*2164af29SRuslan Bukin static struct cdevsw sgx_cdevsw = {
1056*2164af29SRuslan Bukin 	.d_version =		D_VERSION,
1057*2164af29SRuslan Bukin 	.d_ioctl =		sgx_ioctl,
1058*2164af29SRuslan Bukin 	.d_mmap_single =	sgx_mmap_single,
1059*2164af29SRuslan Bukin 	.d_name =		"Intel SGX",
1060*2164af29SRuslan Bukin };
1061*2164af29SRuslan Bukin 
1062*2164af29SRuslan Bukin static int
1063*2164af29SRuslan Bukin sgx_get_epc_area(struct sgx_softc *sc)
1064*2164af29SRuslan Bukin {
1065*2164af29SRuslan Bukin 	vm_offset_t epc_base_vaddr;
1066*2164af29SRuslan Bukin 	u_int cp[4];
1067*2164af29SRuslan Bukin 	int error;
1068*2164af29SRuslan Bukin 	int i;
1069*2164af29SRuslan Bukin 
1070*2164af29SRuslan Bukin 	cpuid_count(SGX_CPUID, 0x2, cp);
1071*2164af29SRuslan Bukin 
1072*2164af29SRuslan Bukin 	sc->epc_base = ((uint64_t)(cp[1] & 0xfffff) << 32) +
1073*2164af29SRuslan Bukin 	    (cp[0] & 0xfffff000);
1074*2164af29SRuslan Bukin 	sc->epc_size = ((uint64_t)(cp[3] & 0xfffff) << 32) +
1075*2164af29SRuslan Bukin 	    (cp[2] & 0xfffff000);
1076*2164af29SRuslan Bukin 	sc->npages = sc->epc_size / SGX_PAGE_SIZE;
1077*2164af29SRuslan Bukin 
1078*2164af29SRuslan Bukin 	if (cp[3] & 0xffff)
1079*2164af29SRuslan Bukin 		sc->enclave_size_max = (1 << ((cp[3] >> 8) & 0xff));
1080*2164af29SRuslan Bukin 	else
1081*2164af29SRuslan Bukin 		sc->enclave_size_max = SGX_ENCL_SIZE_MAX_DEF;
1082*2164af29SRuslan Bukin 
1083*2164af29SRuslan Bukin 	epc_base_vaddr = (vm_offset_t)pmap_mapdev_attr(sc->epc_base,
1084*2164af29SRuslan Bukin 	    sc->epc_size, VM_MEMATTR_DEFAULT);
1085*2164af29SRuslan Bukin 
1086*2164af29SRuslan Bukin 	sc->epc_pages = malloc(sizeof(struct epc_page) * sc->npages,
1087*2164af29SRuslan Bukin 	    M_DEVBUF, M_WAITOK | M_ZERO);
1088*2164af29SRuslan Bukin 
1089*2164af29SRuslan Bukin 	for (i = 0; i < sc->npages; i++) {
1090*2164af29SRuslan Bukin 		sc->epc_pages[i].base = epc_base_vaddr + SGX_PAGE_SIZE * i;
1091*2164af29SRuslan Bukin 		sc->epc_pages[i].phys = sc->epc_base + SGX_PAGE_SIZE * i;
1092*2164af29SRuslan Bukin 		sc->epc_pages[i].index = i;
1093*2164af29SRuslan Bukin 	}
1094*2164af29SRuslan Bukin 
1095*2164af29SRuslan Bukin 	sc->vmem_epc = vmem_create("SGX EPC", sc->epc_base, sc->epc_size,
1096*2164af29SRuslan Bukin 	    PAGE_SIZE, PAGE_SIZE, M_FIRSTFIT | M_WAITOK);
1097*2164af29SRuslan Bukin 	if (sc->vmem_epc == NULL) {
1098*2164af29SRuslan Bukin 		printf("%s: Can't create vmem arena.\n", __func__);
1099*2164af29SRuslan Bukin 		free(sc->epc_pages, M_SGX);
1100*2164af29SRuslan Bukin 		return (EINVAL);
1101*2164af29SRuslan Bukin 	}
1102*2164af29SRuslan Bukin 
1103*2164af29SRuslan Bukin 	error = vm_phys_fictitious_reg_range(sc->epc_base,
1104*2164af29SRuslan Bukin 	    sc->epc_base + sc->epc_size, VM_MEMATTR_DEFAULT);
1105*2164af29SRuslan Bukin 	if (error) {
1106*2164af29SRuslan Bukin 		printf("%s: Can't register fictitious space.\n", __func__);
1107*2164af29SRuslan Bukin 		free(sc->epc_pages, M_SGX);
1108*2164af29SRuslan Bukin 		return (EINVAL);
1109*2164af29SRuslan Bukin 	}
1110*2164af29SRuslan Bukin 
1111*2164af29SRuslan Bukin 	return (0);
1112*2164af29SRuslan Bukin }
1113*2164af29SRuslan Bukin 
1114*2164af29SRuslan Bukin static void
1115*2164af29SRuslan Bukin sgx_put_epc_area(struct sgx_softc *sc)
1116*2164af29SRuslan Bukin {
1117*2164af29SRuslan Bukin 
1118*2164af29SRuslan Bukin 	vm_phys_fictitious_unreg_range(sc->epc_base,
1119*2164af29SRuslan Bukin 	    sc->epc_base + sc->epc_size);
1120*2164af29SRuslan Bukin 
1121*2164af29SRuslan Bukin 	free(sc->epc_pages, M_SGX);
1122*2164af29SRuslan Bukin }
1123*2164af29SRuslan Bukin 
1124*2164af29SRuslan Bukin static int
1125*2164af29SRuslan Bukin sgx_load(void)
1126*2164af29SRuslan Bukin {
1127*2164af29SRuslan Bukin 	struct sgx_softc *sc;
1128*2164af29SRuslan Bukin 	int error;
1129*2164af29SRuslan Bukin 
1130*2164af29SRuslan Bukin 	sc = &sgx_sc;
1131*2164af29SRuslan Bukin 
1132*2164af29SRuslan Bukin 	if ((cpu_stdext_feature & CPUID_STDEXT_SGX) == 0)
1133*2164af29SRuslan Bukin 		return (ENXIO);
1134*2164af29SRuslan Bukin 
1135*2164af29SRuslan Bukin 	mtx_init(&sc->mtx_encls, "SGX ENCLS", NULL, MTX_DEF);
1136*2164af29SRuslan Bukin 	mtx_init(&sc->mtx, "SGX driver", NULL, MTX_DEF);
1137*2164af29SRuslan Bukin 
1138*2164af29SRuslan Bukin 	error = sgx_get_epc_area(sc);
1139*2164af29SRuslan Bukin 	if (error) {
1140*2164af29SRuslan Bukin 		printf("%s: Failed to get Processor Reserved Memory area.\n",
1141*2164af29SRuslan Bukin 		    __func__);
1142*2164af29SRuslan Bukin 		return (ENXIO);
1143*2164af29SRuslan Bukin 	}
1144*2164af29SRuslan Bukin 
1145*2164af29SRuslan Bukin 	TAILQ_INIT(&sc->enclaves);
1146*2164af29SRuslan Bukin 
1147*2164af29SRuslan Bukin 	sc->sgx_cdev = make_dev(&sgx_cdevsw, 0, UID_ROOT, GID_WHEEL,
1148*2164af29SRuslan Bukin 	    0600, "isgx");
1149*2164af29SRuslan Bukin 
1150*2164af29SRuslan Bukin 	sc->state |= SGX_STATE_RUNNING;
1151*2164af29SRuslan Bukin 
1152*2164af29SRuslan Bukin 	printf("SGX initialized: EPC base 0x%lx size %ld (%d pages)\n",
1153*2164af29SRuslan Bukin 	    sc->epc_base, sc->epc_size, sc->npages);
1154*2164af29SRuslan Bukin 
1155*2164af29SRuslan Bukin 	return (0);
1156*2164af29SRuslan Bukin }
1157*2164af29SRuslan Bukin 
1158*2164af29SRuslan Bukin static int
1159*2164af29SRuslan Bukin sgx_unload(void)
1160*2164af29SRuslan Bukin {
1161*2164af29SRuslan Bukin 	struct sgx_softc *sc;
1162*2164af29SRuslan Bukin 
1163*2164af29SRuslan Bukin 	sc = &sgx_sc;
1164*2164af29SRuslan Bukin 
1165*2164af29SRuslan Bukin 	mtx_lock(&sc->mtx);
1166*2164af29SRuslan Bukin 	if (!TAILQ_EMPTY(&sc->enclaves)) {
1167*2164af29SRuslan Bukin 		mtx_unlock(&sc->mtx);
1168*2164af29SRuslan Bukin 		return (EBUSY);
1169*2164af29SRuslan Bukin 	}
1170*2164af29SRuslan Bukin 	sc->state &= ~SGX_STATE_RUNNING;
1171*2164af29SRuslan Bukin 	mtx_unlock(&sc->mtx);
1172*2164af29SRuslan Bukin 
1173*2164af29SRuslan Bukin 	destroy_dev(sc->sgx_cdev);
1174*2164af29SRuslan Bukin 
1175*2164af29SRuslan Bukin 	vmem_destroy(sc->vmem_epc);
1176*2164af29SRuslan Bukin 	sgx_put_epc_area(sc);
1177*2164af29SRuslan Bukin 
1178*2164af29SRuslan Bukin 	mtx_destroy(&sc->mtx_encls);
1179*2164af29SRuslan Bukin 	mtx_destroy(&sc->mtx);
1180*2164af29SRuslan Bukin 
1181*2164af29SRuslan Bukin 	return (0);
1182*2164af29SRuslan Bukin }
1183*2164af29SRuslan Bukin 
1184*2164af29SRuslan Bukin static int
1185*2164af29SRuslan Bukin sgx_handler(module_t mod, int what, void *arg)
1186*2164af29SRuslan Bukin {
1187*2164af29SRuslan Bukin 	int error;
1188*2164af29SRuslan Bukin 
1189*2164af29SRuslan Bukin 	switch (what) {
1190*2164af29SRuslan Bukin 	case MOD_LOAD:
1191*2164af29SRuslan Bukin 		error = sgx_load();
1192*2164af29SRuslan Bukin 		break;
1193*2164af29SRuslan Bukin 	case MOD_UNLOAD:
1194*2164af29SRuslan Bukin 		error = sgx_unload();
1195*2164af29SRuslan Bukin 		break;
1196*2164af29SRuslan Bukin 	default:
1197*2164af29SRuslan Bukin 		error = 0;
1198*2164af29SRuslan Bukin 		break;
1199*2164af29SRuslan Bukin 	}
1200*2164af29SRuslan Bukin 
1201*2164af29SRuslan Bukin 	return (error);
1202*2164af29SRuslan Bukin }
1203*2164af29SRuslan Bukin 
1204*2164af29SRuslan Bukin static moduledata_t sgx_kmod = {
1205*2164af29SRuslan Bukin 	"sgx",
1206*2164af29SRuslan Bukin 	sgx_handler,
1207*2164af29SRuslan Bukin 	NULL
1208*2164af29SRuslan Bukin };
1209*2164af29SRuslan Bukin 
1210*2164af29SRuslan Bukin DECLARE_MODULE(sgx, sgx_kmod, SI_SUB_LAST, SI_ORDER_ANY);
1211*2164af29SRuslan Bukin MODULE_VERSION(sgx, 1);
1212