/*-
 * Copyright (c) 2017 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by BAE Systems, the University of Cambridge
 * Computer Laboratory, and Memorial University under DARPA/AFRL contract
 * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
 * (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Design overview.
 *
 * The driver provides a character device for the mmap(2) and ioctl(2) system
 * calls, allowing the user to manage isolated compartments ("enclaves") in
 * user VA space.
 *
 * The driver's duties are EPC page management, enclave management, and user
 * data validation.
 *
 * This driver requires hardware with Intel SGX support.
 *
 * /dev/sgx:
 *    .mmap:
 *        sgx_mmap_single() allocates a VM object with the following pager
 *        operations:
 *              a) sgx_pg_ctor():
 *                  The VM object constructor does nothing.
 *              b) sgx_pg_dtor():
 *                  The VM object destructor destroys the SGX enclave
 *                  associated with the object: it frees all the EPC pages
 *                  allocated for the enclave and removes the enclave.
 *              c) sgx_pg_fault():
 *                  The VM object fault handler does nothing.
 *
 *    .ioctl:
 *        sgx_ioctl():
 *               a) SGX_IOC_ENCLAVE_CREATE
 *                   Adds the enclave's SECS page: the initial step of
 *                   enclave creation.
 *               b) SGX_IOC_ENCLAVE_ADD_PAGE
 *                   Adds TCS and REG pages to the enclave.
 *               c) SGX_IOC_ENCLAVE_INIT
 *                   Finalizes enclave creation.
 *
 * Enclave lifecycle:
 *          .-- ECREATE  -- Add SECS page
 *   Kernel |   EADD     -- Add TCS, REG pages
 *    space |   EEXTEND  -- Measure the page (extend the enclave measurement)
 *    ENCLS |   EPA      -- Allocate version array page
 *          '-- EINIT    -- Finalize enclave creation
 *   User   .-- EENTER   -- Go to entry point of enclave
 *    space |   EEXIT    -- Exit back to main application
 *    ENCLU '-- ERESUME  -- Resume enclave execution (e.g. after exception)
 *
 * Enclave lifecycle from the driver's point of view (a usage sketch follows
 * this list):
 *  1) The user calls mmap() on /dev/sgx: we allocate a VM object.
 *  2) The user calls ioctl SGX_IOC_ENCLAVE_CREATE: we look up the VM object
 *     created in step 1 for the user process, create the SECS physical page,
 *     and store it in the enclave's VM object queue at the special index
 *     SGX_SECS_VM_OBJECT_INDEX.
 *  3) The user calls ioctl SGX_IOC_ENCLAVE_ADD_PAGE: we look up the enclave
 *     created in step 2, create a TCS or REG physical page, and map it at
 *     the user-specified address within the enclave's VM object.
 *  4) The user finalizes enclave creation with an ioctl SGX_IOC_ENCLAVE_INIT
 *     call.
 *  5) The user can freely enter and exit the enclave using ENCLU
 *     instructions from userspace: the driver does nothing here.
 *  6) The user calls munmap(2) (or the process owning the enclave dies):
 *     we destroy the enclave associated with the object.
 *
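 * A minimal userspace sketch of this sequence (illustrative only: error
 * handling and most structure setup are omitted; the request structures
 * and SGX_IOC_* commands come from machine/sgx.h, and the device node
 * created by the driver is /dev/isgx):
 *
 *     fd = open("/dev/isgx", O_RDWR);
 *     base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *     ... fill in a struct secs whose base lies within the mapping ...
 *     struct sgx_enclave_create param = { .src = (uint64_t)&secs };
 *     ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &param);
 *     ... one SGX_IOC_ENCLAVE_ADD_PAGE call per TCS/REG page ...
 *     ioctl(fd, SGX_IOC_ENCLAVE_INIT, &initp);
 *     ... EENTER/EEXIT via ENCLU in userspace; no driver involvement ...
 *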
 * EPC page types and their indexes in the VM object queue (a worked example
 * follows this list):
 *   - The PT_SECS index is special and equals SGX_SECS_VM_OBJECT_INDEX (-1);
 *   - PT_TCS and PT_REG indexes are specified by the user in the addr field
 *     of the ioctl request data and determined as follows:
 *       pidx = OFF_TO_IDX(addp->addr - vmh->base);
 *   - The PT_VA index is special, created for PT_REG, PT_TCS and PT_SECS
 *     pages, and determined by the formula:
 *       va_page_idx = - SGX_VA_PAGES_OFFS - (page_idx / SGX_VA_PAGE_SLOTS);
 *     A PT_VA page can hold versions of up to 512 pages, and the slot for
 *     each page within the PT_VA page is determined as follows:
 *       va_slot_idx = page_idx % SGX_VA_PAGE_SLOTS;
 *   - PT_TRIM is unused.
 *
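 * For example, with SGX_VA_PAGE_SLOTS = 512, the enclave page at
 * page_idx = 1000 gets va_page_idx = - SGX_VA_PAGES_OFFS - 1 (since
 * 1000 / 512 = 1), and its version slot within that PT_VA page is
 * va_slot_idx = 1000 % 512 = 488.
 *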
 * Locking:
 *    The SGX ENCLS set of instructions has concurrency limitations: some
 *    instructions can't be executed at the same time on different CPUs.
 *    We take the sc->mtx_encls lock around them to prevent concurrent
 *    execution.
 *    The sc->mtx lock is used to manage the list of created enclaves and
 *    the state of the SGX driver.
 *
 * Eviction of EPC pages:
 *    Eviction support is not implemented in this driver; however, the driver
 *    manages VA (version array) pages: it allocates a VA slot for each EPC
 *    page. This will be required for eviction support in the future.
 *    VA pages and slots are currently unused.
 *
 * Intel® 64 and IA-32 Architectures Software Developer's Manual
 * https://software.intel.com/en-us/articles/intel-sdm
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/cpufunc.h>
#include <machine/sgx.h>
#include <machine/sgxreg.h>

#include <amd64/sgx/sgxvar.h>

#define	SGX_DEBUG
#undef	SGX_DEBUG

#ifdef	SGX_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif

static struct cdev_pager_ops sgx_pg_ops;
struct sgx_softc sgx_sc;

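/*
 * Allocate a free EPC page: carve a page-sized chunk out of the EPC
 * vmem arena and translate it to its epc_page descriptor.
 */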
static int
sgx_get_epc_page(struct sgx_softc *sc, struct epc_page **epc)
{
	vmem_addr_t addr;
	int i;

	if (vmem_alloc(sc->vmem_epc, PAGE_SIZE, M_FIRSTFIT | M_NOWAIT,
	    &addr) == 0) {
		i = (addr - sc->epc_base) / PAGE_SIZE;
		*epc = &sc->epc_pages[i];
		return (0);
	}

	return (ENOMEM);
}

static void
sgx_put_epc_page(struct sgx_softc *sc, struct epc_page *epc)
{
	vmem_addr_t addr;

	if (epc == NULL)
		return;

	addr = (epc->index * PAGE_SIZE) + sc->epc_base;
	vmem_free(sc->vmem_epc, addr, PAGE_SIZE);
}

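/*
 * Ensure that a VA (version array) page exists at the given VM object
 * index, allocating a fresh EPC page and initializing it with EPA if
 * one is not resident yet.
 */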
static int
sgx_va_slot_init_by_index(struct sgx_softc *sc, vm_object_t object,
    uint64_t idx)
{
	struct epc_page *epc;
	vm_page_t page;
	vm_page_t p;
	int ret;

	VM_OBJECT_ASSERT_WLOCKED(object);

	p = vm_page_lookup(object, idx);
	if (p == NULL) {
		ret = sgx_get_epc_page(sc, &epc);
		if (ret) {
			dprintf("%s: No free EPC pages available.\n",
			    __func__);
			return (ret);
		}

		mtx_lock(&sc->mtx_encls);
		sgx_epa((void *)epc->base);
		mtx_unlock(&sc->mtx_encls);

		page = PHYS_TO_VM_PAGE(epc->phys);

		vm_page_insert(page, object, idx);
		page->valid = VM_PAGE_BITS_ALL;
	}

	return (0);
}

static int
sgx_va_slot_init(struct sgx_softc *sc,
    struct sgx_enclave *enclave,
    uint64_t addr)
{
	vm_pindex_t pidx;
	uint64_t va_page_idx;
	uint64_t idx;
	vm_object_t object;
	int va_slot;
	int ret;

	object = enclave->object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	pidx = OFF_TO_IDX(addr);

	va_slot = pidx % SGX_VA_PAGE_SLOTS;
	va_page_idx = pidx / SGX_VA_PAGE_SLOTS;
	idx = - SGX_VA_PAGES_OFFS - va_page_idx;

	ret = sgx_va_slot_init_by_index(sc, object, idx);

	return (ret);
}

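/*
 * Find the VM map entry covering addr in the current process and check
 * that it is backed by one of our SGX pager objects; on success return
 * the entry and a referenced VM object.
 */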
static int
sgx_mem_find(struct sgx_softc *sc, uint64_t addr,
    vm_map_entry_t *entry0, vm_object_t *object0)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;

	map = &curproc->p_vmspace->vm_map;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		dprintf("%s: Can't find enclave.\n", __func__);
		return (EINVAL);
	}

	object = entry->object.vm_object;
	if (object == NULL || object->handle == NULL) {
		vm_map_unlock_read(map);
		return (EINVAL);
	}

	if (object->type != OBJT_MGTDEVICE ||
	    object->un_pager.devp.ops != &sgx_pg_ops) {
		vm_map_unlock_read(map);
		return (EINVAL);
	}

	vm_object_reference(object);

	*object0 = object;
	*entry0 = entry;
	vm_map_unlock_read(map);

	return (0);
}

static int
sgx_enclave_find(struct sgx_softc *sc, uint64_t addr,
    struct sgx_enclave **encl)
{
	struct sgx_vm_handle *vmh;
	struct sgx_enclave *enclave;
	vm_map_entry_t entry;
	vm_object_t object;
	int ret;

	ret = sgx_mem_find(sc, addr, &entry, &object);
	if (ret)
		return (ret);

	vmh = object->handle;
	if (vmh == NULL) {
		vm_object_deallocate(object);
		return (EINVAL);
	}

	enclave = vmh->enclave;
	if (enclave == NULL || enclave->object == NULL) {
		vm_object_deallocate(object);
		return (EINVAL);
	}

	*encl = enclave;

	return (0);
}

static int
sgx_enclave_alloc(struct sgx_softc *sc, struct secs *secs,
    struct sgx_enclave **enclave0)
{
	struct sgx_enclave *enclave;

	enclave = malloc(sizeof(struct sgx_enclave),
	    M_SGX, M_WAITOK | M_ZERO);

	enclave->base = secs->base;
	enclave->size = secs->size;

	*enclave0 = enclave;

	return (0);
}

static void
sgx_epc_page_remove(struct sgx_softc *sc,
    struct epc_page *epc)
{

	mtx_lock(&sc->mtx_encls);
	sgx_eremove((void *)epc->base);
	mtx_unlock(&sc->mtx_encls);
}

static void
sgx_page_remove(struct sgx_softc *sc, vm_page_t p)
{
	struct epc_page *epc;
	vm_paddr_t pa;
	uint64_t offs;

	(void)vm_page_remove(p);

	dprintf("%s: p->pidx %ld\n", __func__, p->pindex);

	pa = VM_PAGE_TO_PHYS(p);
	epc = &sc->epc_pages[0];
	offs = (pa - epc->phys) / PAGE_SIZE;
	epc = &sc->epc_pages[offs];

	sgx_epc_page_remove(sc, epc);
	sgx_put_epc_page(sc, epc);
}

static void
sgx_enclave_remove(struct sgx_softc *sc,
    struct sgx_enclave *enclave)
{
	vm_object_t object;
	vm_page_t p, p_secs, p_next;

	mtx_lock(&sc->mtx);
	TAILQ_REMOVE(&sc->enclaves, enclave, next);
	mtx_unlock(&sc->mtx);

	object = enclave->object;

	VM_OBJECT_WLOCK(object);

	/*
	 * First remove all the pages except SECS,
	 * then remove the SECS page.
	 */
	p_secs = NULL;
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		if (p->pindex == SGX_SECS_VM_OBJECT_INDEX) {
			p_secs = p;
			continue;
		}
		sgx_page_remove(sc, p);
	}
	/* Now remove the SECS page. */
	if (p_secs != NULL)
		sgx_page_remove(sc, p_secs);

	KASSERT(TAILQ_EMPTY(&object->memq) == 1, ("not empty"));
	KASSERT(object->resident_page_count == 0, ("count"));

	VM_OBJECT_WUNLOCK(object);
}

static int
sgx_measure_page(struct sgx_softc *sc, struct epc_page *secs,
    struct epc_page *epc, uint16_t mrmask)
{
	int i, j;
	int ret;

	mtx_lock(&sc->mtx_encls);

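	/*
	 * mrmask carries one bit per 256-byte chunk of the 4K page;
	 * EEXTEND measures only the chunks whose bits are set.
	 */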
	for (i = 0, j = 1; i < PAGE_SIZE; i += 0x100, j <<= 1) {
		if (!(j & mrmask))
			continue;

		ret = sgx_eextend((void *)secs->base,
		    (void *)(epc->base + i));
		if (ret == SGX_EFAULT) {
			mtx_unlock(&sc->mtx_encls);
			return (ret);
		}
	}

	mtx_unlock(&sc->mtx_encls);

	return (0);
}

static int
sgx_secs_validate(struct sgx_softc *sc, struct secs *secs)
{
	struct secs_attr *attr;
	int i;

	if (secs->size == 0)
		return (EINVAL);

	/* BASEADDR must be naturally aligned on an SECS.SIZE boundary. */
	if (secs->base & (secs->size - 1))
		return (EINVAL);

	/* SECS.SIZE must be at least 2 pages. */
	if (secs->size < 2 * PAGE_SIZE)
		return (EINVAL);

	if ((secs->size & (secs->size - 1)) != 0)
		return (EINVAL);

	attr = &secs->attributes;

	if (attr->reserved1 != 0 ||
	    attr->reserved2 != 0 ||
	    attr->reserved3 != 0)
		return (EINVAL);

	for (i = 0; i < SECS_ATTR_RSV4_SIZE; i++)
		if (attr->reserved4[i])
			return (EINVAL);

	/*
	 * Intel® Software Guard Extensions Programming Reference
	 * 6.7.2 Relevant Fields in Various Data Structures
	 * 6.7.2.1 SECS.ATTRIBUTES.XFRM
	 * XFRM[1:0] must be set to 0x3.
	 */
	if ((attr->xfrm & 0x3) != 0x3)
		return (EINVAL);

	if (!attr->mode64bit)
		return (EINVAL);

	if (secs->size > sc->enclave_size_max)
		return (EINVAL);

	for (i = 0; i < SECS_RSV1_SIZE; i++)
		if (secs->reserved1[i])
			return (EINVAL);

	for (i = 0; i < SECS_RSV2_SIZE; i++)
		if (secs->reserved2[i])
			return (EINVAL);

	for (i = 0; i < SECS_RSV3_SIZE; i++)
		if (secs->reserved3[i])
			return (EINVAL);

	for (i = 0; i < SECS_RSV4_SIZE; i++)
		if (secs->reserved4[i])
			return (EINVAL);

	return (0);
}

static int
sgx_tcs_validate(struct tcs *tcs)
{
	int i;

	if ((tcs->flags) ||
	    (tcs->ossa & (PAGE_SIZE - 1)) ||
	    (tcs->ofsbasgx & (PAGE_SIZE - 1)) ||
	    (tcs->ogsbasgx & (PAGE_SIZE - 1)) ||
	    ((tcs->fslimit & 0xfff) != 0xfff) ||
	    ((tcs->gslimit & 0xfff) != 0xfff))
		return (EINVAL);

	for (i = 0; i < nitems(tcs->reserved3); i++)
		if (tcs->reserved3[i])
			return (EINVAL);

	return (0);
}

static void
sgx_tcs_dump(struct sgx_softc *sc, struct tcs *t)
{

	dprintf("t->flags %lx\n", t->flags);
	dprintf("t->ossa %lx\n", t->ossa);
	dprintf("t->cssa %x\n", t->cssa);
	dprintf("t->nssa %x\n", t->nssa);
	dprintf("t->oentry %lx\n", t->oentry);
	dprintf("t->ofsbasgx %lx\n", t->ofsbasgx);
	dprintf("t->ogsbasgx %lx\n", t->ogsbasgx);
	dprintf("t->fslimit %x\n", t->fslimit);
	dprintf("t->gslimit %x\n", t->gslimit);
}

static int
sgx_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct sgx_vm_handle *vmh;

	vmh = handle;
	if (vmh == NULL) {
		dprintf("%s: vmh not found.\n", __func__);
		return (0);
	}

	dprintf("%s: vmh->base %lx foff 0x%lx size 0x%lx\n",
	    __func__, vmh->base, foff, size);

	return (0);
}

static void
sgx_pg_dtor(void *handle)
{
	struct sgx_vm_handle *vmh;
	struct sgx_softc *sc;

	vmh = handle;
	if (vmh == NULL) {
		dprintf("%s: vmh not found.\n", __func__);
		return;
	}

	sc = vmh->sc;
	if (sc == NULL) {
		dprintf("%s: sc is NULL\n", __func__);
		return;
	}

	if (vmh->enclave == NULL) {
		dprintf("%s: Enclave not found.\n", __func__);
		return;
	}

	sgx_enclave_remove(sc, vmh->enclave);

	free(vmh->enclave, M_SGX);
	free(vmh, M_SGX);
}

static int
sgx_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	/*
	 * The purpose of this trivial handler is to handle the race
	 * when the user tries to access the mmaped region before or
	 * during the enclave creation ioctl calls.
	 */

	dprintf("%s: offset 0x%lx\n", __func__, offset);

	return (VM_PAGER_FAIL);
}

static struct cdev_pager_ops sgx_pg_ops = {
	.cdev_pg_ctor = sgx_pg_ctor,
	.cdev_pg_dtor = sgx_pg_dtor,
	.cdev_pg_fault = sgx_pg_fault,
};

static void
sgx_insert_epc_page_by_index(vm_page_t page, vm_object_t object,
    vm_pindex_t pidx)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	vm_page_insert(page, object, pidx);
	page->valid = VM_PAGE_BITS_ALL;
}

static void
sgx_insert_epc_page(struct sgx_enclave *enclave,
    struct epc_page *epc, uint64_t addr)
{
	vm_pindex_t pidx;
	vm_page_t page;

	VM_OBJECT_ASSERT_WLOCKED(enclave->object);

	pidx = OFF_TO_IDX(addr);
	page = PHYS_TO_VM_PAGE(epc->phys);

	sgx_insert_epc_page_by_index(page, enclave->object, pidx);
}

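/*
 * SGX_IOC_ENCLAVE_CREATE: copy in and validate the SECS, allocate the
 * SECS EPC page together with its VA page, and run ECREATE.
 */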
static int
sgx_ioctl_create(struct sgx_softc *sc, struct sgx_enclave_create *param)
{
	struct sgx_vm_handle *vmh;
	vm_map_entry_t entry;
	vm_page_t p;
	struct page_info pginfo;
	struct secinfo secinfo;
	struct sgx_enclave *enclave;
	struct epc_page *epc;
	struct secs *secs;
	vm_object_t object;
	vm_page_t page;
	int ret;

	epc = NULL;
	secs = NULL;
	enclave = NULL;
	object = NULL;

	/* SGX Enclave Control Structure (SECS) */
	secs = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
	ret = copyin((void *)param->src, secs, sizeof(struct secs));
	if (ret) {
		dprintf("%s: Can't copy SECS.\n", __func__);
		goto error;
	}

	ret = sgx_secs_validate(sc, secs);
	if (ret) {
		dprintf("%s: SECS validation failed.\n", __func__);
		goto error;
	}

	ret = sgx_mem_find(sc, secs->base, &entry, &object);
	if (ret) {
		dprintf("%s: Can't find vm_map.\n", __func__);
		goto error;
	}

	vmh = object->handle;
	if (!vmh) {
		dprintf("%s: Can't find vmh.\n", __func__);
		ret = ENXIO;
		goto error;
	}

	dprintf("%s: entry start %lx offset %lx\n",
	    __func__, entry->start, entry->offset);
	vmh->base = (entry->start - entry->offset);

	ret = sgx_enclave_alloc(sc, secs, &enclave);
	if (ret) {
		dprintf("%s: Can't alloc enclave.\n", __func__);
		goto error;
	}
	enclave->object = object;
	enclave->vmh = vmh;

	memset(&secinfo, 0, sizeof(struct secinfo));
	memset(&pginfo, 0, sizeof(struct page_info));
	pginfo.linaddr = 0;
	pginfo.srcpge = (uint64_t)secs;
	pginfo.secinfo = &secinfo;
	pginfo.secs = 0;

	ret = sgx_get_epc_page(sc, &epc);
	if (ret) {
		dprintf("%s: Failed to get free epc page.\n", __func__);
		goto error;
	}
	enclave->secs_epc_page = epc;

	VM_OBJECT_WLOCK(object);
	p = vm_page_lookup(object, SGX_SECS_VM_OBJECT_INDEX);
	if (p) {
		VM_OBJECT_WUNLOCK(object);
		/* SECS page already added. */
		ret = ENXIO;
		goto error;
	}

	ret = sgx_va_slot_init_by_index(sc, object,
	    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
	if (ret) {
		VM_OBJECT_WUNLOCK(object);
		dprintf("%s: Can't init va slot.\n", __func__);
		goto error;
	}

	mtx_lock(&sc->mtx);
	if ((sc->state & SGX_STATE_RUNNING) == 0) {
		mtx_unlock(&sc->mtx);
		/* Remove the VA page just created for the SECS page. */
		p = vm_page_lookup(enclave->object,
		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
		sgx_page_remove(sc, p);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}
	mtx_lock(&sc->mtx_encls);
	ret = sgx_ecreate(&pginfo, (void *)epc->base);
	mtx_unlock(&sc->mtx_encls);
	if (ret == SGX_EFAULT) {
		dprintf("%s: gp fault\n", __func__);
		mtx_unlock(&sc->mtx);
		/* Remove the VA page just created for the SECS page. */
		p = vm_page_lookup(enclave->object,
		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
		sgx_page_remove(sc, p);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}

	TAILQ_INSERT_TAIL(&sc->enclaves, enclave, next);
	mtx_unlock(&sc->mtx);

	vmh->enclave = enclave;

	page = PHYS_TO_VM_PAGE(epc->phys);
	sgx_insert_epc_page_by_index(page, enclave->object,
	    SGX_SECS_VM_OBJECT_INDEX);

	VM_OBJECT_WUNLOCK(object);

	/* Release the reference. */
	vm_object_deallocate(object);

	free(secs, M_SGX);

	return (0);

error:
	free(secs, M_SGX);
	sgx_put_epc_page(sc, epc);
	free(enclave, M_SGX);
	vm_object_deallocate(object);

	return (ret);
}

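/*
 * SGX_IOC_ENCLAVE_ADD_PAGE: copy in a TCS or REG page together with its
 * SECINFO, run EADD, and measure the 256-byte chunks selected by mrmask
 * with EEXTEND.
 */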
static int
sgx_ioctl_add_page(struct sgx_softc *sc,
    struct sgx_enclave_add_page *addp)
{
	struct epc_page *secs_epc_page;
	struct sgx_enclave *enclave;
	struct sgx_vm_handle *vmh;
	struct epc_page *epc;
	struct page_info pginfo;
	struct secinfo secinfo;
	vm_object_t object;
	void *tmp_vaddr;
	uint64_t page_type;
	struct tcs *t;
	uint64_t addr;
	uint64_t pidx;
	vm_page_t p;
	int ret;

	tmp_vaddr = NULL;
	epc = NULL;
	object = NULL;

	/* Find and get a reference to the VM object. */
	ret = sgx_enclave_find(sc, addp->addr, &enclave);
	if (ret) {
		dprintf("%s: Failed to find enclave.\n", __func__);
		goto error;
	}

	object = enclave->object;
	KASSERT(object != NULL, ("vm object is NULL\n"));
	vmh = object->handle;

	ret = sgx_get_epc_page(sc, &epc);
	if (ret) {
		dprintf("%s: Failed to get free epc page.\n", __func__);
		goto error;
	}

	memset(&secinfo, 0, sizeof(struct secinfo));
	ret = copyin((void *)addp->secinfo, &secinfo,
	    sizeof(struct secinfo));
	if (ret) {
		dprintf("%s: Failed to copy secinfo.\n", __func__);
		goto error;
	}

	tmp_vaddr = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
	ret = copyin((void *)addp->src, tmp_vaddr, PAGE_SIZE);
	if (ret) {
		dprintf("%s: Failed to copy page.\n", __func__);
		goto error;
	}

	page_type = (secinfo.flags & SECINFO_FLAGS_PT_M) >>
	    SECINFO_FLAGS_PT_S;
	if (page_type != SGX_PT_TCS && page_type != SGX_PT_REG) {
		dprintf("%s: page can't be added.\n", __func__);
		ret = EINVAL;
		goto error;
	}
	if (page_type == SGX_PT_TCS) {
		t = (struct tcs *)tmp_vaddr;
		ret = sgx_tcs_validate(t);
		if (ret) {
			dprintf("%s: TCS page validation failed.\n",
			    __func__);
			goto error;
		}
		sgx_tcs_dump(sc, t);
	}

	addr = (addp->addr - vmh->base);
	pidx = OFF_TO_IDX(addr);

	VM_OBJECT_WLOCK(object);
	p = vm_page_lookup(object, pidx);
	if (p) {
		VM_OBJECT_WUNLOCK(object);
		/* Page already added. */
		ret = ENXIO;
		goto error;
	}

	ret = sgx_va_slot_init(sc, enclave, addr);
	if (ret) {
		VM_OBJECT_WUNLOCK(object);
		dprintf("%s: Can't init va slot.\n", __func__);
		goto error;
	}

	secs_epc_page = enclave->secs_epc_page;
	memset(&pginfo, 0, sizeof(struct page_info));
	pginfo.linaddr = (uint64_t)addp->addr;
	pginfo.srcpge = (uint64_t)tmp_vaddr;
	pginfo.secinfo = &secinfo;
	pginfo.secs = (uint64_t)secs_epc_page->base;

	mtx_lock(&sc->mtx_encls);
	ret = sgx_eadd(&pginfo, (void *)epc->base);
	if (ret == SGX_EFAULT) {
		dprintf("%s: gp fault on eadd\n", __func__);
		mtx_unlock(&sc->mtx_encls);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}
	mtx_unlock(&sc->mtx_encls);

	ret = sgx_measure_page(sc, enclave->secs_epc_page, epc, addp->mrmask);
	if (ret == SGX_EFAULT) {
		dprintf("%s: gp fault on eextend\n", __func__);
		sgx_epc_page_remove(sc, epc);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}

	sgx_insert_epc_page(enclave, epc, addr);

	VM_OBJECT_WUNLOCK(object);

	/* Release the reference. */
	vm_object_deallocate(object);

	free(tmp_vaddr, M_SGX);

	return (0);

error:
	free(tmp_vaddr, M_SGX);
	sgx_put_epc_page(sc, epc);
	vm_object_deallocate(object);

	return (ret);
}

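/*
 * SGX_IOC_ENCLAVE_INIT: copy in the SIGSTRUCT and EINITTOKEN and run
 * EINIT, retrying a limited number of times while the instruction is
 * interrupted by an unmasked event.
 */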
static int
sgx_ioctl_init(struct sgx_softc *sc, struct sgx_enclave_init *initp)
{
	struct epc_page *secs_epc_page;
	struct sgx_enclave *enclave;
	struct thread *td;
	void *tmp_vaddr;
	void *einittoken;
	void *sigstruct;
	vm_object_t object;
	int retry;
	int ret;

	td = curthread;
	tmp_vaddr = NULL;
	object = NULL;

	dprintf("%s: addr %lx, sigstruct %lx, einittoken %lx\n",
	    __func__, initp->addr, initp->sigstruct, initp->einittoken);

	/* Find and get a reference to the VM object. */
	ret = sgx_enclave_find(sc, initp->addr, &enclave);
	if (ret) {
		dprintf("%s: Failed to find enclave.\n", __func__);
		goto error;
	}

	object = enclave->object;

	tmp_vaddr = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
	sigstruct = tmp_vaddr;
	einittoken = (void *)((uint64_t)sigstruct + PAGE_SIZE / 2);

	ret = copyin((void *)initp->sigstruct, sigstruct,
	    SGX_SIGSTRUCT_SIZE);
	if (ret) {
		dprintf("%s: Failed to copy SIGSTRUCT page.\n", __func__);
		goto error;
	}

	ret = copyin((void *)initp->einittoken, einittoken,
	    SGX_EINITTOKEN_SIZE);
	if (ret) {
		dprintf("%s: Failed to copy EINITTOKEN page.\n", __func__);
		goto error;
	}

	secs_epc_page = enclave->secs_epc_page;
	retry = 16;
	do {
		mtx_lock(&sc->mtx_encls);
		ret = sgx_einit(sigstruct, (void *)secs_epc_page->base,
		    einittoken);
		mtx_unlock(&sc->mtx_encls);
		dprintf("%s: sgx_einit returned %d\n", __func__, ret);
	} while (ret == SGX_UNMASKED_EVENT && retry--);

	if (ret) {
		dprintf("%s: Failed to init enclave: %d\n", __func__, ret);
		td->td_retval[0] = ret;
		ret = 0;
	}

error:
	free(tmp_vaddr, M_SGX);

	/* Release the reference. */
	vm_object_deallocate(object);

	return (ret);
}

static int
sgx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct sgx_enclave_add_page *addp;
	struct sgx_enclave_create *param;
	struct sgx_enclave_init *initp;
	struct sgx_softc *sc;
	int ret;
	int len;

	sc = &sgx_sc;

	len = IOCPARM_LEN(cmd);

	dprintf("%s: cmd %lx, addr %lx, len %d\n",
	    __func__, cmd, (uint64_t)addr, len);

	if (len > SGX_IOCTL_MAX_DATA_LEN)
		return (EINVAL);

	switch (cmd) {
	case SGX_IOC_ENCLAVE_CREATE:
		param = (struct sgx_enclave_create *)addr;
		ret = sgx_ioctl_create(sc, param);
		break;
	case SGX_IOC_ENCLAVE_ADD_PAGE:
		addp = (struct sgx_enclave_add_page *)addr;
		ret = sgx_ioctl_add_page(sc, addp);
		break;
	case SGX_IOC_ENCLAVE_INIT:
		initp = (struct sgx_enclave_init *)addr;
		ret = sgx_ioctl_init(sc, initp);
		break;
	default:
		return (EINVAL);
	}

	return (ret);
}

static int
sgx_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t mapsize, struct vm_object **objp, int nprot)
{
	struct sgx_vm_handle *vmh;
	struct sgx_softc *sc;

	sc = &sgx_sc;

	dprintf("%s: mapsize 0x%lx, offset %lx\n",
	    __func__, mapsize, *offset);

	vmh = malloc(sizeof(struct sgx_vm_handle),
	    M_SGX, M_WAITOK | M_ZERO);
	vmh->sc = sc;
	vmh->size = mapsize;
	vmh->mem = cdev_pager_allocate(vmh, OBJT_MGTDEVICE, &sgx_pg_ops,
	    mapsize, nprot, *offset, NULL);
	if (vmh->mem == NULL) {
		free(vmh, M_SGX);
		return (ENOMEM);
	}

	VM_OBJECT_WLOCK(vmh->mem);
	vm_object_set_flag(vmh->mem, OBJ_PG_DTOR);
	VM_OBJECT_WUNLOCK(vmh->mem);

	*objp = vmh->mem;

	return (0);
}

static struct cdevsw sgx_cdevsw = {
	.d_version =		D_VERSION,
	.d_ioctl =		sgx_ioctl,
	.d_mmap_single =	sgx_mmap_single,
	.d_name =		"Intel SGX",
};

static int
sgx_get_epc_area(struct sgx_softc *sc)
{
	vm_offset_t epc_base_vaddr;
	u_int cp[4];
	int error;
	int i;

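	/*
	 * CPUID.(EAX=12H, ECX=2): EAX[31:12] and EBX[19:0] hold the low
	 * and high bits of the EPC section base physical address;
	 * ECX[31:12] and EDX[19:0] encode the section size the same way.
	 */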
	cpuid_count(SGX_CPUID, 0x2, cp);

	sc->epc_base = ((uint64_t)(cp[1] & 0xfffff) << 32) +
	    (cp[0] & 0xfffff000);
	sc->epc_size = ((uint64_t)(cp[3] & 0xfffff) << 32) +
	    (cp[2] & 0xfffff000);
	sc->npages = sc->epc_size / SGX_PAGE_SIZE;

	if (sc->epc_size == 0 || sc->epc_base == 0) {
		printf("%s: Incorrect EPC data: EPC base %lx, size %lu\n",
		    __func__, sc->epc_base, sc->epc_size);
		return (EINVAL);
	}

	if (cp[3] & 0xffff)
		sc->enclave_size_max = (1 << ((cp[3] >> 8) & 0xff));
	else
		sc->enclave_size_max = SGX_ENCL_SIZE_MAX_DEF;

	epc_base_vaddr = (vm_offset_t)pmap_mapdev_attr(sc->epc_base,
	    sc->epc_size, VM_MEMATTR_DEFAULT);

	sc->epc_pages = malloc(sizeof(struct epc_page) * sc->npages,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->npages; i++) {
		sc->epc_pages[i].base = epc_base_vaddr + SGX_PAGE_SIZE * i;
		sc->epc_pages[i].phys = sc->epc_base + SGX_PAGE_SIZE * i;
		sc->epc_pages[i].index = i;
	}

	sc->vmem_epc = vmem_create("SGX EPC", sc->epc_base, sc->epc_size,
	    PAGE_SIZE, PAGE_SIZE, M_FIRSTFIT | M_WAITOK);
	if (sc->vmem_epc == NULL) {
		printf("%s: Can't create vmem arena.\n", __func__);
		free(sc->epc_pages, M_SGX);
		return (EINVAL);
	}

	error = vm_phys_fictitious_reg_range(sc->epc_base,
	    sc->epc_base + sc->epc_size, VM_MEMATTR_DEFAULT);
	if (error) {
		printf("%s: Can't register fictitious space.\n", __func__);
		free(sc->epc_pages, M_SGX);
		return (EINVAL);
	}

	return (0);
}

static void
sgx_put_epc_area(struct sgx_softc *sc)
{

	vm_phys_fictitious_unreg_range(sc->epc_base,
	    sc->epc_base + sc->epc_size);

	free(sc->epc_pages, M_SGX);
}

static int
sgx_load(void)
{
	struct sgx_softc *sc;
	int error;

	sc = &sgx_sc;

	if ((cpu_stdext_feature & CPUID_STDEXT_SGX) == 0)
		return (ENXIO);

	error = sgx_get_epc_area(sc);
	if (error) {
		printf("%s: Failed to get Processor Reserved Memory area.\n",
		    __func__);
		return (ENXIO);
	}

	mtx_init(&sc->mtx_encls, "SGX ENCLS", NULL, MTX_DEF);
	mtx_init(&sc->mtx, "SGX driver", NULL, MTX_DEF);

	TAILQ_INIT(&sc->enclaves);

	sc->sgx_cdev = make_dev(&sgx_cdevsw, 0, UID_ROOT, GID_WHEEL,
	    0600, "isgx");

	sc->state |= SGX_STATE_RUNNING;

	printf("SGX initialized: EPC base 0x%lx size %ld (%d pages)\n",
	    sc->epc_base, sc->epc_size, sc->npages);

	return (0);
}

static int
sgx_unload(void)
{
	struct sgx_softc *sc;

	sc = &sgx_sc;

	if ((sc->state & SGX_STATE_RUNNING) == 0)
		return (0);

	mtx_lock(&sc->mtx);
	if (!TAILQ_EMPTY(&sc->enclaves)) {
		mtx_unlock(&sc->mtx);
		return (EBUSY);
	}
	sc->state &= ~SGX_STATE_RUNNING;
	mtx_unlock(&sc->mtx);

	destroy_dev(sc->sgx_cdev);

	vmem_destroy(sc->vmem_epc);
	sgx_put_epc_area(sc);

	mtx_destroy(&sc->mtx_encls);
	mtx_destroy(&sc->mtx);

	return (0);
}

static int
sgx_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		error = sgx_load();
		break;
	case MOD_UNLOAD:
		error = sgx_unload();
		break;
	default:
		error = 0;
		break;
	}

	return (error);
}

static moduledata_t sgx_kmod = {
	"sgx",
	sgx_handler,
	NULL
};

DECLARE_MODULE(sgx, sgx_kmod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_VERSION(sgx, 1);