/*-
 * Copyright (c) 2017 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by BAE Systems, the University of Cambridge
 * Computer Laboratory, and Memorial University under DARPA/AFRL contract
 * FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
 * (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Design overview.
 *
 * The driver provides a character device for the mmap(2) and ioctl(2) system
 * calls, allowing a user to manage isolated compartments ("enclaves") in the
 * user VA space.
 *
 * The driver's duties are EPC page management, enclave management, and user
 * data validation.
 *
 * This driver requires Intel SGX support in hardware.
 *
 * /dev/sgx:
 *    .mmap:
 *        sgx_mmap_single() allocates a VM object with the following pager
 *        operations:
 *              a) sgx_pg_ctor():
 *                  The VM object constructor does nothing.
 *              b) sgx_pg_dtor():
 *                  The VM object destructor destroys the SGX enclave
 *                  associated with the object: it frees all the EPC pages
 *                  allocated for the enclave and removes the enclave.
 *              c) sgx_pg_fault():
 *                  The VM object fault handler does nothing.
 *
 *    .ioctl:
 *        sgx_ioctl():
 *               a) SGX_IOC_ENCLAVE_CREATE
 *                   Adds the enclave SECS page: the initial step of enclave
 *                   creation.
 *               b) SGX_IOC_ENCLAVE_ADD_PAGE
 *                   Adds TCS and REG pages to the enclave.
 *               c) SGX_IOC_ENCLAVE_INIT
 *                   Finalizes enclave creation.
 *
 * Enclave lifecycle:
 *          .-- ECREATE  -- Add SECS page
 *   Kernel |   EADD     -- Add TCS, REG pages
 *    space |   EEXTEND  -- Measure the page (take unique hash)
 *    ENCLS |   EPA      -- Allocate version array page
 *          '-- EINIT    -- Finalize enclave creation
 *   User   .-- EENTER   -- Go to entry point of enclave
 *    space |   EEXIT    -- Exit back to main application
 *    ENCLU '-- ERESUME  -- Resume enclave execution (e.g. after exception)
 *
 * Enclave lifecycle from the driver's point of view:
 *  1) The user calls mmap() on /dev/sgx: we allocate a VM object.
 *  2) The user calls ioctl SGX_IOC_ENCLAVE_CREATE: we look up the VM object
 *     associated with the user process created in step 1, create the SECS
 *     physical page and store it in the enclave's VM object queue at the
 *     special index SGX_SECS_VM_OBJECT_INDEX.
 *  3) The user calls ioctl SGX_IOC_ENCLAVE_ADD_PAGE: we look up the enclave
 *     created in step 2, create a TCS or REG physical page and map it at the
 *     user-specified address of the enclave's VM object.
 *  4) The user finalizes enclave creation with an ioctl SGX_IOC_ENCLAVE_INIT
 *     call.
 *  5) The user can freely enter and exit the enclave using ENCLU instructions
 *     from userspace: the driver does nothing here.
 *  6) The user calls munmap(2) (or the process owning the enclave dies):
 *     we destroy the enclave associated with the object.
 *
 * EPC page types and their indexes in the VM object queue:
 *   - The PT_SECS index is special and equals SGX_SECS_VM_OBJECT_INDEX (-1);
 *   - PT_TCS and PT_REG indexes are specified by the user in the addr field
 *     of the ioctl request data and determined as follows:
 *       pidx = OFF_TO_IDX(addp->addr - vmh->base);
 *   - The PT_VA index is special; a PT_VA page is created for PT_REG, PT_TCS
 *     and PT_SECS pages, and its index is determined by the formula:
 *       va_page_idx = - SGX_VA_PAGES_OFFS - (page_idx / SGX_VA_PAGE_SLOTS);
 *     A PT_VA page can hold versions of up to 512 pages, and the slot for
 *     each page within the PT_VA page is determined as follows:
 *       va_slot_idx = page_idx % SGX_VA_PAGE_SLOTS;
 *   - PT_TRIM is unused.
 *
 * Locking:
 *    The SGX ENCLS instruction set has concurrency limitations: some
 *    instructions can't be executed at the same time on different CPUs.
 *    We use the sc->mtx_encls lock around them to prevent concurrent
 *    execution.
 *    The sc->mtx lock is used to manage the list of created enclaves and
 *    the state of the SGX driver.
 *
 * Eviction of EPC pages:
 *    Eviction support is not implemented in this driver; however, the driver
 *    manages VA (version array) pages: it allocates a VA slot for each EPC
 *    page.  This will be required for eviction support in the future.
 *    VA pages and slots are currently unused.
 *
 * Intel® 64 and IA-32 Architectures Software Developer's Manual
 * https://software.intel.com/en-us/articles/intel-sdm
 */
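/*
 * For illustration, the expected userspace call sequence is roughly as
 * follows.  This is only a sketch, not part of the driver: the request
 * structures, field names and the "isgx" device node match what the
 * handlers below consume, while secs, secinfo, page, sig, token and
 * offset stand for buffers and values an SGX runtime would normally
 * prepare.
 *
 *	int fd = open("/dev/isgx", O_RDWR);
 *	void *base = mmap(NULL, secs.size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *
 *	struct sgx_enclave_create create;
 *	create.src = (uint64_t)&secs;
 *	ioctl(fd, SGX_IOC_ENCLAVE_CREATE, &create);
 *
 *	struct sgx_enclave_add_page addp;
 *	addp.addr = (uint64_t)base + offset;
 *	addp.src = (uint64_t)page;
 *	addp.secinfo = (uint64_t)&secinfo;
 *	addp.mrmask = 0xffff;
 *	ioctl(fd, SGX_IOC_ENCLAVE_ADD_PAGE, &addp);
 *
 *	struct sgx_enclave_init initp;
 *	initp.addr = (uint64_t)base;
 *	initp.sigstruct = (uint64_t)sig;
 *	initp.einittoken = (uint64_t)token;
 *	ioctl(fd, SGX_IOC_ENCLAVE_INIT, &initp);
 */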
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/cpufunc.h>
#include <machine/sgx.h>
#include <machine/sgxreg.h>

#include <amd64/sgx/sgxvar.h>

#define	SGX_DEBUG
#undef	SGX_DEBUG

#ifdef	SGX_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif
static struct cdev_pager_ops sgx_pg_ops;
struct sgx_softc sgx_sc;

static int
sgx_get_epc_page(struct sgx_softc *sc, struct epc_page **epc)
{
	vmem_addr_t addr;
	int i;

	if (vmem_alloc(sc->vmem_epc, PAGE_SIZE, M_FIRSTFIT | M_NOWAIT,
	    &addr) == 0) {
		i = (addr - sc->epc_base) / PAGE_SIZE;
		*epc = &sc->epc_pages[i];
		return (0);
	}

	return (ENOMEM);
}

static void
sgx_put_epc_page(struct sgx_softc *sc, struct epc_page *epc)
{
	vmem_addr_t addr;

	if (epc == NULL)
		return;

	addr = (epc->index * PAGE_SIZE) + sc->epc_base;
	vmem_free(sc->vmem_epc, addr, PAGE_SIZE);
}

static int
sgx_va_slot_init_by_index(struct sgx_softc *sc, vm_object_t object,
    uint64_t idx)
{
	struct epc_page *epc;
	vm_page_t page;
	vm_page_t p;
	int ret;

	VM_OBJECT_ASSERT_WLOCKED(object);

	p = vm_page_lookup(object, idx);
	if (p == NULL) {
		ret = sgx_get_epc_page(sc, &epc);
		if (ret) {
			dprintf("%s: No free EPC pages available.\n",
			    __func__);
			return (ret);
		}

		mtx_lock(&sc->mtx_encls);
		sgx_epa((void *)epc->base);
		mtx_unlock(&sc->mtx_encls);

		page = PHYS_TO_VM_PAGE(epc->phys);

		vm_page_insert(page, object, idx);
		page->valid = VM_PAGE_BITS_ALL;
	}

	return (0);
}

static int
sgx_va_slot_init(struct sgx_softc *sc,
    struct sgx_enclave *enclave,
    uint64_t addr)
{
	vm_pindex_t pidx;
	uint64_t va_page_idx;
	uint64_t idx;
	vm_object_t object;
	int va_slot;
	int ret;

	object = enclave->object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	pidx = OFF_TO_IDX(addr);

	va_slot = pidx % SGX_VA_PAGE_SLOTS;
	va_page_idx = pidx / SGX_VA_PAGE_SLOTS;
	idx = - SGX_VA_PAGES_OFFS - va_page_idx;

	ret = sgx_va_slot_init_by_index(sc, object, idx);

	return (ret);
}
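/*
 * A worked example of the index math above, assuming SGX_VA_PAGE_SLOTS
 * is 512 (a PT_VA page holds versions for up to 512 pages, per the
 * design overview) and assuming SGX_VA_PAGES_OFFS is also 512: an
 * enclave page at offset 0x5000 from the enclave base gives pidx = 5,
 * hence va_slot = 5 % 512 = 5 and va_page_idx = 5 / 512 = 0, so its
 * version slot lives in the PT_VA page at object index -512 - 0 = -512.
 * Pages with pidx 0 through 511 all share that PT_VA page; pidx 512
 * would allocate a second PT_VA page at index -513.
 */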
static int
sgx_mem_find(struct sgx_softc *sc, uint64_t addr,
    vm_map_entry_t *entry0, vm_object_t *object0)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;

	map = &curproc->p_vmspace->vm_map;

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
		dprintf("%s: Can't find enclave.\n", __func__);
		return (EINVAL);
	}

	object = entry->object.vm_object;
	if (object == NULL || object->handle == NULL) {
		vm_map_unlock_read(map);
		return (EINVAL);
	}

	if (object->type != OBJT_MGTDEVICE ||
	    object->un_pager.devp.ops != &sgx_pg_ops) {
		vm_map_unlock_read(map);
		return (EINVAL);
	}

	vm_object_reference(object);

	*object0 = object;
	*entry0 = entry;
	vm_map_unlock_read(map);

	return (0);
}

static int
sgx_enclave_find(struct sgx_softc *sc, uint64_t addr,
    struct sgx_enclave **encl)
{
	struct sgx_vm_handle *vmh;
	struct sgx_enclave *enclave;
	vm_map_entry_t entry;
	vm_object_t object;
	int ret;

	ret = sgx_mem_find(sc, addr, &entry, &object);
	if (ret)
		return (ret);

	vmh = object->handle;
	if (vmh == NULL) {
		vm_object_deallocate(object);
		return (EINVAL);
	}

	enclave = vmh->enclave;
	if (enclave == NULL || enclave->object == NULL) {
		vm_object_deallocate(object);
		return (EINVAL);
	}

	*encl = enclave;

	return (0);
}

static int
sgx_enclave_alloc(struct sgx_softc *sc, struct secs *secs,
    struct sgx_enclave **enclave0)
{
	struct sgx_enclave *enclave;

	enclave = malloc(sizeof(struct sgx_enclave),
	    M_SGX, M_WAITOK | M_ZERO);

	enclave->base = secs->base;
	enclave->size = secs->size;

	*enclave0 = enclave;

	return (0);
}

static void
sgx_epc_page_remove(struct sgx_softc *sc,
    struct epc_page *epc)
{

	mtx_lock(&sc->mtx_encls);
	sgx_eremove((void *)epc->base);
	mtx_unlock(&sc->mtx_encls);
}

static void
sgx_page_remove(struct sgx_softc *sc, vm_page_t p)
{
	struct epc_page *epc;
	vm_paddr_t pa;
	uint64_t offs;

	vm_page_lock(p);
	vm_page_remove(p);
	vm_page_unlock(p);

	dprintf("%s: p->pidx %ld\n", __func__, p->pindex);

	pa = VM_PAGE_TO_PHYS(p);
	epc = &sc->epc_pages[0];
	offs = (pa - epc->phys) / PAGE_SIZE;
	epc = &sc->epc_pages[offs];

	sgx_epc_page_remove(sc, epc);
	sgx_put_epc_page(sc, epc);
}

static void
sgx_enclave_remove(struct sgx_softc *sc,
    struct sgx_enclave *enclave)
{
	vm_object_t object;
	vm_page_t p, p_secs, p_next;

	mtx_lock(&sc->mtx);
	TAILQ_REMOVE(&sc->enclaves, enclave, next);
	mtx_unlock(&sc->mtx);

	object = enclave->object;

	VM_OBJECT_WLOCK(object);

	/*
	 * First remove all the pages except SECS,
	 * then remove SECS page.
	 */
	p_secs = NULL;
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		if (p->pindex == SGX_SECS_VM_OBJECT_INDEX) {
			p_secs = p;
			continue;
		}
		sgx_page_remove(sc, p);
	}

	/* Now remove SECS page */
	if (p_secs != NULL)
		sgx_page_remove(sc, p_secs);

	KASSERT(TAILQ_EMPTY(&object->memq) == 1, ("not empty"));
	KASSERT(object->resident_page_count == 0, ("count"));

	VM_OBJECT_WUNLOCK(object);
}

static int
sgx_measure_page(struct sgx_softc *sc, struct epc_page *secs,
    struct epc_page *epc, uint16_t mrmask)
{
	int i, j;
	int ret;

	mtx_lock(&sc->mtx_encls);

	for (i = 0, j = 1; i < PAGE_SIZE; i += 0x100, j <<= 1) {
		if (!(j & mrmask))
			continue;

		ret = sgx_eextend((void *)secs->base,
		    (void *)(epc->base + i));
		if (ret == SGX_EFAULT) {
			mtx_unlock(&sc->mtx_encls);
			return (ret);
		}
	}

	mtx_unlock(&sc->mtx_encls);

	return (0);
}
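/*
 * A note on mrmask in sgx_measure_page() above: EEXTEND measures a page
 * in 256-byte chunks, so bit j of the 16-bit mrmask selects the chunk
 * at offset j * 0x100 within the 4 KiB page.  For example, mrmask
 * 0xffff measures all 16 chunks, while 0x0001 measures only the first
 * 256 bytes and 0x0000 skips measurement entirely.
 */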
static int
sgx_secs_validate(struct sgx_softc *sc, struct secs *secs)
{
	struct secs_attr *attr;
	int i;

	if (secs->size == 0)
		return (EINVAL);

	/* BASEADDR must be naturally aligned on an SECS.SIZE boundary. */
	if (secs->base & (secs->size - 1))
		return (EINVAL);

	/* SECS.SIZE must be at least 2 pages. */
	if (secs->size < 2 * PAGE_SIZE)
		return (EINVAL);

	if ((secs->size & (secs->size - 1)) != 0)
		return (EINVAL);

	attr = &secs->attributes;

	if (attr->reserved1 != 0 ||
	    attr->reserved2 != 0 ||
	    attr->reserved3 != 0)
		return (EINVAL);

	for (i = 0; i < SECS_ATTR_RSV4_SIZE; i++)
		if (attr->reserved4[i])
			return (EINVAL);

	/*
	 * Intel® Software Guard Extensions Programming Reference
	 * 6.7.2 Relevant Fields in Various Data Structures
	 * 6.7.2.1 SECS.ATTRIBUTES.XFRM
	 * XFRM[1:0] must be set to 0x3.
	 */
	if ((attr->xfrm & 0x3) != 0x3)
		return (EINVAL);

	if (!attr->mode64bit)
		return (EINVAL);

	if (secs->size > sc->enclave_size_max)
		return (EINVAL);

	for (i = 0; i < SECS_RSV1_SIZE; i++)
		if (secs->reserved1[i])
			return (EINVAL);

	for (i = 0; i < SECS_RSV2_SIZE; i++)
		if (secs->reserved2[i])
			return (EINVAL);

	for (i = 0; i < SECS_RSV3_SIZE; i++)
		if (secs->reserved3[i])
			return (EINVAL);

	for (i = 0; i < SECS_RSV4_SIZE; i++)
		if (secs->reserved4[i])
			return (EINVAL);

	return (0);
}
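/*
 * An example of the constraints checked in sgx_secs_validate() above:
 * SECS.SIZE must be a power of two of at least two pages, and
 * SECS.BASEADDR must be naturally aligned to it.  A 2 MiB enclave based
 * at 0x200000 passes; the same enclave based at 0x100000 fails the
 * alignment check (0x100000 & 0x1fffff != 0), and a size of 0x3000
 * bytes fails the power-of-two check (0x3000 & 0x2fff != 0).
 */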
static int
sgx_tcs_validate(struct tcs *tcs)
{
	int i;

	if ((tcs->flags) ||
	    (tcs->ossa & (PAGE_SIZE - 1)) ||
	    (tcs->ofsbasgx & (PAGE_SIZE - 1)) ||
	    (tcs->ogsbasgx & (PAGE_SIZE - 1)) ||
	    ((tcs->fslimit & 0xfff) != 0xfff) ||
	    ((tcs->gslimit & 0xfff) != 0xfff))
		return (EINVAL);

	for (i = 0; i < nitems(tcs->reserved3); i++)
		if (tcs->reserved3[i])
			return (EINVAL);

	return (0);
}

static void
sgx_tcs_dump(struct sgx_softc *sc, struct tcs *t)
{

	dprintf("t->flags %lx\n", t->flags);
	dprintf("t->ossa %lx\n", t->ossa);
	dprintf("t->cssa %x\n", t->cssa);
	dprintf("t->nssa %x\n", t->nssa);
	dprintf("t->oentry %lx\n", t->oentry);
	dprintf("t->ofsbasgx %lx\n", t->ofsbasgx);
	dprintf("t->ogsbasgx %lx\n", t->ogsbasgx);
	dprintf("t->fslimit %x\n", t->fslimit);
	dprintf("t->gslimit %x\n", t->gslimit);
}

static int
sgx_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct sgx_vm_handle *vmh;

	vmh = handle;
	if (vmh == NULL) {
		dprintf("%s: vmh not found.\n", __func__);
		return (0);
	}

	dprintf("%s: vmh->base %lx foff 0x%lx size 0x%lx\n",
	    __func__, vmh->base, foff, size);

	return (0);
}

static void
sgx_pg_dtor(void *handle)
{
	struct sgx_vm_handle *vmh;
	struct sgx_softc *sc;

	vmh = handle;
	if (vmh == NULL) {
		dprintf("%s: vmh not found.\n", __func__);
		return;
	}

	sc = vmh->sc;
	if (sc == NULL) {
		dprintf("%s: sc is NULL\n", __func__);
		return;
	}

	if (vmh->enclave == NULL) {
		dprintf("%s: Enclave not found.\n", __func__);
		return;
	}

	sgx_enclave_remove(sc, vmh->enclave);

	free(vmh->enclave, M_SGX);
	free(vmh, M_SGX);
}

static int
sgx_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	/*
	 * The purpose of this trivial handler is to handle the race
	 * when a user tries to access the mmapped region before or
	 * during the enclave creation ioctl calls.
	 */

	dprintf("%s: offset 0x%lx\n", __func__, offset);

	return (VM_PAGER_FAIL);
}

static struct cdev_pager_ops sgx_pg_ops = {
	.cdev_pg_ctor = sgx_pg_ctor,
	.cdev_pg_dtor = sgx_pg_dtor,
	.cdev_pg_fault = sgx_pg_fault,
};
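/*
 * A note on the pager ops above: the ioctl handlers insert EPC-backed
 * pages into the VM object directly and mark them valid, so in normal
 * operation no fault ever reaches sgx_pg_fault(); it only fires when a
 * user touches the mapping before or during the enclave creation
 * ioctls (the race mentioned above), and returning VM_PAGER_FAIL fails
 * that access instead of mapping a page.  Teardown is driven entirely
 * by sgx_pg_dtor(): the OBJ_PG_DTOR flag set in sgx_mmap_single()
 * below ensures the destructor runs when the object is destroyed
 * (step 6 of the overview: munmap(2) or process exit).
 */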
static void
sgx_insert_epc_page_by_index(vm_page_t page, vm_object_t object,
    vm_pindex_t pidx)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	vm_page_insert(page, object, pidx);
	page->valid = VM_PAGE_BITS_ALL;
}

static void
sgx_insert_epc_page(struct sgx_enclave *enclave,
    struct epc_page *epc, uint64_t addr)
{
	vm_pindex_t pidx;
	vm_page_t page;

	VM_OBJECT_ASSERT_WLOCKED(enclave->object);

	pidx = OFF_TO_IDX(addr);
	page = PHYS_TO_VM_PAGE(epc->phys);

	sgx_insert_epc_page_by_index(page, enclave->object, pidx);
}

static int
sgx_ioctl_create(struct sgx_softc *sc, struct sgx_enclave_create *param)
{
	struct sgx_vm_handle *vmh;
	vm_map_entry_t entry;
	vm_page_t p;
	struct page_info pginfo;
	struct secinfo secinfo;
	struct sgx_enclave *enclave;
	struct epc_page *epc;
	struct secs *secs;
	vm_object_t object;
	vm_page_t page;
	int ret;

	epc = NULL;
	secs = NULL;
	enclave = NULL;
	object = NULL;

	/* SGX Enclave Control Structure (SECS) */
	secs = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
	ret = copyin((void *)param->src, secs, sizeof(struct secs));
	if (ret) {
		dprintf("%s: Can't copy SECS.\n", __func__);
		goto error;
	}

	ret = sgx_secs_validate(sc, secs);
	if (ret) {
		dprintf("%s: SECS validation failed.\n", __func__);
		goto error;
	}

	ret = sgx_mem_find(sc, secs->base, &entry, &object);
	if (ret) {
		dprintf("%s: Can't find vm_map.\n", __func__);
		goto error;
	}

	vmh = object->handle;
	if (!vmh) {
		dprintf("%s: Can't find vmh.\n", __func__);
		ret = ENXIO;
		goto error;
	}

	dprintf("%s: entry start %lx offset %lx\n",
	    __func__, entry->start, entry->offset);
	vmh->base = (entry->start - entry->offset);

	ret = sgx_enclave_alloc(sc, secs, &enclave);
	if (ret) {
		dprintf("%s: Can't alloc enclave.\n", __func__);
		goto error;
	}
	enclave->object = object;
	enclave->vmh = vmh;

	memset(&secinfo, 0, sizeof(struct secinfo));
	memset(&pginfo, 0, sizeof(struct page_info));
	pginfo.linaddr = 0;
	pginfo.srcpge = (uint64_t)secs;
	pginfo.secinfo = &secinfo;
	pginfo.secs = 0;

	ret = sgx_get_epc_page(sc, &epc);
	if (ret) {
		dprintf("%s: Failed to get free epc page.\n", __func__);
		goto error;
	}
	enclave->secs_epc_page = epc;

	VM_OBJECT_WLOCK(object);
	p = vm_page_lookup(object, SGX_SECS_VM_OBJECT_INDEX);
	if (p) {
		VM_OBJECT_WUNLOCK(object);
		/* SECS page already added. */
		ret = ENXIO;
		goto error;
	}

	ret = sgx_va_slot_init_by_index(sc, object,
	    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
	if (ret) {
		VM_OBJECT_WUNLOCK(object);
		dprintf("%s: Can't init va slot.\n", __func__);
		goto error;
	}

	mtx_lock(&sc->mtx);
	if ((sc->state & SGX_STATE_RUNNING) == 0) {
		mtx_unlock(&sc->mtx);
		/* Remove VA page that was just created for SECS page. */
		p = vm_page_lookup(enclave->object,
		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
		sgx_page_remove(sc, p);
		VM_OBJECT_WUNLOCK(object);
		ret = ENXIO;
		goto error;
	}
	mtx_lock(&sc->mtx_encls);
	ret = sgx_ecreate(&pginfo, (void *)epc->base);
	mtx_unlock(&sc->mtx_encls);
	if (ret == SGX_EFAULT) {
		dprintf("%s: gp fault\n", __func__);
		mtx_unlock(&sc->mtx);
		/* Remove VA page that was just created for SECS page. */
		p = vm_page_lookup(enclave->object,
		    - SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
		sgx_page_remove(sc, p);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}

	TAILQ_INSERT_TAIL(&sc->enclaves, enclave, next);
	mtx_unlock(&sc->mtx);

	vmh->enclave = enclave;

	page = PHYS_TO_VM_PAGE(epc->phys);
	sgx_insert_epc_page_by_index(page, enclave->object,
	    SGX_SECS_VM_OBJECT_INDEX);

	VM_OBJECT_WUNLOCK(object);

	/* Release the reference. */
	vm_object_deallocate(object);

	free(secs, M_SGX);

	return (0);

error:
	free(secs, M_SGX);
	sgx_put_epc_page(sc, epc);
	free(enclave, M_SGX);
	vm_object_deallocate(object);

	return (ret);
}
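/*
 * A note on the VA slot index used for the SECS page above: with
 * SGX_SECS_VM_OBJECT_INDEX equal to -1 (see the overview) and assuming
 * SGX_VA_PAGES_OFFS is 512, the expression
 * -SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX evaluates to -511, so
 * the SECS version slot gets a PT_VA page of its own that cannot
 * collide with the PT_VA pages of regular enclave pages, which occupy
 * indexes -512, -513, and so on.
 */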
static int
sgx_ioctl_add_page(struct sgx_softc *sc,
    struct sgx_enclave_add_page *addp)
{
	struct epc_page *secs_epc_page;
	struct sgx_enclave *enclave;
	struct sgx_vm_handle *vmh;
	struct epc_page *epc;
	struct page_info pginfo;
	struct secinfo secinfo;
	vm_object_t object;
	void *tmp_vaddr;
	uint64_t page_type;
	struct tcs *t;
	uint64_t addr;
	uint64_t pidx;
	vm_page_t p;
	int ret;

	tmp_vaddr = NULL;
	epc = NULL;
	object = NULL;

	/* Find and get a reference to the VM object. */
	ret = sgx_enclave_find(sc, addp->addr, &enclave);
	if (ret) {
		dprintf("%s: Failed to find enclave.\n", __func__);
		goto error;
	}

	object = enclave->object;
	KASSERT(object != NULL, ("vm object is NULL\n"));
	vmh = object->handle;

	ret = sgx_get_epc_page(sc, &epc);
	if (ret) {
		dprintf("%s: Failed to get free epc page.\n", __func__);
		goto error;
	}

	memset(&secinfo, 0, sizeof(struct secinfo));
	ret = copyin((void *)addp->secinfo, &secinfo,
	    sizeof(struct secinfo));
	if (ret) {
		dprintf("%s: Failed to copy secinfo.\n", __func__);
		goto error;
	}

	tmp_vaddr = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
	ret = copyin((void *)addp->src, tmp_vaddr, PAGE_SIZE);
	if (ret) {
		dprintf("%s: Failed to copy page.\n", __func__);
		goto error;
	}

	page_type = (secinfo.flags & SECINFO_FLAGS_PT_M) >>
	    SECINFO_FLAGS_PT_S;
	if (page_type != SGX_PT_TCS && page_type != SGX_PT_REG) {
		dprintf("%s: page can't be added.\n", __func__);
		ret = EINVAL;
		goto error;
	}
	if (page_type == SGX_PT_TCS) {
		t = (struct tcs *)tmp_vaddr;
		ret = sgx_tcs_validate(t);
		if (ret) {
			dprintf("%s: TCS page validation failed.\n",
			    __func__);
			goto error;
		}
		sgx_tcs_dump(sc, t);
	}

	addr = (addp->addr - vmh->base);
	pidx = OFF_TO_IDX(addr);

	VM_OBJECT_WLOCK(object);
	p = vm_page_lookup(object, pidx);
	if (p) {
		VM_OBJECT_WUNLOCK(object);
		/* Page already added. */
		ret = ENXIO;
		goto error;
	}

	ret = sgx_va_slot_init(sc, enclave, addr);
	if (ret) {
		VM_OBJECT_WUNLOCK(object);
		dprintf("%s: Can't init va slot.\n", __func__);
		goto error;
	}

	secs_epc_page = enclave->secs_epc_page;
	memset(&pginfo, 0, sizeof(struct page_info));
	pginfo.linaddr = (uint64_t)addp->addr;
	pginfo.srcpge = (uint64_t)tmp_vaddr;
	pginfo.secinfo = &secinfo;
	pginfo.secs = (uint64_t)secs_epc_page->base;

	mtx_lock(&sc->mtx_encls);
	ret = sgx_eadd(&pginfo, (void *)epc->base);
	if (ret == SGX_EFAULT) {
		dprintf("%s: gp fault on eadd\n", __func__);
		mtx_unlock(&sc->mtx_encls);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}
	mtx_unlock(&sc->mtx_encls);

	ret = sgx_measure_page(sc, enclave->secs_epc_page, epc, addp->mrmask);
	if (ret == SGX_EFAULT) {
		dprintf("%s: gp fault on eextend\n", __func__);
		sgx_epc_page_remove(sc, epc);
		VM_OBJECT_WUNLOCK(object);
		goto error;
	}

	sgx_insert_epc_page(enclave, epc, addr);

	VM_OBJECT_WUNLOCK(object);

	/* Release the reference. */
	vm_object_deallocate(object);

	free(tmp_vaddr, M_SGX);

	return (0);

error:
	free(tmp_vaddr, M_SGX);
	sgx_put_epc_page(sc, epc);
	vm_object_deallocate(object);

	return (ret);
}

static int
sgx_ioctl_init(struct sgx_softc *sc, struct sgx_enclave_init *initp)
{
	struct epc_page *secs_epc_page;
	struct sgx_enclave *enclave;
	struct thread *td;
	void *tmp_vaddr;
	void *einittoken;
	void *sigstruct;
	vm_object_t object;
	int retry;
	int ret;

	td = curthread;
	tmp_vaddr = NULL;
	object = NULL;

	dprintf("%s: addr %lx, sigstruct %lx, einittoken %lx\n",
	    __func__, initp->addr, initp->sigstruct, initp->einittoken);

	/* Find and get a reference to the VM object. */
	ret = sgx_enclave_find(sc, initp->addr, &enclave);
	if (ret) {
		dprintf("%s: Failed to find enclave.\n", __func__);
		goto error;
	}

	object = enclave->object;

	tmp_vaddr = malloc(PAGE_SIZE, M_SGX, M_WAITOK | M_ZERO);
	sigstruct = tmp_vaddr;
	einittoken = (void *)((uint64_t)sigstruct + PAGE_SIZE / 2);

	ret = copyin((void *)initp->sigstruct, sigstruct,
	    SGX_SIGSTRUCT_SIZE);
	if (ret) {
		dprintf("%s: Failed to copy SIGSTRUCT page.\n", __func__);
		goto error;
	}

	ret = copyin((void *)initp->einittoken, einittoken,
	    SGX_EINITTOKEN_SIZE);
	if (ret) {
		dprintf("%s: Failed to copy EINITTOKEN page.\n", __func__);
		goto error;
	}

	secs_epc_page = enclave->secs_epc_page;
	retry = 16;
	do {
		mtx_lock(&sc->mtx_encls);
		ret = sgx_einit(sigstruct, (void *)secs_epc_page->base,
		    einittoken);
		mtx_unlock(&sc->mtx_encls);
		dprintf("%s: sgx_einit returned %d\n", __func__, ret);
	} while (ret == SGX_UNMASKED_EVENT && retry--);

	if (ret) {
		dprintf("%s: Failed init enclave: %d\n", __func__, ret);
		td->td_retval[0] = ret;
		ret = 0;
	}

error:
	free(tmp_vaddr, M_SGX);

	/* Release the reference. */
	vm_object_deallocate(object);

	return (ret);
}
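/*
 * A note on the retry loop in sgx_ioctl_init() above: EINIT fails with
 * SGX_UNMASKED_EVENT if an unmasked event (e.g. an interrupt) arrives
 * while the instruction is executing, so the driver retries up to 16
 * times before giving up.  Any other non-zero EINIT result is an SGX
 * status code rather than an errno, so it is handed back to userspace
 * in td_retval[0] while the ioctl itself returns 0.
 */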
static int
sgx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct sgx_enclave_add_page *addp;
	struct sgx_enclave_create *param;
	struct sgx_enclave_init *initp;
	struct sgx_softc *sc;
	int ret;
	int len;

	sc = &sgx_sc;

	len = IOCPARM_LEN(cmd);

	dprintf("%s: cmd %lx, addr %lx, len %d\n",
	    __func__, cmd, (uint64_t)addr, len);

	if (len > SGX_IOCTL_MAX_DATA_LEN)
		return (EINVAL);

	switch (cmd) {
	case SGX_IOC_ENCLAVE_CREATE:
		param = (struct sgx_enclave_create *)addr;
		ret = sgx_ioctl_create(sc, param);
		break;
	case SGX_IOC_ENCLAVE_ADD_PAGE:
		addp = (struct sgx_enclave_add_page *)addr;
		ret = sgx_ioctl_add_page(sc, addp);
		break;
	case SGX_IOC_ENCLAVE_INIT:
		initp = (struct sgx_enclave_init *)addr;
		ret = sgx_ioctl_init(sc, initp);
		break;
	default:
		return (EINVAL);
	}

	return (ret);
}

static int
sgx_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t mapsize, struct vm_object **objp, int nprot)
{
	struct sgx_vm_handle *vmh;
	struct sgx_softc *sc;

	sc = &sgx_sc;

	dprintf("%s: mapsize 0x%lx, offset %lx\n",
	    __func__, mapsize, *offset);

	vmh = malloc(sizeof(struct sgx_vm_handle),
	    M_SGX, M_WAITOK | M_ZERO);
	vmh->sc = sc;
	vmh->size = mapsize;
	vmh->mem = cdev_pager_allocate(vmh, OBJT_MGTDEVICE, &sgx_pg_ops,
	    mapsize, nprot, *offset, NULL);
	if (vmh->mem == NULL) {
		free(vmh, M_SGX);
		return (ENOMEM);
	}

	VM_OBJECT_WLOCK(vmh->mem);
	vm_object_set_flag(vmh->mem, OBJ_PG_DTOR);
	VM_OBJECT_WUNLOCK(vmh->mem);

	*objp = vmh->mem;

	return (0);
}

static struct cdevsw sgx_cdevsw = {
	.d_version =		D_VERSION,
	.d_ioctl =		sgx_ioctl,
	.d_mmap_single =	sgx_mmap_single,
	.d_name =		"Intel SGX",
};

static int
sgx_get_epc_area(struct sgx_softc *sc)
{
	vm_offset_t epc_base_vaddr;
	u_int cp[4];
	int error;
	int i;

	cpuid_count(SGX_CPUID, 0x2, cp);

	sc->epc_base = ((uint64_t)(cp[1] & 0xfffff) << 32) +
	    (cp[0] & 0xfffff000);
	sc->epc_size = ((uint64_t)(cp[3] & 0xfffff) << 32) +
	    (cp[2] & 0xfffff000);
	sc->npages = sc->epc_size / SGX_PAGE_SIZE;

	/* The maximum enclave size is reported by SGX CPUID sub-leaf 0. */
	cpuid_count(SGX_CPUID, 0x0, cp);
	if (cp[3] & 0xffff)
		sc->enclave_size_max = (1UL << ((cp[3] >> 8) & 0xff));
	else
		sc->enclave_size_max = SGX_ENCL_SIZE_MAX_DEF;

	epc_base_vaddr = (vm_offset_t)pmap_mapdev_attr(sc->epc_base,
	    sc->epc_size, VM_MEMATTR_DEFAULT);

	sc->epc_pages = malloc(sizeof(struct epc_page) * sc->npages,
	    M_SGX, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->npages; i++) {
		sc->epc_pages[i].base = epc_base_vaddr + SGX_PAGE_SIZE * i;
		sc->epc_pages[i].phys = sc->epc_base + SGX_PAGE_SIZE * i;
		sc->epc_pages[i].index = i;
	}

	sc->vmem_epc = vmem_create("SGX EPC", sc->epc_base, sc->epc_size,
	    PAGE_SIZE, PAGE_SIZE, M_FIRSTFIT | M_WAITOK);
	if (sc->vmem_epc == NULL) {
		printf("%s: Can't create vmem arena.\n", __func__);
		free(sc->epc_pages, M_SGX);
		return (EINVAL);
	}

	error = vm_phys_fictitious_reg_range(sc->epc_base,
	    sc->epc_base + sc->epc_size, VM_MEMATTR_DEFAULT);
	if (error) {
		printf("%s: Can't register fictitious space.\n", __func__);
		free(sc->epc_pages, M_SGX);
		return (EINVAL);
	}

	return (0);
}
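/*
 * A worked example of the CPUID decoding in sgx_get_epc_area() above,
 * using hypothetical register values: CPUID.(EAX=SGX_CPUID, ECX=2)
 * returning EAX = 0x70200001, EBX = 0, ECX = 0x05d80001, EDX = 0
 * describes an EPC section at physical base 0x70200000 (EBX[19:0] hold
 * bits 51:32 of the base, EAX[31:12] bits 31:12) with a size of
 * 0x05d80000 bytes, i.e. 0x5d80 4 KiB pages.  Sub-leaf 0 EDX[15:8]
 * reports log2 of the maximum 64-bit enclave size; a reported value of
 * 36 gives enclave_size_max = 1UL << 36 = 64 GiB, while a zero report
 * falls back to SGX_ENCL_SIZE_MAX_DEF.
 */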
static void
sgx_put_epc_area(struct sgx_softc *sc)
{

	vm_phys_fictitious_unreg_range(sc->epc_base,
	    sc->epc_base + sc->epc_size);

	free(sc->epc_pages, M_SGX);
}

static int
sgx_load(void)
{
	struct sgx_softc *sc;
	int error;

	sc = &sgx_sc;

	if ((cpu_stdext_feature & CPUID_STDEXT_SGX) == 0)
		return (ENXIO);

	error = sgx_get_epc_area(sc);
	if (error) {
		printf("%s: Failed to get Processor Reserved Memory area.\n",
		    __func__);
		return (ENXIO);
	}

	mtx_init(&sc->mtx_encls, "SGX ENCLS", NULL, MTX_DEF);
	mtx_init(&sc->mtx, "SGX driver", NULL, MTX_DEF);

	TAILQ_INIT(&sc->enclaves);

	sc->sgx_cdev = make_dev(&sgx_cdevsw, 0, UID_ROOT, GID_WHEEL,
	    0600, "isgx");

	sc->state |= SGX_STATE_RUNNING;

	printf("SGX initialized: EPC base 0x%lx size %ld (%d pages)\n",
	    sc->epc_base, sc->epc_size, sc->npages);

	return (0);
}

static int
sgx_unload(void)
{
	struct sgx_softc *sc;

	sc = &sgx_sc;

	if ((sc->state & SGX_STATE_RUNNING) == 0)
		return (0);

	mtx_lock(&sc->mtx);
	if (!TAILQ_EMPTY(&sc->enclaves)) {
		mtx_unlock(&sc->mtx);
		return (EBUSY);
	}
	sc->state &= ~SGX_STATE_RUNNING;
	mtx_unlock(&sc->mtx);

	destroy_dev(sc->sgx_cdev);

	vmem_destroy(sc->vmem_epc);
	sgx_put_epc_area(sc);

	mtx_destroy(&sc->mtx_encls);
	mtx_destroy(&sc->mtx);

	return (0);
}

static int
sgx_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		error = sgx_load();
		break;
	case MOD_UNLOAD:
		error = sgx_unload();
		break;
	default:
		error = 0;
		break;
	}

	return (error);
}

static moduledata_t sgx_kmod = {
	"sgx",
	sgx_handler,
	NULL
};

DECLARE_MODULE(sgx, sgx_kmod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_VERSION(sgx, 1);