/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifdef __i386__
#include <i386/cpufunc.h>
#else /* !__i386__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#define	readq(va)	(*(volatile uint64_t *) (va))

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax), "c" (cx));
}
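/*
 * Illustrative sketch (an addition, not part of the original header):
 * probing a feature bit with cpuid_count().  The leaf/bit shown
 * (leaf 7, subleaf 0, %ebx bit 0 = FSGSBASE) matches the SDM, but real
 * code would use the constants from <machine/specialreg.h> rather than
 * the bare numbers used here.
 *
 *	u_int regs[4];
 *
 *	cpuid_count(0x7, 0x0, regs);
 *	if (regs[1] & 0x1)	// %ebx bit 0: FSGSBASE supported
 *		;		// safe to use rdfsbase()/wrfsbase() below
 */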
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
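/*
 * Illustrative sketch (an addition, not part of the original header):
 * a byte write followed by a read on I/O port 0x80, the legacy POST
 * diagnostic port, traditionally used as a cheap I/O-bus delay:
 *
 *	outb(0x80, 0);
 *	(void)inb(0x80);
 *
 * The string forms (insb()/outsb() and friends) move count-sized
 * blocks between a port and memory with a single "rep" instruction.
 */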
static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}
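/*
 * Illustrative sketch (an addition, not part of the original header):
 * measuring a short interval in TSC cycles.  A plain rdtsc() may be
 * executed out of order; the _ordered_ variants fence first, lfence
 * being the appropriate barrier on Intel CPUs and mfence on some AMD
 * CPUs.  do_work() is a hypothetical workload.
 *
 *	uint64_t t0, t1;
 *
 *	t0 = rdtsc_ordered_lfence();
 *	do_work();
 *	t1 = rdtsc_ordered_lfence();
 *	printf("%ju cycles\n", (uintmax_t)(t1 - t0));
 */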
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
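/*
 * Illustrative sketch (an addition, not part of the original header):
 * dropping all non-global TLB entries tagged with one PCID, assuming
 * the CPU advertises INVPCID support.  The PCID value 5 is made up
 * for the example.
 *
 *	struct invpcid_descr d;
 *
 *	bzero(&d, sizeof(d));
 *	d.pcid = 5;
 *	invpcid(&d, INVPCID_CTX);
 */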
#define	INVLPGB_VA	0x0001
#define	INVLPGB_PCID	0x0002
#define	INVLPGB_ASID	0x0004
#define	INVLPGB_GLOB	0x0008
#define	INVLPGB_FIN	0x0010
#define	INVLPGB_NEST	0x0020

#define	INVLPGB_DESCR(asid, pcid)	(((pcid) << 16) | (asid))

#define	INVLPGB_2M_CNT	(1u << 31)

static __inline void
invlpgb(uint64_t rax, uint32_t edx, uint32_t ecx)
{
	__asm __volatile("invlpgb" : : "a" (rax), "d" (edx), "c" (ecx));
}

static __inline void
tlbsync(void)
{
	__asm __volatile("tlbsync");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
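/*
 * Illustrative sketch (an addition, not part of the original header):
 * the usual MONITOR/MWAIT wait pattern.  The address is armed first,
 * the condition is re-checked to close the race, and then the CPU
 * sleeps until the monitored line is written (or an interrupt
 * arrives); hint 0 requests the C1 state.  "flag" is hypothetical.
 *
 *	cpu_monitor(&flag, 0, 0);
 *	if (flag == 0)
 *		cpu_mwait(0, 0);
 */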
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" : "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" : : "a" (mask), "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
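/*
 * Illustrative sketch (an addition, not part of the original header):
 * the save/disable/restore idiom for a short critical section; unlike
 * a bare enable_intr(), intr_restore() does not re-enable interrupts
 * if they were already disabled on entry.
 *
 *	register_t saved;
 *
 *	saved = intr_disable();
 *	... touch state shared with interrupt handlers ...
 *	intr_restore(saved);
 */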
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS	= 0x00,
	SGX_PT_TCS	= 0x01,
	SGX_PT_REG	= 0x02,
	SGX_PT_VA	= 0x03,
	SGX_PT_TRIM	= 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx, uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */

#endif /* __i386__ */