/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifdef __i386__
#include <i386/cpufunc.h>
#else /* !__i386__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#define	readq(va)	(*(volatile uint64_t *) (va))

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

#define	bsrl(mask)	(__builtin_clz(mask) ^ 0x1f)

#define	bsrq(mask)	(__builtin_clzl(mask) ^ 0x3f)

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}
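
/*
 * Example (illustrative only, not part of this header) for the port
 * I/O wrappers above: reading a CMOS RTC register through the classic
 * index/data port pair at 0x70/0x71.
 *
 *	u_char sec;
 *
 *	outb(0x70, 0x00);	// select register 0 (seconds)
 *	sec = inb(0x71);	// read it back from the data port
 *
 * The "Nd" constraint lets a constant port below 0x100 be encoded as
 * an immediate operand of inb/outb instead of going through %dx.
 */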

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
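
/*
 * Example (illustrative only) for the TSC read functions above: timing
 * a code region with serialized reads.  Plain RDTSC may execute ahead
 * of earlier instructions, so the lfence-ordered variant keeps the
 * measurement from bleeding into the surrounding code.
 *
 *	uint64_t start, end;
 *
 *	start = rdtsc_ordered_lfence();
 *	... work being measured ...
 *	end = rdtsc_ordered_lfence();
 *	// end - start is in TSC ticks; converting to time needs the
 *	// TSC frequency, which this header does not provide.
 */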

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef	CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}
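
/*
 * Example (illustrative only) for rxcr() above: checking extended-state
 * support via XCR0.  With CR4.OSXSAVE enabled, rxcr(0) returns XCR0;
 * bit 0 is x87 state, bit 1 SSE, bit 2 AVX.
 *
 *	uint64_t xcr0;
 *
 *	xcr0 = rxcr(0);
 *	if ((xcr0 & 0x6) == 0x6) {
 *		// OS manages both SSE and AVX state via XSAVE.
 *	}
 *
 * Executing xgetbv with CR4.OSXSAVE clear raises #UD, so callers are
 * expected to verify OSXSAVE support first.
 */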

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" : "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" : : "a" (mask), "c" (0), "d" (0));
}
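
/*
 * Example (illustrative only) for invpcid() above: invalidating a
 * single mapping in a specific PCID.  The descriptor names the address
 * space; the type argument selects the scope (one address here, whole
 * contexts for INVPCID_CTX/INVPCID_CTXGLOB/INVPCID_ALLCTX).
 *
 *	struct invpcid_descr d;
 *
 *	d.pcid = pcid;		// target PCID, assumed known to the caller
 *	d.pad = 0;
 *	d.addr = va;		// virtual address to invalidate
 *	invpcid(&d, INVPCID_ADDR);
 *
 * The invpcid instruction is itself an optional feature (reported in
 * CPUID leaf 7), so callers are expected to check for it before use.
 */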

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
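
/*
 * Example (illustrative only) for the debug-register accessors above:
 * arming a hardware execution breakpoint in DR0.  DR7 bit 0 (L0)
 * locally enables DR0; with the R/W0 and LEN0 fields left at zero the
 * breakpoint fires on instruction fetch at the given address.
 *
 *	load_dr0(va);			// linear address to trap on
 *	load_dr7(rdr7() | 0x1);		// set L0
 *
 * Real users also have to manage DR6, which latches breakpoint-hit
 * status and is not cleared by hardware (see reset_dbregs() below).
 */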
__volatile("movq %%dr7,%0" : "=r" (data)); 808 return (data); 809 } 810 811 static __inline void 812 load_dr7(uint64_t dr7) 813 { 814 __asm __volatile("movq %0,%%dr7" : : "r" (dr7)); 815 } 816 817 static __inline register_t 818 intr_disable(void) 819 { 820 register_t rflags; 821 822 rflags = read_rflags(); 823 disable_intr(); 824 return (rflags); 825 } 826 827 static __inline void 828 intr_restore(register_t rflags) 829 { 830 write_rflags(rflags); 831 } 832 833 static __inline void 834 stac(void) 835 { 836 837 __asm __volatile("stac" : : : "cc"); 838 } 839 840 static __inline void 841 clac(void) 842 { 843 844 __asm __volatile("clac" : : : "cc"); 845 } 846 847 enum { 848 SGX_ECREATE = 0x0, 849 SGX_EADD = 0x1, 850 SGX_EINIT = 0x2, 851 SGX_EREMOVE = 0x3, 852 SGX_EDGBRD = 0x4, 853 SGX_EDGBWR = 0x5, 854 SGX_EEXTEND = 0x6, 855 SGX_ELDU = 0x8, 856 SGX_EBLOCK = 0x9, 857 SGX_EPA = 0xA, 858 SGX_EWB = 0xB, 859 SGX_ETRACK = 0xC, 860 }; 861 862 enum { 863 SGX_PT_SECS = 0x00, 864 SGX_PT_TCS = 0x01, 865 SGX_PT_REG = 0x02, 866 SGX_PT_VA = 0x03, 867 SGX_PT_TRIM = 0x04, 868 }; 869 870 int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx); 871 872 static __inline int 873 sgx_ecreate(void *pginfo, void *secs) 874 { 875 876 return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo, 877 (uint64_t)secs, 0)); 878 } 879 880 static __inline int 881 sgx_eadd(void *pginfo, void *epc) 882 { 883 884 return (sgx_encls(SGX_EADD, (uint64_t)pginfo, 885 (uint64_t)epc, 0)); 886 } 887 888 static __inline int 889 sgx_einit(void *sigstruct, void *secs, void *einittoken) 890 { 891 892 return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct, 893 (uint64_t)secs, (uint64_t)einittoken)); 894 } 895 896 static __inline int 897 sgx_eextend(void *secs, void *epc) 898 { 899 900 return (sgx_encls(SGX_EEXTEND, (uint64_t)secs, 901 (uint64_t)epc, 0)); 902 } 903 904 static __inline int 905 sgx_epa(void *epc) 906 { 907 908 return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0)); 909 } 910 911 static __inline int 912 sgx_eldu(uint64_t rbx, uint64_t rcx, 913 uint64_t rdx) 914 { 915 916 return (sgx_encls(SGX_ELDU, rbx, rcx, rdx)); 917 } 918 919 static __inline int 920 sgx_eremove(void *epc) 921 { 922 923 return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0)); 924 } 925 926 void reset_dbregs(void); 927 928 #ifdef _KERNEL 929 int rdmsr_safe(u_int msr, uint64_t *val); 930 int wrmsr_safe(u_int msr, uint64_t newval); 931 #endif 932 933 #endif /* !_MACHINE_CPUFUNC_H_ */ 934 935 #endif /* __i386__ */ 936