/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifdef __i386__
#include <i386/cpufunc.h>
#else /* !__i386__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#define	readq(va)	(*(volatile uint64_t *) (va))

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

#define	bsrl(mask)	(__builtin_clz(mask) ^ 0x1f)

#define	bsrq(mask)	(__builtin_clzl(mask) ^ 0x3f)

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax), "c" (cx));
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}
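
/*
 * Usage sketch (illustrative only): fetching the CPU vendor string
 * with do_cpuid().  Leaf 0 returns the maximum basic leaf in eax and
 * the 12-byte vendor string in ebx, edx, ecx, in that order:
 *
 *	u_int regs[4];
 *	char vendor[13];
 *
 *	do_cpuid(0, regs);
 *	memcpy(vendor + 0, &regs[1], 4);	(ebx)
 *	memcpy(vendor + 4, &regs[3], 4);	(edx)
 *	memcpy(vendor + 8, &regs[2], 4);	(ecx)
 *	vendor[12] = '\0';
 */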

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)		__builtin_ffs(x)

#define	HAVE_INLINE_FFSL
#define	ffsl(x)		__builtin_ffsl(x)

#define	HAVE_INLINE_FFSLL
#define	ffsll(x)	__builtin_ffsll(x)

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}
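
/*
 * Usage sketch (illustrative only): the inb()/outb() pair is the
 * usual way to talk to legacy index/data port devices.  For example,
 * a CMOS RTC register is read by writing its index to port 0x70 and
 * reading the value back from port 0x71:
 *
 *	outb(0x70, reg);
 *	val = inb(0x71);
 */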

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}
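
/*
 * Usage sketch (illustrative only): measuring a short interval in TSC
 * ticks.  The lfence-ordered variant keeps the two reads from being
 * speculatively hoisted past the code under measurement:
 *
 *	uint64_t start, end;
 *
 *	start = rdtsc_ordered_lfence();
 *	(code under measurement)
 *	end = rdtsc_ordered_lfence();
 *	(elapsed ticks: end - start)
 */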

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" : "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" : : "a" (mask), "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
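
/*
 * Usage sketch (illustrative only; "pcid" is assumed to be supplied
 * by the caller): dropping all non-global TLB entries tagged with a
 * single PCID via invpcid(), on CPUs that implement the instruction:
 *
 *	struct invpcid_descr d;
 *
 *	bzero(&d, sizeof(d));
 *	d.pcid = pcid;
 *	invpcid(&d, INVPCID_CTX);
 */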

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
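
/*
 * Usage sketch (illustrative only; "addr" is assumed to be supplied
 * by the caller): arming a hardware execution breakpoint in DR0.
 * Setting bit 0 (L0) of DR7 enables DR0 locally; the R/W0 and LEN0
 * fields are left zero, which selects an instruction breakpoint:
 *
 *	load_dr0(addr);
 *	load_dr7(rdr7() | 0x1);
 */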
__volatile("movq %%dr7,%0" : "=r" (data)); 847 return (data); 848 } 849 850 static __inline void 851 load_dr7(uint64_t dr7) 852 { 853 __asm __volatile("movq %0,%%dr7" : : "r" (dr7)); 854 } 855 856 static __inline register_t 857 intr_disable(void) 858 { 859 register_t rflags; 860 861 rflags = read_rflags(); 862 disable_intr(); 863 return (rflags); 864 } 865 866 static __inline void 867 intr_restore(register_t rflags) 868 { 869 write_rflags(rflags); 870 } 871 872 static __inline void 873 stac(void) 874 { 875 876 __asm __volatile("stac" : : : "cc"); 877 } 878 879 static __inline void 880 clac(void) 881 { 882 883 __asm __volatile("clac" : : : "cc"); 884 } 885 886 enum { 887 SGX_ECREATE = 0x0, 888 SGX_EADD = 0x1, 889 SGX_EINIT = 0x2, 890 SGX_EREMOVE = 0x3, 891 SGX_EDGBRD = 0x4, 892 SGX_EDGBWR = 0x5, 893 SGX_EEXTEND = 0x6, 894 SGX_ELDU = 0x8, 895 SGX_EBLOCK = 0x9, 896 SGX_EPA = 0xA, 897 SGX_EWB = 0xB, 898 SGX_ETRACK = 0xC, 899 }; 900 901 enum { 902 SGX_PT_SECS = 0x00, 903 SGX_PT_TCS = 0x01, 904 SGX_PT_REG = 0x02, 905 SGX_PT_VA = 0x03, 906 SGX_PT_TRIM = 0x04, 907 }; 908 909 int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx); 910 911 static __inline int 912 sgx_ecreate(void *pginfo, void *secs) 913 { 914 915 return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo, 916 (uint64_t)secs, 0)); 917 } 918 919 static __inline int 920 sgx_eadd(void *pginfo, void *epc) 921 { 922 923 return (sgx_encls(SGX_EADD, (uint64_t)pginfo, 924 (uint64_t)epc, 0)); 925 } 926 927 static __inline int 928 sgx_einit(void *sigstruct, void *secs, void *einittoken) 929 { 930 931 return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct, 932 (uint64_t)secs, (uint64_t)einittoken)); 933 } 934 935 static __inline int 936 sgx_eextend(void *secs, void *epc) 937 { 938 939 return (sgx_encls(SGX_EEXTEND, (uint64_t)secs, 940 (uint64_t)epc, 0)); 941 } 942 943 static __inline int 944 sgx_epa(void *epc) 945 { 946 947 return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0)); 948 } 949 950 static __inline int 951 sgx_eldu(uint64_t rbx, uint64_t rcx, 952 uint64_t rdx) 953 { 954 955 return (sgx_encls(SGX_ELDU, rbx, rcx, rdx)); 956 } 957 958 static __inline int 959 sgx_eremove(void *epc) 960 { 961 962 return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0)); 963 } 964 965 void reset_dbregs(void); 966 967 #ifdef _KERNEL 968 int rdmsr_safe(u_int msr, uint64_t *val); 969 int wrmsr_safe(u_int msr, uint64_t newval); 970 #endif 971 972 #endif /* !_MACHINE_CPUFUNC_H_ */ 973 974 #endif /* __i386__ */ 975