/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#define	readq(va)	(*(volatile uint64_t *) (va))

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
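
/*
 * Usage sketch (illustrative, not part of the API): the read*() and
 * write*() macros perform volatile accesses of the named width, as
 * used for memory-mapped device registers.  For a hypothetical 32-bit
 * register mapped at virtual address "va", a read-modify-write of
 * bit 0 looks like:
 *
 *	uint32_t v;
 *
 *	v = readl(va);
 *	writel(va, v | 0x00000001);
 */
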
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

#define	bsrl(mask)	(__builtin_clz(mask) ^ 0x1f)

#define	bsrq(mask)	(__builtin_clzl(mask) ^ 0x3f)

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax), "c" (cx));
}
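
/*
 * Usage sketch: CPUID leaf 0 returns the maximum basic leaf in EAX and
 * the 12-byte vendor string in EBX, EDX, ECX (in that order).  With
 * do_cpuid() filling p[0..3] with EAX, EBX, ECX, EDX:
 *
 *	u_int regs[4];
 *	char vendor[13];
 *
 *	do_cpuid(0, regs);
 *	memcpy(vendor, &regs[1], 4);
 *	memcpy(vendor + 4, &regs[3], 4);
 *	memcpy(vendor + 8, &regs[2], 4);
 *	vendor[12] = '\0';
 */
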
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)		__builtin_ffs(x)

#define	HAVE_INLINE_FFSL
#define	ffsl(x)		__builtin_ffsl(x)

#define	HAVE_INLINE_FFSLL
#define	ffsll(x)	__builtin_ffsll(x)

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
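
/*
 * Usage sketch: single-register device access usually follows an
 * index/data pattern.  For example, reading CMOS RTC register 0 (the
 * seconds counter) through the standard index (0x70) and data (0x71)
 * ports:
 *
 *	u_char sec;
 *
 *	outb(0x70, 0x00);
 *	sec = inb(0x71);
 *
 * The ins*()/outs*() variants above instead transfer "count" items
 * between a port and a memory buffer with one rep-prefixed instruction.
 */
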
static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}
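
/*
 * Usage sketch: a plain rdtsc() may execute out of order relative to
 * surrounding instructions.  For interval measurement, one of the
 * ordered variants keeps the TSC read from being hoisted above earlier
 * loads (assuming a CPU on which LFENCE orders rdtsc, as on modern
 * Intel and AMD parts):
 *
 *	uint64_t t0, t1, cycles;
 *
 *	t0 = rdtsc_ordered_lfence();
 *	... code being measured ...
 *	t1 = rdtsc_ordered_lfence();
 *	cycles = t1 - t0;
 */
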
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for entries for pages marked PG_G).
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef	CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works on CPUs without PCID support
 * as well as on CPUs with PCID enabled or disabled.  See IA-32 SDM
 * Vol. 3a 4.10.4.1 Operations that Invalidate TLBs and
 * Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
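
/*
 * Usage sketch (assumes the CPU supports the INVPCID instruction): to
 * drop all non-global mappings tagged with a particular PCID, build a
 * descriptor and issue a single-context invalidation.  "pcid" here is
 * a caller-supplied 12-bit PCID value; the addr field is ignored for
 * this invalidation type:
 *
 *	struct invpcid_descr d;
 *
 *	bzero(&d, sizeof(d));
 *	d.pcid = pcid;
 *	invpcid(&d, INVPCID_CTX);
 */
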
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" : "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" : : "a" (mask), "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
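
/*
 * Usage sketch: intr_disable()/intr_restore() bracket a short critical
 * section while preserving the caller's interrupt-enable state, so the
 * pair nests safely even when interrupts were already disabled:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... access per-CPU or interrupt-shared state ...
 *	intr_restore(s);
 */
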
__volatile("movq %%dr7,%0" : "=r" (data)); 843 return (data); 844 } 845 846 static __inline void 847 load_dr7(uint64_t dr7) 848 { 849 __asm __volatile("movq %0,%%dr7" : : "r" (dr7)); 850 } 851 852 static __inline register_t 853 intr_disable(void) 854 { 855 register_t rflags; 856 857 rflags = read_rflags(); 858 disable_intr(); 859 return (rflags); 860 } 861 862 static __inline void 863 intr_restore(register_t rflags) 864 { 865 write_rflags(rflags); 866 } 867 868 static __inline void 869 stac(void) 870 { 871 872 __asm __volatile("stac" : : : "cc"); 873 } 874 875 static __inline void 876 clac(void) 877 { 878 879 __asm __volatile("clac" : : : "cc"); 880 } 881 882 enum { 883 SGX_ECREATE = 0x0, 884 SGX_EADD = 0x1, 885 SGX_EINIT = 0x2, 886 SGX_EREMOVE = 0x3, 887 SGX_EDGBRD = 0x4, 888 SGX_EDGBWR = 0x5, 889 SGX_EEXTEND = 0x6, 890 SGX_ELDU = 0x8, 891 SGX_EBLOCK = 0x9, 892 SGX_EPA = 0xA, 893 SGX_EWB = 0xB, 894 SGX_ETRACK = 0xC, 895 }; 896 897 enum { 898 SGX_PT_SECS = 0x00, 899 SGX_PT_TCS = 0x01, 900 SGX_PT_REG = 0x02, 901 SGX_PT_VA = 0x03, 902 SGX_PT_TRIM = 0x04, 903 }; 904 905 int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx); 906 907 static __inline int 908 sgx_ecreate(void *pginfo, void *secs) 909 { 910 911 return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo, 912 (uint64_t)secs, 0)); 913 } 914 915 static __inline int 916 sgx_eadd(void *pginfo, void *epc) 917 { 918 919 return (sgx_encls(SGX_EADD, (uint64_t)pginfo, 920 (uint64_t)epc, 0)); 921 } 922 923 static __inline int 924 sgx_einit(void *sigstruct, void *secs, void *einittoken) 925 { 926 927 return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct, 928 (uint64_t)secs, (uint64_t)einittoken)); 929 } 930 931 static __inline int 932 sgx_eextend(void *secs, void *epc) 933 { 934 935 return (sgx_encls(SGX_EEXTEND, (uint64_t)secs, 936 (uint64_t)epc, 0)); 937 } 938 939 static __inline int 940 sgx_epa(void *epc) 941 { 942 943 return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0)); 944 } 945 946 static __inline int 947 sgx_eldu(uint64_t rbx, uint64_t rcx, 948 uint64_t rdx) 949 { 950 951 return (sgx_encls(SGX_ELDU, rbx, rcx, rdx)); 952 } 953 954 static __inline int 955 sgx_eremove(void *epc) 956 { 957 958 return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0)); 959 } 960 961 void reset_dbregs(void); 962 963 #ifdef _KERNEL 964 int rdmsr_safe(u_int msr, uint64_t *val); 965 int wrmsr_safe(u_int msr, uint64_t newval); 966 #endif 967 968 #endif /* !_MACHINE_CPUFUNC_H_ */ 969