/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

/*
 * Single volatile loads/stores of each width at virtual address va.
 * The volatile qualifier keeps the compiler from merging, reordering,
 * or eliding the access, as required for device memory.
 */
#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#define	readq(va)	(*(volatile uint64_t *) (va))

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

/* Enter the debugger by raising a software breakpoint trap (int $3). */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

/*
 * Bit scan forward: index of the least significant set bit in mask.
 * The hardware leaves the result undefined when mask == 0, so callers
 * (e.g. the ffs()/fls() family below) must test for a zero mask first.
 */
static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* 64-bit bit scan forward; result undefined when mask == 0. */
static __inline __pure2 u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/*
 * Bit scan reverse: index of the most significant set bit in mask.
 * Result undefined when mask == 0.
 */
static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* 64-bit bit scan reverse; result undefined when mask == 0. */
static __inline __pure2 u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* Flush and invalidate the cache line containing addr. */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

/*
 * Optimized cache line flush.  Hand-encoded as a 0x66 prefix on clflush
 * so it assembles even with toolchains that lack the clflushopt mnemonic.
 */
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

/* Write back the cache line containing addr without invalidating it. */
static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

/* Clear CR0.TS, re-enabling FPU/SIMD use after a context switch. */
static __inline void
clts(void)
{

	__asm __volatile("clts");
}

/*
 * Disable maskable interrupts on this CPU.  The "memory" clobber is a
 * compiler barrier so that memory accesses are not moved across the cli.
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

/* Execute cpuid for leaf ax; store %eax/%ebx/%ecx/%edx into p[0..3]. */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

/* As do_cpuid(), additionally selecting sub-leaf cx in %ecx. */
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}

/* Re-enable maskable interrupts on this CPU. */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

/*
 * Inline ffs()/fls() family.  The HAVE_INLINE_* defines tell generic
 * code not to supply out-of-line fallbacks.  All of these return a
 * 1-based bit index, or 0 when the argument is zero (the zero check
 * guards the undefined bsf/bsr result).
 */
#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FFSLL

/* long long is the same width as long on amd64, so delegate to ffsl(). */
static __inline __pure2 int
ffsll(long long mask)
{

	return (ffsl((long)mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (flsl((long)mask));
}

#endif /* _KERNEL */

/* Halt the CPU until the next interrupt. */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/* Read one byte from I/O port. */
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read one 32-bit word from I/O port. */
static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* String input: read count bytes from port into the buffer at addr. */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

/* String input of 16-bit words. */
static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

/* String input of 32-bit words. */
static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

/* Invalidate all caches WITHOUT writing dirty lines back — use with care. */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/* Read one 16-bit word from I/O port. */
static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Write one byte to I/O port. */
static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write one 32-bit word to I/O port. */
static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/* String output: write count bytes from addr to port. */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

/* String output of 16-bit words. */
static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

/* String output of 32-bit words. */
static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

/* Write one 16-bit word to I/O port. */
static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

/* Population count of mask (requires POPCNT support). */
static __inline u_long
popcntq(u_long mask)
{
	u_long	result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

/* Load fence: serializes load operations. */
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

/* Full memory fence: serializes loads and stores. */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

/* Store fence: serializes store operations. */
static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

/* Spin-wait hint for busy loops. */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

/* Return the current %rflags value. */
static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

/* Read the 64-bit model-specific register msr. */
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

/* Read only the low 32 bits of MSR msr (high half is discarded). */
static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

/* Read performance-monitoring counter pmc. */
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

/* Read the time-stamp counter. */
static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

/*
 * Serializing rdtsc variant; the processor ID delivered in %ecx is
 * discarded, hence the clobber.
 */
static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

/* Low 32 bits of the time-stamp counter (high half discarded). */
static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

/* Write back all dirty cache lines and invalidate the caches. */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

/* Load %rflags from rf. */
static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

/* Write the 64-bit value newval to model-specific register msr. */
static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

/* CR2 holds the faulting linear address after a page fault. */
static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

/*
 * Load a new page-table root into CR3.  The "memory" clobber keeps the
 * compiler from caching memory accesses across the address-space switch.
 */
static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

/* Read extended control register reg via xgetbv (e.g. XCR0). */
static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

/* Write extended control register reg via xsetbv. */
static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef	CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

/* invpcid operation types (invpcid %1 operand). */
#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

/* In-memory descriptor consumed by the invpcid instruction. */
struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

/* Invalidate TLB entries per descriptor d and operation type. */
static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

/* Return the %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Return the %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

/* Arm a monitor address range for a subsequent cpu_mwait(). */
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/* Wait until a store hits the monitored range (or an interrupt). */
static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE 644 #define MSR_GSBASE 0xc0000101 645 #endif 646 static __inline void 647 load_gs(u_short sel) 648 { 649 /* 650 * Preserve the gsbase value across the selector load. 651 * Note that we have to disable interrupts because the gsbase 652 * being trashed happens to be the kernel gsbase at the time. 653 */ 654 __asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq" 655 : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx"); 656 } 657 #else 658 /* Usable by userland */ 659 static __inline void 660 load_fs(u_short sel) 661 { 662 __asm __volatile("movw %0,%%fs" : : "rm" (sel)); 663 } 664 665 static __inline void 666 load_gs(u_short sel) 667 { 668 __asm __volatile("movw %0,%%gs" : : "rm" (sel)); 669 } 670 #endif 671 672 static __inline uint64_t 673 rdfsbase(void) 674 { 675 uint64_t x; 676 677 __asm __volatile("rdfsbase %0" : "=r" (x)); 678 return (x); 679 } 680 681 static __inline void 682 wrfsbase(uint64_t x) 683 { 684 685 __asm __volatile("wrfsbase %0" : : "r" (x)); 686 } 687 688 static __inline uint64_t 689 rdgsbase(void) 690 { 691 uint64_t x; 692 693 __asm __volatile("rdgsbase %0" : "=r" (x)); 694 return (x); 695 } 696 697 static __inline void 698 wrgsbase(uint64_t x) 699 { 700 701 __asm __volatile("wrgsbase %0" : : "r" (x)); 702 } 703 704 static __inline void 705 bare_lgdt(struct region_descriptor *addr) 706 { 707 __asm __volatile("lgdt (%0)" : : "r" (addr)); 708 } 709 710 static __inline void 711 sgdt(struct region_descriptor *addr) 712 { 713 char *loc; 714 715 loc = (char *)addr; 716 __asm __volatile("sgdt %0" : "=m" (*loc) : : "memory"); 717 } 718 719 static __inline void 720 lidt(struct region_descriptor *addr) 721 { 722 __asm __volatile("lidt (%0)" : : "r" (addr)); 723 } 724 725 static __inline void 726 sidt(struct region_descriptor *addr) 727 { 728 char *loc; 729 730 loc = (char *)addr; 731 __asm __volatile("sidt %0" : "=m" (*loc) : : "memory"); 732 } 733 734 static __inline void 735 lldt(u_short sel) 736 { 737 __asm __volatile("lldt %0" : 
: "r" (sel)); 738 } 739 740 static __inline u_short 741 sldt(void) 742 { 743 u_short sel; 744 745 __asm __volatile("sldt %0" : "=r" (sel)); 746 return (sel); 747 } 748 749 static __inline void 750 ltr(u_short sel) 751 { 752 __asm __volatile("ltr %0" : : "r" (sel)); 753 } 754 755 static __inline uint32_t 756 read_tr(void) 757 { 758 u_short sel; 759 760 __asm __volatile("str %0" : "=r" (sel)); 761 return (sel); 762 } 763 764 static __inline uint64_t 765 rdr0(void) 766 { 767 uint64_t data; 768 __asm __volatile("movq %%dr0,%0" : "=r" (data)); 769 return (data); 770 } 771 772 static __inline void 773 load_dr0(uint64_t dr0) 774 { 775 __asm __volatile("movq %0,%%dr0" : : "r" (dr0)); 776 } 777 778 static __inline uint64_t 779 rdr1(void) 780 { 781 uint64_t data; 782 __asm __volatile("movq %%dr1,%0" : "=r" (data)); 783 return (data); 784 } 785 786 static __inline void 787 load_dr1(uint64_t dr1) 788 { 789 __asm __volatile("movq %0,%%dr1" : : "r" (dr1)); 790 } 791 792 static __inline uint64_t 793 rdr2(void) 794 { 795 uint64_t data; 796 __asm __volatile("movq %%dr2,%0" : "=r" (data)); 797 return (data); 798 } 799 800 static __inline void 801 load_dr2(uint64_t dr2) 802 { 803 __asm __volatile("movq %0,%%dr2" : : "r" (dr2)); 804 } 805 806 static __inline uint64_t 807 rdr3(void) 808 { 809 uint64_t data; 810 __asm __volatile("movq %%dr3,%0" : "=r" (data)); 811 return (data); 812 } 813 814 static __inline void 815 load_dr3(uint64_t dr3) 816 { 817 __asm __volatile("movq %0,%%dr3" : : "r" (dr3)); 818 } 819 820 static __inline uint64_t 821 rdr6(void) 822 { 823 uint64_t data; 824 __asm __volatile("movq %%dr6,%0" : "=r" (data)); 825 return (data); 826 } 827 828 static __inline void 829 load_dr6(uint64_t dr6) 830 { 831 __asm __volatile("movq %0,%%dr6" : : "r" (dr6)); 832 } 833 834 static __inline uint64_t 835 rdr7(void) 836 { 837 uint64_t data; 838 __asm __volatile("movq %%dr7,%0" : "=r" (data)); 839 return (data); 840 } 841 842 static __inline void 843 load_dr7(uint64_t dr7) 844 { 845 
__asm __volatile("movq %0,%%dr7" : : "r" (dr7)); 846 } 847 848 static __inline register_t 849 intr_disable(void) 850 { 851 register_t rflags; 852 853 rflags = read_rflags(); 854 disable_intr(); 855 return (rflags); 856 } 857 858 static __inline void 859 intr_restore(register_t rflags) 860 { 861 write_rflags(rflags); 862 } 863 864 static __inline void 865 stac(void) 866 { 867 868 __asm __volatile("stac" : : : "cc"); 869 } 870 871 static __inline void 872 clac(void) 873 { 874 875 __asm __volatile("clac" : : : "cc"); 876 } 877 878 enum { 879 SGX_ECREATE = 0x0, 880 SGX_EADD = 0x1, 881 SGX_EINIT = 0x2, 882 SGX_EREMOVE = 0x3, 883 SGX_EDGBRD = 0x4, 884 SGX_EDGBWR = 0x5, 885 SGX_EEXTEND = 0x6, 886 SGX_ELDU = 0x8, 887 SGX_EBLOCK = 0x9, 888 SGX_EPA = 0xA, 889 SGX_EWB = 0xB, 890 SGX_ETRACK = 0xC, 891 }; 892 893 enum { 894 SGX_PT_SECS = 0x00, 895 SGX_PT_TCS = 0x01, 896 SGX_PT_REG = 0x02, 897 SGX_PT_VA = 0x03, 898 SGX_PT_TRIM = 0x04, 899 }; 900 901 int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx); 902 903 static __inline int 904 sgx_ecreate(void *pginfo, void *secs) 905 { 906 907 return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo, 908 (uint64_t)secs, 0)); 909 } 910 911 static __inline int 912 sgx_eadd(void *pginfo, void *epc) 913 { 914 915 return (sgx_encls(SGX_EADD, (uint64_t)pginfo, 916 (uint64_t)epc, 0)); 917 } 918 919 static __inline int 920 sgx_einit(void *sigstruct, void *secs, void *einittoken) 921 { 922 923 return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct, 924 (uint64_t)secs, (uint64_t)einittoken)); 925 } 926 927 static __inline int 928 sgx_eextend(void *secs, void *epc) 929 { 930 931 return (sgx_encls(SGX_EEXTEND, (uint64_t)secs, 932 (uint64_t)epc, 0)); 933 } 934 935 static __inline int 936 sgx_epa(void *epc) 937 { 938 939 return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0)); 940 } 941 942 static __inline int 943 sgx_eldu(uint64_t rbx, uint64_t rcx, 944 uint64_t rdx) 945 { 946 947 return (sgx_encls(SGX_ELDU, rbx, rcx, rdx)); 948 } 949 950 static 
__inline int 951 sgx_eremove(void *epc) 952 { 953 954 return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0)); 955 } 956 957 #else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */ 958 959 int breakpoint(void); 960 u_int bsfl(u_int mask); 961 u_int bsrl(u_int mask); 962 void clflush(u_long addr); 963 void clts(void); 964 void cpuid_count(u_int ax, u_int cx, u_int *p); 965 void disable_intr(void); 966 void do_cpuid(u_int ax, u_int *p); 967 void enable_intr(void); 968 void halt(void); 969 void ia32_pause(void); 970 u_char inb(u_int port); 971 u_int inl(u_int port); 972 void insb(u_int port, void *addr, size_t count); 973 void insl(u_int port, void *addr, size_t count); 974 void insw(u_int port, void *addr, size_t count); 975 register_t intr_disable(void); 976 void intr_restore(register_t rf); 977 void invd(void); 978 void invlpg(u_int addr); 979 void invltlb(void); 980 u_short inw(u_int port); 981 void lidt(struct region_descriptor *addr); 982 void lldt(u_short sel); 983 void load_cr0(u_long cr0); 984 void load_cr3(u_long cr3); 985 void load_cr4(u_long cr4); 986 void load_dr0(uint64_t dr0); 987 void load_dr1(uint64_t dr1); 988 void load_dr2(uint64_t dr2); 989 void load_dr3(uint64_t dr3); 990 void load_dr6(uint64_t dr6); 991 void load_dr7(uint64_t dr7); 992 void load_fs(u_short sel); 993 void load_gs(u_short sel); 994 void ltr(u_short sel); 995 void outb(u_int port, u_char data); 996 void outl(u_int port, u_int data); 997 void outsb(u_int port, const void *addr, size_t count); 998 void outsl(u_int port, const void *addr, size_t count); 999 void outsw(u_int port, const void *addr, size_t count); 1000 void outw(u_int port, u_short data); 1001 u_long rcr0(void); 1002 u_long rcr2(void); 1003 u_long rcr3(void); 1004 u_long rcr4(void); 1005 uint64_t rdmsr(u_int msr); 1006 uint32_t rdmsr32(u_int msr); 1007 uint64_t rdpmc(u_int pmc); 1008 uint64_t rdr0(void); 1009 uint64_t rdr1(void); 1010 uint64_t rdr2(void); 1011 uint64_t rdr3(void); 1012 uint64_t rdr6(void); 1013 uint64_t 
rdr7(void); 1014 uint64_t rdtsc(void); 1015 u_long read_rflags(void); 1016 u_int rfs(void); 1017 u_int rgs(void); 1018 void wbinvd(void); 1019 void write_rflags(u_int rf); 1020 void wrmsr(u_int msr, uint64_t newval); 1021 1022 #endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */ 1023 1024 void reset_dbregs(void); 1025 1026 #ifdef _KERNEL 1027 int rdmsr_safe(u_int msr, uint64_t *val); 1028 int wrmsr_safe(u_int msr, uint64_t newval); 1029 #endif 1030 1031 #endif /* !_MACHINE_CPUFUNC_H_ */ 1032