/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */
#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

#define	readb(va)	(*(volatile u_int8_t *) (va))
#define	readw(va)	(*(volatile u_int16_t *) (va))
#define	readl(va)	(*(volatile u_int32_t *) (va))
#define	readq(va)	(*(volatile u_int64_t *) (va))

#define	writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define	writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax), "c" (cx));
}

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}
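
/*
 * Illustrative sketch, not part of the original header: ffsl()/flsl()
 * return 1-based bit indices (e.g. flsl(0x10) == 5, ffsl(0) == 0), so an
 * integer log2 falls out of flsl() directly.  The helper name
 * example_ilog2() is hypothetical.
 */
static __inline int
example_ilog2(long x)
{

	return (x == 0 ? -1 : flsl(x) - 1);	/* e.g. example_ilog2(0x10) == 4 */
}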
#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}
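
/*
 * Illustrative sketch, not part of the original header: rdtsc() assembles
 * the 64-bit time-stamp counter from %edx:%eax, so a crude cycle-based
 * spin wait can be built from it and ia32_pause().  The helper name
 * example_tsc_spin() is hypothetical and ignores TSC rate and migration
 * issues.
 */
static __inline void
example_tsc_spin(u_int64_t cycles)
{
	u_int64_t start;

	start = rdtsc();
	while (rdtsc() - start < cycles)
		ia32_pause();
}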
static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

static __inline u_int
rfs(void)
{
	u_int sel;

	__asm __volatile("mov %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_int
rgs(void)
{
	u_int sel;

	__asm __volatile("mov %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_int
rss(void)
{
	u_int sel;

	__asm __volatile("mov %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_int sel)
{
	__asm __volatile("mov %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_int sel)
{
	__asm __volatile("mov %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, int extensions, int hints)
{
	__asm __volatile("monitor;"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(int extensions, int hints)
{
	__asm __volatile("mwait;" : : "a" (hints), "c" (extensions));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_int sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; mov %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}
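
/*
 * Illustrative sketch, not part of the original header: rdmsr()/wrmsr()
 * compose into the usual MSR read-modify-write pattern, which is what the
 * rdmsr/wrmsr bracket in load_fs() above performs in assembly to preserve
 * MSR_FSBASE.  The helper name example_msr_set_bits() is hypothetical.
 */
static __inline void
example_msr_set_bits(u_int msr, u_int64_t bits)
{

	wrmsr(msr, rdmsr(msr) | bits);
}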
#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_int sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; mov %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_int sel)
{
	__asm __volatile("mov %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_int sel)
{
	__asm __volatile("mov %0,%%gs" : : "rm" (sel));
}
#endif

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;

	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
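
/*
 * Illustrative sketch, not part of the original header: intr_disable()
 * and intr_restore() bracket a short critical section while preserving
 * the caller's interrupt state.  The helper name example_critical_store()
 * is hypothetical usage, not an API provided here.
 */
static __inline void
example_critical_store(volatile u_long *p, u_long v)
{
	register_t saved;

	saved = intr_disable();
	*p = v;
	intr_restore(saved);
}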
#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

void	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_long addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(u_int64_t dr0);
void	load_dr1(u_int64_t dr1);
void	load_dr2(u_int64_t dr2);
void	load_dr3(u_int64_t dr3);
void	load_dr4(u_int64_t dr4);
void	load_dr5(u_int64_t dr5);
void	load_dr6(u_int64_t dr6);
void	load_dr7(u_int64_t dr7);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
u_int64_t	rdmsr(u_int msr);
u_int64_t	rdpmc(u_int pmc);
u_int64_t	rdr0(void);
u_int64_t	rdr1(void);
u_int64_t	rdr2(void);
u_int64_t	rdr3(void);
u_int64_t	rdr4(void);
u_int64_t	rdr5(void);
u_int64_t	rdr6(void);
u_int64_t	rdr7(void);
u_int64_t	rdtsc(void);
u_long	read_rflags(void);
u_int	rfs(void);
u_int	rgs(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, u_int64_t newval);

#endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */