/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
 * Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
 *   All Rights Reserved
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * General assembly language routines.
 * It is the intent of this file to contain routines that are
 * independent of the specific kernel architecture, and those that are
 * common across kernel architectures.
 * As architectures diverge, and implementations of specific
 * architecture-dependent routines change, the routines should be moved
 * from this file into the respective ../`arch -k`/subr.s file.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/panic.h>
#include <sys/ontrap.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/reboot.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#if defined(__lint)
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/archsystm.h>
#include <sys/byteorder.h>
#include <sys/dtrace.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */
#include <sys/dditypes.h>

/*
 * on_fault()
 * Catch lofault faults.  Like setjmp, except that it returns 1
 * if the code following it causes an uncorrectable fault.
 * Turned off by calling no_fault().
 */
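/*
 * A minimal usage sketch (not from the original source), following the
 * usual kernel label_t conventions; the error value is illustrative:
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();		// a fault fired in the guarded code
 *		return (EFAULT);
 *	}
 *	... touch possibly-unmapped memory ...
 *	no_fault();			// disarm the handler on success
 */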
#if defined(__lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

void
no_fault(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_fault)
	movq	%gs:CPU_THREAD, %rsi
	leaq	catch_fault(%rip), %rdx
	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	jmp	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movq	%gs:CPU_THREAD, %rsi
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#elif defined(__i386)

	ENTRY(on_fault)
	movl	%gs:CPU_THREAD, %edx
	movl	4(%esp), %eax			/* jumpbuf address */
	leal	catch_fault, %ecx
	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	pushl	%ecx
	call	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 */

#if defined(lint)

void
on_trap_trampoline(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_trap_trampoline)
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONTRAP(%rsi), %rdi
	addq	$OT_JMPBUF, %rdi
	jmp	longjmp
	SET_SIZE(on_trap_trampoline)

#elif defined(__i386)

	ENTRY(on_trap_trampoline)
	movl	%gs:CPU_THREAD, %eax
	movl	T_ONTRAP(%eax), %eax
	addl	$OT_JMPBUF, %eax
	pushl	%eax
	call	longjmp
	SET_SIZE(on_trap_trampoline)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */
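/*
 * A minimal usage sketch (not from the original source); the protection
 * flag shown is illustrative, see <sys/ontrap.h> for the real set:
 *
 *	on_trap_data_t otd;
 *
 *	if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
 *		no_trap();		// the trap fired; unwind the handler
 *		return (EFAULT);
 *	}
 *	... code that may take a data-access trap ...
 *	no_trap();
 */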
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_trap)
	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
	xorl	%ecx, %ecx
	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
	cmpq	%rdi, %rcx			/* if (otp == %rcx) */
	je	0f				/*	don't modify t_ontrap */

	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */

0:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
	jmp	setjmp
	SET_SIZE(on_trap)

#elif defined(__i386)

	ENTRY(on_trap)
	movl	4(%esp), %eax			/* %eax = otp */
	movl	8(%esp), %edx			/* %edx = prot */

	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
	cmpl	%eax, %ecx			/* if (otp == %ecx) */
	je	0f				/*	don't modify t_ontrap */

	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */

0:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
	movl	%eax, 4(%esp)			/* put %eax back on the stack */
	jmp	setjmp				/* let setjmp do the rest */
	SET_SIZE(on_trap)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * of type label_t.
 */
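/*
 * A sketch of the intended semantics (not from the original source):
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		// direct return: the stack pointer, frame pointer and
 *		// callee-saved registers have been captured in jb
 *		...
 *		longjmp(&jb);	// never returns; resumes after setjmp()
 *	} else {
 *		// reached via longjmp(), with setjmp() "returning" 1
 *	}
 */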
#if defined(__lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else	/* __lint */

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif	/* LABEL_PC != 0 */

#if defined(__amd64)

	ENTRY(setjmp)
	movq	%rsp, LABEL_SP(%rdi)
	movq	%rbp, LABEL_RBP(%rdi)
	movq	%rbx, LABEL_RBX(%rdi)
	movq	%r12, LABEL_R12(%rdi)
	movq	%r13, LABEL_R13(%rdi)
	movq	%r14, LABEL_R14(%rdi)
	movq	%r15, LABEL_R15(%rdi)
	movq	(%rsp), %rdx		/* return address */
	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movq	LABEL_SP(%rdi), %rsp
	movq	LABEL_RBP(%rdi), %rbp
	movq	LABEL_RBX(%rdi), %rbx
	movq	LABEL_R12(%rdi), %r12
	movq	LABEL_R13(%rdi), %r13
	movq	LABEL_R14(%rdi), %r14
	movq	LABEL_R15(%rdi), %r15
	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
	movq	%rdx, (%rsp)
	xorl	%eax, %eax
	incl	%eax			/* return 1 */
	ret
	SET_SIZE(longjmp)

#elif defined(__i386)

	ENTRY(setjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	%ebp, LABEL_EBP(%edx)
	movl	%ebx, LABEL_EBX(%edx)
	movl	%esi, LABEL_ESI(%edx)
	movl	%edi, LABEL_EDI(%edx)
	movl	%esp, 4(%edx)
	movl	(%esp), %ecx		/* %eip (return address) */
	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
	subl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	LABEL_EBP(%edx), %ebp
	movl	LABEL_EBX(%edx), %ebx
	movl	LABEL_ESI(%edx), %esi
	movl	LABEL_EDI(%edx), %edi
	movl	4(%edx), %esp
	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
	movl	$1, %eax
	addl	$4, %esp		/* pop ret adr */
	jmp	*%ecx			/* indirect */
	SET_SIZE(longjmp)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * if a() calls b() calls caller(),
 * caller() returns return address in a().
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence.)
 */
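/*
 * Illustration (not from the original source): given the call chain
 *
 *	void a(void) { b(); }
 *	void b(void) { caddr_t pc = caller(); ... }
 *
 * the value obtained in b() is the address of the instruction in a()
 * immediately following its call to b().
 */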
#if defined(__lint)

caddr_t
caller(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(caller)
	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#elif defined(__i386)

	ENTRY(caller)
	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * if a() calls callee(), callee() returns the
 * return address in a();
 */

#if defined(__lint)

caddr_t
callee(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(callee)
	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#elif defined(__i386)

	ENTRY(callee)
	movl	(%esp), %eax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * return the current frame pointer
 */

#if defined(__lint)

greg_t
getfp(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getfp)
	movq	%rbp, %rax
	ret
	SET_SIZE(getfp)

#elif defined(__i386)

	ENTRY(getfp)
	movl	%ebp, %eax
	ret
	SET_SIZE(getfp)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Invalidate a single page table entry in the TLB
 */

#if defined(__lint)

/* ARGSUSED */
void
mmu_tlbflush_entry(caddr_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(mmu_tlbflush_entry)
	invlpg	(%rdi)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#elif defined(__i386)

	ENTRY(mmu_tlbflush_entry)
	movl	4(%esp), %eax
	invlpg	(%eax)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#endif	/* __i386 */
#endif	/* __lint */


/*
 * Get/Set the value of various control registers
 */

#if defined(__lint)

ulong_t
getcr0(void)
{ return (0); }

/* ARGSUSED */
void
setcr0(ulong_t value)
{}

ulong_t
getcr2(void)
{ return (0); }

ulong_t
getcr3(void)
{ return (0); }

/* ARGSUSED */
void
setcr3(ulong_t val)
{}

void
reload_cr3(void)
{}

ulong_t
getcr4(void)
{ return (0); }

/* ARGSUSED */
void
setcr4(ulong_t val)
{}

#if defined(__amd64)

ulong_t
getcr8(void)
{ return (0); }

/* ARGSUSED */
void
setcr8(ulong_t val)
{}

#endif	/* __amd64 */

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getcr0)
	movq	%cr0, %rax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movq	%rdi, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
	movq	%cr2, %rax
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movq	%cr3, %rax
	ret
	SET_SIZE(getcr3)

	ENTRY(setcr3)
	movq	%rdi, %cr3
	ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movq	%cr3, %rdi
	movq	%rdi, %cr3
	ret
	SET_SIZE(reload_cr3)

	ENTRY(getcr4)
	movq	%cr4, %rax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movq	%rdi, %cr4
	ret
	SET_SIZE(setcr4)

	ENTRY(getcr8)
	movq	%cr8, %rax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movq	%rdi, %cr8
	ret
	SET_SIZE(setcr8)

#elif defined(__i386)

	ENTRY(getcr0)
	movl	%cr0, %eax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
	movl	%cr2, %eax
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movl	%cr3, %eax
	ret
	SET_SIZE(getcr3)

	ENTRY(setcr3)
	movl	4(%esp), %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movl	%cr3, %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(reload_cr3)

	ENTRY(getcr4)
	movl	%cr4, %eax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movl	4(%esp), %eax
	movl	%eax, %cr4
	ret
	SET_SIZE(setcr4)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
uint32_t
__cpuid_insn(struct cpuid_regs *regs)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(__cpuid_insn)
	movq	%rbx, %r8
	movq	%rcx, %r9
	movq	%rdx, %r11
	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
	movq	%r8, %rbx
	movq	%r9, %rcx
	movq	%r11, %rdx
	ret
	SET_SIZE(__cpuid_insn)

#elif defined(__i386)

	ENTRY(__cpuid_insn)
	pushl	%ebp
	movl	0x8(%esp), %ebp		/* %ebp = regs */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(__cpuid_insn)

#endif	/* __i386 */
#endif	/* __lint */
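/*
 * A usage sketch for __cpuid_insn() (not from the original source):
 * cp_eax selects the CPUID leaf, and all four fields are overwritten
 * with the results; the vendor-string example is illustrative.
 *
 *	struct cpuid_regs cp = { 0 };	// leaf 0: vendor identification
 *
 *	(void) __cpuid_insn(&cp);
 *	// the vendor string is the 12 bytes in cp_ebx, cp_edx, cp_ecx
 */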

#if defined(__lint)

hrtime_t
tsc_read(void)
{
	return (0);
}

#else	/* __lint */

	ENTRY_NP(tsc_read)
	rdtsc
#if defined(__amd64)
	shlq	$32, %rdx
	orq	%rdx, %rax
#endif
	ret
	SET_SIZE(tsc_read)

#endif	/* __lint */

/*
 * Insert entryp after predp in a doubly linked list.
 */

#if defined(__lint)

/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(_insque)
	movq	(%rsi), %rax		/* predp->forw			*/
	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#elif defined(__i386)

	ENTRY(_insque)
	movl	8(%esp), %edx
	movl	4(%esp), %ecx
	movl	(%edx), %eax		/* predp->forw			*/
	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Remove entryp from a doubly linked list
 */

#if defined(__lint)

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(_remque)
	movq	(%rdi), %rax		/* entry->forw */
	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#elif defined(__i386)

	ENTRY(_remque)
	movl	4(%esp), %ecx
	movl	(%ecx), %eax		/* entry->forw */
	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#endif	/* __i386 */
#endif	/* __lint */
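/*
 * The layout assumed by _insque()/_remque() above, as a C sketch (not
 * from the original source): a forward pointer at offset 0 and a back
 * pointer at offset CPTRSIZE, e.g.
 *
 *	struct qelem {
 *		struct qelem	*q_forw;
 *		struct qelem	*q_back;
 *	};
 *
 * _insque(e, p) links e immediately after p; _remque(e) unlinks e.
 */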
/*
 * Returns the number of
 * non-NULL bytes in string argument.
 */

#if defined(__lint)

/* ARGSUSED */
size_t
strlen(const char *str)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

	ENTRY(strlen)
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jae	str_valid
	pushq	%rbp
	movq	%rsp, %rbp
	leaq	.str_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
#endif	/* DEBUG */
str_valid:
	cmpb	$0, (%rdi)
	movq	%rdi, %rax
	je	.null_found
	.align	4
.strlen_loop:
	incq	%rdi
	cmpb	$0, (%rdi)
	jne	.strlen_loop
.null_found:
	subq	%rax, %rdi
	movq	%rdi, %rax
	ret
	SET_SIZE(strlen)

#elif defined(__i386)

	ENTRY(strlen)
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, 4(%esp)
	jae	str_valid
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.str_panic_msg
	call	panic
#endif /* DEBUG */

str_valid:
	movl	4(%esp), %eax		/* %eax = string address */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/*	goto .not_word_aligned */
	.align	4
.word_aligned:
	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
	movl	$0x7f7f7f7f, %ecx
	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
	addl	$4, %eax		/* next word */
	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
	orl	%edx, %ecx		/* %ecx |= %edx */
	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
	je	.word_aligned		/*	goto .word_aligned */
	subl	$4, %eax		/* post-incremented */
.not_word_aligned:
	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
	je	.null_found		/*	goto .null_found */
	incl	%eax			/* next byte */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/*	goto .not_word_aligned */
	jmp	.word_aligned		/*	goto .word_aligned */
	.align	4
.null_found:
	subl	4(%esp), %eax		/* %eax -= string address */
	ret
	SET_SIZE(strlen)

#endif	/* __i386 */

#ifdef DEBUG
	.text
.str_panic_msg:
	.string "strlen: argument below kernelbase"
#endif /* DEBUG */

#endif	/* __lint */

	/*
	 * Berkeley 4.3 introduced symbolically named interrupt levels
	 * as a way to deal with priority in a machine-independent fashion.
	 * Numbered priorities are machine specific, and should be
	 * discouraged where possible.
	 *
	 * Note, for the machine specific priorities there are
	 * examples listed for devices that use a particular priority.
	 * It should not be construed that all devices of that
	 * type should be at that priority.  It is currently where
	 * the current devices fit into the priority scheme based
	 * upon time criticalness.
	 *
	 * The underlying assumption of these assignments is that
	 * IPL 10 is the highest level from which a device
	 * routine can call wakeup.  Devices that interrupt from higher
	 * levels are restricted in what they can do.  If they need
	 * kernel services they should schedule a routine at a lower
	 * level (via software interrupt) to do the required
	 * processing.
	 *
	 * Examples of this higher usage:
	 *	Level	Usage
	 *	14	Profiling clock (and PROM uart polling clock)
	 *	12	Serial ports
	 *
	 * The serial ports request lower level processing on level 6.
	 *
	 * Also, almost all splN routines (where N is a number or a
	 * mnemonic) will do a RAISE(), on the assumption that they are
	 * never used to lower our priority.
	 * The exceptions are:
	 *	spl8()		Because you can't be above 15 to begin with!
	 *	splzs()		Because this is used at boot time to lower our
	 *			priority, to allow the PROM to poll the uart.
	 *	spl0()		Used to lower priority to 0.
	 */

#if defined(__lint)

int spl0(void)		{ return (0); }
int spl6(void)		{ return (0); }
int spl7(void)		{ return (0); }
int spl8(void)		{ return (0); }
int splhigh(void)	{ return (0); }
int splhi(void)		{ return (0); }
int splzs(void)		{ return (0); }

/* ARGSUSED */
void
splx(int level)
{}

#else	/* __lint */

#if defined(__amd64)

#define	SETPRI(level) \
	movl	$/**/level, %edi;	/* new priority */		\
	jmp	do_splx			/* redirect to do_splx */

#define	RAISE(level) \
	movl	$/**/level, %edi;	/* new priority */		\
	jmp	splr			/* redirect to splr */

#elif defined(__i386)

#define	SETPRI(level) \
	pushl	$/**/level;	/* new priority */			\
	call	do_splx;	/* invoke common splx code */		\
	addl	$4, %esp;	/* unstack arg */			\
	ret

#define	RAISE(level) \
	pushl	$/**/level;	/* new priority */			\
	call	splr;		/* invoke common splr code */		\
	addl	$4, %esp;	/* unstack args */			\
	ret

#endif	/* __i386 */

	/* locks out all interrupts, including memory errors */
	ENTRY(spl8)
	SETPRI(15)
	SET_SIZE(spl8)

	/* just below the level that profiling runs */
	ENTRY(spl7)
	RAISE(13)
	SET_SIZE(spl7)

	/* sun specific - highest priority onboard serial i/o asy ports */
	ENTRY(splzs)
	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
	SET_SIZE(splzs)

	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)

	RAISE(DISP_LEVEL)

	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

	/* allow all interrupts */
	ENTRY(spl0)
	SETPRI(0)
	SET_SIZE(spl0)


	/* splx implementation */
	ENTRY(splx)
	jmp	do_splx		/* redirect to common splx code */
	SET_SIZE(splx)

#endif	/* __lint */

#if defined(__i386)

/*
 * Read and write the %gs register
 */

#if defined(__lint)

/*ARGSUSED*/
uint16_t
getgs(void)
{ return (0); }

/*ARGSUSED*/
void
setgs(uint16_t sel)
{}

#else	/* __lint */

	ENTRY(getgs)
	clr	%eax
	movw	%gs, %ax
	ret
	SET_SIZE(getgs)

	ENTRY(setgs)
	movw	4(%esp), %gs
	ret
	SET_SIZE(setgs)

#endif	/* __lint */
#endif	/* __i386 */

#if defined(__lint)

void
pc_reset(void)
{}

void
efi_reset(void)
{}

#else	/* __lint */

	ENTRY(wait_500ms)
	push	%ebx
	movl	$50000, %ebx
1:
	call	tenmicrosec
	decl	%ebx
	jnz	1b
	pop	%ebx
	ret
	SET_SIZE(wait_500ms)

#define	RESET_METHOD_KBC	1
#define	RESET_METHOD_PORT92	2
#define	RESET_METHOD_PCI	4

	DGDEF3(pc_reset_methods, 4, 8)
	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

	ENTRY(pc_reset)

#if defined(__i386)
	testl	$RESET_METHOD_KBC, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
#endif
	jz	1f

	/
	/ Try the classic keyboard controller-triggered reset.
1074 / 1075 movw $0x64, %dx 1076 movb $0xfe, %al 1077 outb (%dx) 1078 1079 / Wait up to 500 milliseconds here for the keyboard controller 1080 / to pull the reset line. On some systems where the keyboard 1081 / controller is slow to pull the reset line, the next reset method 1082 / may be executed (which may be bad if those systems hang when the 1083 / next reset method is used, e.g. Ferrari 3400 (doesn't like port 92), 1084 / and Ferrari 4000 (doesn't like the cf9 reset method)) 1085 1086 call wait_500ms 1087 10881: 1089#if defined(__i386) 1090 testl $RESET_METHOD_PORT92, pc_reset_methods 1091#elif defined(__amd64) 1092 testl $RESET_METHOD_PORT92, pc_reset_methods(%rip) 1093#endif 1094 jz 3f 1095 1096 / 1097 / Try port 0x92 fast reset 1098 / 1099 movw $0x92, %dx 1100 inb (%dx) 1101 cmpb $0xff, %al / If port's not there, we should get back 0xFF 1102 je 1f 1103 testb $1, %al / If bit 0 1104 jz 2f / is clear, jump to perform the reset 1105 andb $0xfe, %al / otherwise, 1106 outb (%dx) / clear bit 0 first, then 11072: 1108 orb $1, %al / Set bit 0 1109 outb (%dx) / and reset the system 11101: 1111 1112 call wait_500ms 1113 11143: 1115#if defined(__i386) 1116 testl $RESET_METHOD_PCI, pc_reset_methods 1117#elif defined(__amd64) 1118 testl $RESET_METHOD_PCI, pc_reset_methods(%rip) 1119#endif 1120 jz 4f 1121 1122 / Try the PCI (soft) reset vector (should work on all modern systems, 1123 / but has been shown to cause problems on 450NX systems, and some newer 1124 / systems (e.g. ATI IXP400-equipped systems)) 1125 / When resetting via this method, 2 writes are required. The first 1126 / targets bit 1 (0=hard reset without power cycle, 1=hard reset with 1127 / power cycle). 1128 / The reset occurs on the second write, during bit 2's transition from 1129 / 0->1. 1130 movw $0xcf9, %dx 1131 movb $0x2, %al / Reset mode = hard, no power cycle 1132 outb (%dx) 1133 movb $0x6, %al 1134 outb (%dx) 1135 1136 call wait_500ms 1137 11384: 1139 / 1140 / port 0xcf9 failed also. Last-ditch effort is to 1141 / triple-fault the CPU. 1142 / Also, use triple fault for EFI firmware 1143 / 1144 ENTRY(efi_reset) 1145#if defined(__amd64) 1146 pushq $0x0 1147 pushq $0x0 / IDT base of 0, limit of 0 + 2 unused bytes 1148 lidt (%rsp) 1149#elif defined(__i386) 1150 pushl $0x0 1151 pushl $0x0 / IDT base of 0, limit of 0 + 2 unused bytes 1152 lidt (%esp) 1153#endif 1154 int $0x0 / Trigger interrupt, generate triple-fault 1155 1156 cli 1157 hlt / Wait forever 1158 /*NOTREACHED*/ 1159 SET_SIZE(efi_reset) 1160 SET_SIZE(pc_reset) 1161 1162#endif /* __lint */ 1163 1164/* 1165 * C callable in and out routines 1166 */ 1167 1168#if defined(__lint) 1169 1170/* ARGSUSED */ 1171void 1172outl(int port_address, uint32_t val) 1173{} 1174 1175#else /* __lint */ 1176 1177#if defined(__amd64) 1178 1179 ENTRY(outl) 1180 movw %di, %dx 1181 movl %esi, %eax 1182 outl (%dx) 1183 ret 1184 SET_SIZE(outl) 1185 1186#elif defined(__i386) 1187 1188 .set PORT, 4 1189 .set VAL, 8 1190 1191 ENTRY(outl) 1192 movw PORT(%esp), %dx 1193 movl VAL(%esp), %eax 1194 outl (%dx) 1195 ret 1196 SET_SIZE(outl) 1197 1198#endif /* __i386 */ 1199#endif /* __lint */ 1200 1201#if defined(__lint) 1202 1203/* ARGSUSED */ 1204void 1205outw(int port_address, uint16_t val) 1206{} 1207 1208#else /* __lint */ 1209 1210#if defined(__amd64) 1211 1212 ENTRY(outw) 1213 movw %di, %dx 1214 movw %si, %ax 1215 D16 outl (%dx) /* XX64 why not outw? 
	ret
	SET_SIZE(outw)

#elif defined(__i386)

	ENTRY(outw)
	movw	PORT(%esp), %dx
	movw	VAL(%esp), %ax
	D16 outl (%dx)
	ret
	SET_SIZE(outw)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
outb(int port_address, uint8_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(outb)
	movw	%di, %dx
	movb	%sil, %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#elif defined(__i386)

	ENTRY(outb)
	movw	PORT(%esp), %dx
	movb	VAL(%esp), %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint32_t
inl(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#elif defined(__i386)

	ENTRY(inl)
	movw	PORT(%esp), %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint16_t
inw(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx
	D16 inl (%dx)
	ret
	SET_SIZE(inw)

#elif defined(__i386)

	ENTRY(inw)
	subl	%eax, %eax
	movw	PORT(%esp), %dx
	D16 inl (%dx)
	ret
	SET_SIZE(inw)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
uint8_t
inb(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#elif defined(__i386)

	ENTRY(inb)
	subl	%eax, %eax
	movw	PORT(%esp), %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
void
repoutsw(int port, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repoutsw)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  D16 outsl
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
1383 */ 1384 .set PORT, 8 1385 .set ADDR, 12 1386 .set COUNT, 16 1387 1388 ENTRY(repoutsw) 1389 pushl %esi 1390 movl PORT(%esp), %edx 1391 movl ADDR(%esp), %esi 1392 movl COUNT(%esp), %ecx 1393 rep 1394 D16 outsl 1395 popl %esi 1396 ret 1397 SET_SIZE(repoutsw) 1398 1399#endif /* __i386 */ 1400#endif /* __lint */ 1401 1402 1403#if defined(__lint) 1404 1405/* ARGSUSED */ 1406void 1407repinsw(int port_addr, uint16_t *addr, int cnt) 1408{} 1409 1410#else /* __lint */ 1411 1412#if defined(__amd64) 1413 1414 ENTRY(repinsw) 1415 movl %edx, %ecx 1416 movw %di, %dx 1417 rep 1418 D16 insl 1419 ret 1420 SET_SIZE(repinsw) 1421 1422#elif defined(__i386) 1423 1424 ENTRY(repinsw) 1425 pushl %edi 1426 movl PORT(%esp), %edx 1427 movl ADDR(%esp), %edi 1428 movl COUNT(%esp), %ecx 1429 rep 1430 D16 insl 1431 popl %edi 1432 ret 1433 SET_SIZE(repinsw) 1434 1435#endif /* __i386 */ 1436#endif /* __lint */ 1437 1438 1439#if defined(__lint) 1440 1441/* ARGSUSED */ 1442void 1443repinsb(int port, uint8_t *addr, int count) 1444{} 1445 1446#else /* __lint */ 1447 1448#if defined(__amd64) 1449 1450 ENTRY(repinsb) 1451 movl %edx, %ecx 1452 movw %di, %dx 1453 movq %rsi, %rdi 1454 rep 1455 insb 1456 ret 1457 SET_SIZE(repinsb) 1458 1459#elif defined(__i386) 1460 1461 /* 1462 * The arguments and saved registers are on the stack in the 1463 * following order: 1464 * | cnt | +16 1465 * | *addr | +12 1466 * | port | +8 1467 * | eip | +4 1468 * | esi | <-- %esp 1469 * If additional values are pushed onto the stack, make sure 1470 * to adjust the following constants accordingly. 1471 */ 1472 .set IO_PORT, 8 1473 .set IO_ADDR, 12 1474 .set IO_COUNT, 16 1475 1476 ENTRY(repinsb) 1477 pushl %edi 1478 movl IO_ADDR(%esp), %edi 1479 movl IO_COUNT(%esp), %ecx 1480 movl IO_PORT(%esp), %edx 1481 rep 1482 insb 1483 popl %edi 1484 ret 1485 SET_SIZE(repinsb) 1486 1487#endif /* __i386 */ 1488#endif /* __lint */ 1489 1490 1491/* 1492 * Input a stream of 32-bit words. 1493 * NOTE: count is a DWORD count. 
1494 */ 1495#if defined(__lint) 1496 1497/* ARGSUSED */ 1498void 1499repinsd(int port, uint32_t *addr, int count) 1500{} 1501 1502#else /* __lint */ 1503 1504#if defined(__amd64) 1505 1506 ENTRY(repinsd) 1507 movl %edx, %ecx 1508 movw %di, %dx 1509 movq %rsi, %rdi 1510 rep 1511 insl 1512 ret 1513 SET_SIZE(repinsd) 1514 1515#elif defined(__i386) 1516 1517 ENTRY(repinsd) 1518 pushl %edi 1519 movl IO_ADDR(%esp), %edi 1520 movl IO_COUNT(%esp), %ecx 1521 movl IO_PORT(%esp), %edx 1522 rep 1523 insl 1524 popl %edi 1525 ret 1526 SET_SIZE(repinsd) 1527 1528#endif /* __i386 */ 1529#endif /* __lint */ 1530 1531/* 1532 * Output a stream of bytes 1533 * NOTE: count is a byte count 1534 */ 1535#if defined(__lint) 1536 1537/* ARGSUSED */ 1538void 1539repoutsb(int port, uint8_t *addr, int count) 1540{} 1541 1542#else /* __lint */ 1543 1544#if defined(__amd64) 1545 1546 ENTRY(repoutsb) 1547 movl %edx, %ecx 1548 movw %di, %dx 1549 rep 1550 outsb 1551 ret 1552 SET_SIZE(repoutsb) 1553 1554#elif defined(__i386) 1555 1556 ENTRY(repoutsb) 1557 pushl %esi 1558 movl IO_ADDR(%esp), %esi 1559 movl IO_COUNT(%esp), %ecx 1560 movl IO_PORT(%esp), %edx 1561 rep 1562 outsb 1563 popl %esi 1564 ret 1565 SET_SIZE(repoutsb) 1566 1567#endif /* __i386 */ 1568#endif /* __lint */ 1569 1570/* 1571 * Output a stream of 32-bit words 1572 * NOTE: count is a DWORD count 1573 */ 1574#if defined(__lint) 1575 1576/* ARGSUSED */ 1577void 1578repoutsd(int port, uint32_t *addr, int count) 1579{} 1580 1581#else /* __lint */ 1582 1583#if defined(__amd64) 1584 1585 ENTRY(repoutsd) 1586 movl %edx, %ecx 1587 movw %di, %dx 1588 rep 1589 outsl 1590 ret 1591 SET_SIZE(repoutsd) 1592 1593#elif defined(__i386) 1594 1595 ENTRY(repoutsd) 1596 pushl %esi 1597 movl IO_ADDR(%esp), %esi 1598 movl IO_COUNT(%esp), %ecx 1599 movl IO_PORT(%esp), %edx 1600 rep 1601 outsl 1602 popl %esi 1603 ret 1604 SET_SIZE(repoutsd) 1605 1606#endif /* __i386 */ 1607#endif /* __lint */ 1608 1609/* 1610 * void int3(void) 1611 * void int18(void) 1612 * void int20(void) 1613 */ 1614 1615#if defined(__lint) 1616 1617void 1618int3(void) 1619{} 1620 1621void 1622int18(void) 1623{} 1624 1625void 1626int20(void) 1627{} 1628 1629#else /* __lint */ 1630 1631 ENTRY(int3) 1632 int $T_BPTFLT 1633 ret 1634 SET_SIZE(int3) 1635 1636 ENTRY(int18) 1637 int $T_MCE 1638 ret 1639 SET_SIZE(int18) 1640 1641 ENTRY(int20) 1642 movl boothowto, %eax 1643 andl $RB_DEBUG, %eax 1644 jz 1f 1645 1646 int $T_DBGENTR 16471: 1648 rep; ret /* use 2 byte return instruction when branch target */ 1649 /* AMD Software Optimization Guide - Section 6.2 */ 1650 SET_SIZE(int20) 1651 1652#endif /* __lint */ 1653 1654#if defined(__lint) 1655 1656/* ARGSUSED */ 1657int 1658scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask) 1659{ return (0); } 1660 1661#else /* __lint */ 1662 1663#if defined(__amd64) 1664 1665 ENTRY(scanc) 1666 /* rdi == size */ 1667 /* rsi == cp */ 1668 /* rdx == table */ 1669 /* rcx == mask */ 1670 addq %rsi, %rdi /* end = &cp[size] */ 1671.scanloop: 1672 cmpq %rdi, %rsi /* while (cp < end */ 1673 jnb .scandone 1674 movzbq (%rsi), %r8 /* %r8 = *cp */ 1675 incq %rsi /* cp++ */ 1676 testb %cl, (%r8, %rdx) 1677 jz .scanloop /* && (table[*cp] & mask) == 0) */ 1678 decq %rsi /* (fix post-increment) */ 1679.scandone: 1680 movl %edi, %eax 1681 subl %esi, %eax /* return (end - cp) */ 1682 ret 1683 SET_SIZE(scanc) 1684 1685#elif defined(__i386) 1686 1687 ENTRY(scanc) 1688 pushl %edi 1689 pushl %esi 1690 movb 24(%esp), %cl /* mask = %cl */ 1691 movl 16(%esp), %esi /* cp = %esi */ 1692 movl 20(%esp), %edx 
#if defined(__lint)

/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi), %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx, %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Replacement functions for ones that are normally inlined.
 * In addition to the copy in i86.il, they are defined here just in case.
 */

#if defined(__lint)

ulong_t
intr_clear(void)
{ return (0); }

ulong_t
clear_int_flag(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	popq	%rax
	CLI(%rdi)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#elif defined(__i386)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

struct cpu *
curcpup(void)
{ return 0; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax
	ret
	SET_SIZE(curcpup)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint32_t
htonl(uint32_t i)
{ return (0); }

/* ARGSUSED */
uint32_t
ntohl(uint32_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/* XX64 there must be shorter sequences for this */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#elif defined(__i386)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint16_t
htons(uint16_t i)
{ return (0); }

/* ARGSUSED */
uint16_t
ntohs(uint16_t i)
{ return (0); }


#else	/* __lint */

#if defined(__amd64)

	/* XX64 there must be better sequences for this */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
void
intr_restore(ulong_t i)
{ return; }

/* ARGSUSED */
void
restore_int_flag(ulong_t i)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

void
sti(void)
{}

void
cli(void)
{}

#else	/* __lint */

	ENTRY(sti)
	STI
	ret
	SET_SIZE(sti)

	ENTRY(cli)
#if defined(__amd64)
	CLI(%rax)
#elif defined(__i386)
	CLI(%eax)
#endif	/* __i386 */
	ret
	SET_SIZE(cli)

#endif	/* __lint */

#if defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax
	CLI(%rdx)
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(dtrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* __lint */

	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

#endif	/* __lint */

#if defined(__lint)

kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax
	ret
	SET_SIZE(threadp)

#endif	/* __i386 */
#endif	/* __lint */

/*
 *   Checksum routine for Internet Protocol Headers
 */

#if defined(__lint)

/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	psum += sum;

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq	.ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc
	jmp	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

#elif defined(__i386)

	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subl	$64, %edi
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc
	jmp	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60


#endif	/* __i386 */
#endif	/* __lint */

/*
 * multiply two long numbers and yield a u_longlong_t result, callable from C.
 * Provided to manipulate hrtime_t values.
 */
#if defined(__lint)

/* result = a * b; */

/* ARGSUSED */
unsigned long long
mul32(uint_t a, uint_t b)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(mul32)

#elif defined(__i386)

	ENTRY(mul32)
	movl	8(%esp), %eax
	movl	4(%esp), %ecx
	mull	%ecx
	ret
	SET_SIZE(mul32)

#endif	/* __i386 */
#endif	/* __lint */
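/*
 * C equivalent of mul32() above (a sketch, not from the original
 * source): widen one operand before multiplying so the full 64-bit
 * product is kept rather than truncated to 32 bits.
 *
 *	unsigned long long
 *	mul32(uint_t a, uint_t b)
 *	{
 *		return ((unsigned long long)a * b);
 *	}
 */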
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
load_pte64(uint64_t *pte, uint64_t pte_value)
{}
#else	/* __lint */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax
	movl	8(%esp), %ecx
	movl	12(%esp), %edx
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
#endif	/* __lint */
#endif	/* notused */

#if defined(__lint)

/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	movl	$-1, %eax
	bsfl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(highbit)
	movl	$-1, %eax
	bsrq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	movl	$-1, %eax
	bsrl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(highbit)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
uint64_t
rdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
wrmsr(uint_t r, const uint64_t val)
{}

/*ARGSUSED*/
uint64_t
xrdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
xwrmsr(uint_t r, const uint64_t val)
{}

void
invalidate_cache(void)
{}

#else  /* __lint */

#define	XMSR_ACCESS_VAL		$0x9c5a203a

#if defined(__amd64)

	ENTRY(rdmsr)
	movl	%edi, %ecx
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	wrmsr
	leave
	ret
	SET_SIZE(xwrmsr)

#elif defined(__i386)

	ENTRY(rdmsr)
	movl	4(%esp), %ecx
	rdmsr
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	movl	12(%esp), %eax
	movl	16(%esp), %edx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xwrmsr)

#endif	/* __i386 */

	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)

#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
getcregs(struct cregs *crp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(getcregs)

#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
	ret
	SET_SIZE(getcregs)

#undef GETMSR

#elif defined(__i386)

	ENTRY_NP(getcregs)
	movl	4(%esp), %edx
	movw	$0, CREG_GDT+6(%edx)
	movw	$0, CREG_IDT+6(%edx)
	sgdt	CREG_GDT(%edx)		/* gdt */
	sidt	CREG_IDT(%edx)		/* idt */
	sldt	CREG_LDT(%edx)		/* ldt */
	str	CREG_TASKR(%edx)	/* task */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	testl	$X86_LARGEPAGE, x86_feature
	jz	.nocr4
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
	ret
	SET_SIZE(getcregs)

#endif	/* __i386 */
#endif	/* __lint */


/*
 * A panic trigger is a word which is updated atomically and can only be set
 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
 * previous value was 0, we succeed and return 1; otherwise return 0.
 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
 * has its own version of this function to allow it to panic correctly from
 * probe context.
 */
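/*
 * A C sketch of the trigger semantics (not from the original source),
 * using an atomic swap in place of the lock/xchgl sequence below:
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		uint32_t old = atomic_swap_32((uint32_t *)tp, 0xdefacedd);
 *
 *		return (old == 0);	// 1 only for the first caller
 *	}
 */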
#if defined(__lint)

/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }

/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)

	ENTRY_NP(dtrace_panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)

#elif defined(__i386)

	ENTRY_NP(panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)

	ENTRY_NP(dtrace_panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
 */
 */
#if defined(__lint)

/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}

/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip	| 0x60 */
	movq	%rsp, %rbp			/* | %rbp	| 0x58 */
	pushfq					/* | rfl	| 0x50 */
	pushq	%r11				/* | %r11	| 0x48 */
	pushq	%r10				/* | %r10	| 0x40 */
	pushq	%rbx				/* | %rbx	| 0x38 */
	pushq	%rax				/* | %rax	| 0x30 */
	pushq	%r9				/* | %r9	| 0x28 */
	pushq	%r8				/* | %r8	| 0x20 */
	pushq	%rcx				/* | %rcx	| 0x18 */
	pushq	%rdx				/* | %rdx	| 0x10 */
	pushq	%rsi				/* | %rsi	| 0x8 alist */
	pushq	%rdi				/* | %rdi	| 0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_vpanic() places it in %eax before branching here.
	 * The rdmsr instructions that follow below will clobber %eax so
	 * we stash the panic_trigger result in %r11d.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
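	 *
	 * Conceptually, treating the allocation as a struct regs (a hedged
	 * sketch; the REGOFF_* offsets come from the generated assym.h):
	 *
	 *	rp->r_rdi = the %rdi saved at 0x0(%rbx), and so on;
	 *	rp->r_rip = (greg_t)vpanic;	... report vpanic as the pc
	 *	rp->r_rsp = %rbx + 0x60;	... stack pointer at entry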
	 */
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx
	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx
	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	movq	%r12, REGOFF_R12(%rsp)
	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)
	movq	$0, REGOFF_ERR(%rsp)
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx
	movq	%rcx, REGOFF_RSP(%rsp)
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)

	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip	| 0x60 */
	movq	%rsp, %rbp			/* | %rbp	| 0x58 */
	pushfq					/* | rfl	| 0x50 */
	pushq	%r11				/* | %r11	| 0x48 */
	pushq	%r10				/* | %r10	| 0x40 */
	pushq	%rbx				/* | %rbx	| 0x38 */
	pushq	%rax				/* | %rax	| 0x30 */
	pushq	%r9				/* | %r9	| 0x28 */
	pushq	%r8				/* | %r8	| 0x20 */
	pushq	%rcx				/* | %rcx	| 0x18 */
	pushq	%rdx				/* | %rdx	| 0x10 */
	pushq	%rsi				/* | %rsi	| 0x8 alist */
	pushq	%rdi				/* | %rdi	| 0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)

#elif defined(__i386)

	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip	| 20
	movl	%esp, %ebp			/ | %ebp	| 16
	pushl	%eax				/ | %eax	| 12
	pushl	%ebx				/ | %ebx	|  8
	pushl	%ecx				/ | %ecx	|  4
	pushl	%edx				/ | %edx	|  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/	goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
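	 *
	 * For reference, both the 64-bit and 32-bit paths end by calling
	 * panicsys(); its C prototype (see common/os/panic.c) is:
	 *
	 *	void panicsys(const char *format, va_list alist,
	 *	    struct regs *rp, int on_panic_stack);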
	 */
	lea	panic_stack, %esp		/ %esp = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else /* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif /* __GNUC_AS__ */
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else /* __GNUC_AS__ */
	mov	%cs, %edx
#endif /* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else /* __GNUC_AS__ */
	mov	%ss, %edx
#endif /* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)

	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip	| 20
	movl	%esp, %ebp			/ | %ebp	| 16
	pushl	%eax				/ | %eax	| 12
	pushl	%ebx				/ | %ebx	|  8
	pushl	%ecx				/ | %ecx	|  4
	pushl	%edx				/ | %edx	|  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger	/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

void
hres_tick(void)
{}

int64_t timedelta;
hrtime_t hres_last_tick;
timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
hrtime_t hrtime_base;

#else /* __lint */

	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)
	.long	0, 0

	DGDEF3(timedelta, 8, 8)
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)
	.long	0
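	/*
	 * hres_lock doubles as a spin lock and a generation counter: the
	 * tick code below acquires it by xchg'ing 0xff into the low byte
	 * and releases it by incrementing the whole word, which rolls the
	 * low byte back to zero and bumps the generation.  A hedged C
	 * sketch of the acquire side (illustrative only; atomic_swap_8()
	 * from <sys/atomic.h> stands in for the xchgb loop):
	 *
	 *	while (atomic_swap_8((volatile uint8_t *)&hres_lock,
	 *	    0xff) != 0)
	 *		... spin, with a pause instruction ...
	 *	... update hres_last_tick, hrtime_base, hrestime ...
	 *	hres_lock++;		... release
	 */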
	/*
	 * initialized to a non-zero value to make pc_gethrtime()
	 * work correctly even before the clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT

#if defined(__amd64)

	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11
	subq	(%rax), %r8
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)

#elif defined(__i386)

	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl	(%eax), %edx
	sbbl	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi, 4(%eax)

	/ get hrestime at this moment.  used as base for pc_gethrestime
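	/
	/ The arithmetic above, as a hedged C sketch (illustrative only;
	/ the 64-bit operations are done with sub/sbb and add/adc pairs,
	/ and the tick interval is assumed to fit in 32 bits for the
	/ tv_nsec update):
	/
	/	interval = now - hres_last_tick;
	/	hrtime_base += interval;
	/	hrestime.tv_nsec += (long)interval;
	/	hres_last_tick = now;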
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	} else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64 bits, while HRES_ADJ is 32 bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32 bits
	/ of hrestime_adj are greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64 bits, while HRES_ADJ is 32 bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32 bits of hrestime_adj are
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.
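	/ A worked example of the negative-case test (illustrative numbers
	/ only): let HRES_ADJ = 100.  For hrestime_adj = -3
	/ (0xffffffff_fffffffd), step 1 computes 100 + 0xfffffffd, which
	/ carries, and step 2 computes 1 + 0 + 0xffffffff, which also
	/ carries, so adj = hrestime_adj.  For hrestime_adj = -200
	/ (0xffffffff_ffffff38), step 1 computes 100 + 0xffffff38 with no
	/ carry, step 2 does not carry either, and adj = -(HRES_ADJ).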
.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx

	movl	%ecx, %eax		/ %eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes, one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)

#endif /* __i386 */
#endif /* __lint */

/*
 * void prefetch_smap_w(void *)
 *
 * Prefetch ahead within a linear list of smap structures.
 * Not implemented for ia32.  Stub for compatibility.
 */

#if defined(__lint)

/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}

#else /* __lint */

	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)

#endif /* __lint */

/*
 * prefetch_page_r(page_t *)
 * issue prefetch instructions for a page_t
 */
#if defined(__lint)

/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}

#else /* __lint */

	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)

#endif /* __lint */

#if defined(__lint)

/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }

#else /* __lint */

#if defined(__amd64)

	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif /* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl
	leave
	movzbl	%dl, %eax
	ret
	SET_SIZE(bcmp)

#elif defined(__i386)

#define	ARG_S1		8
#define	ARG_S2		12
#define	ARG_LENGTH	16

	ENTRY(bcmp)
	pushl	%ebp
	movl	%esp, %ebp	/ create new stack frame
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, ARG_S1(%ebp)
	jb	0f
	cmpl	%eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl	$.bcmp_panic_msg
	call	panic
1:
#endif /* DEBUG */

	pushl	%edi		/ save register variable
	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx	/ if the same string
	je	.equal		/ goto .equal
	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi	/ if %edi < 4
	jb	.byte_check	/ goto .byte_check
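	/ The two loops that follow are, in effect, this hedged C sketch
	/ (illustrative only; on a word mismatch the code re-checks that
	/ word a byte at a time, but the net result is the same):
	/
	/	while (count >= 4) {
	/		if (*(uint32_t *)s1 != *(uint32_t *)s2)
	/			return (1);
	/		s1 += 4, s2 += 4, count -= 4;
	/	}
	/	while (count-- != 0)
	/		if (*s1++ != *s2++)
	/			return (1);
	/	return (0);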
	.align	4
.word_loop:
	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi	/ %edi -= 4
	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal	/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
	leal	4(%eax), %eax	/ %eax += 4 (next word)
	cmpl	$4, %edi	/ if %edi >= 4
	jae	.word_loop	/ goto .word_loop
.byte_check:
	cmpl	$0, %edi	/ if %edi == 0
	je	.equal		/ goto .equal
	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
.word_not_equal:
	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
	.align	4
.byte_loop:
	movb	(%ecx), %dl	/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
	jne	.not_equal	/ if not equal, goto .not_equal
	incl	%ecx		/ %ecx++ (next byte)
	incl	%eax		/ %eax++ (next byte)
	decl	%edi		/ %edi--
	jnz	.byte_loop	/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax	/ %eax = 0
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (0)
	.align	4
.not_equal:
	movl	$1, %eax	/ %eax = 1
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (1)
	SET_SIZE(bcmp)

#endif /* __i386 */

#ifdef DEBUG
	.text
.bcmp_panic_msg:
	.string "bcmp: arguments below kernelbase"
#endif /* DEBUG */

#endif /* __lint */

#if defined(__lint)

uint_t
bsrw_insn(uint16_t mask)
{
	uint_t index = sizeof (mask) * NBBY - 1;

	while ((mask & (1 << index)) == 0)
		index--;
	return (index);
}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)

#elif defined(__i386)

	ENTRY_NP(bsrw_insn)
	movw	4(%esp), %cx
	xorl	%eax, %eax
	bsrw	%cx, %ax
	ret
	SET_SIZE(bsrw_insn)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}

#else /* __lint */

#if defined(__i386)

	ENTRY_NP(atomic_btr32)
	movl	4(%esp), %ecx
	movl	8(%esp), %edx
	xorl	%eax, %eax
	lock
	btrl	%edx, (%ecx)
	setc	%al
	ret
	SET_SIZE(atomic_btr32)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
    uint_t arg2)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	call	*%r11			/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)

#elif defined(__i386)

	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	movl	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)

#endif /* __i386 */
#endif /* __lint */
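/*
 * A hedged usage sketch for switch_sp_and_call() (hypothetical call site,
 * illustrative only): interrupt dispatch code can use it to run a handler
 * on a pre-allocated interrupt stack, e.g.
 *
 *	switch_sp_and_call(it->t_stk, intr_handler, vector, ipl);
 *
 * Note that no data is copied to the new stack; the two uint_t arguments
 * are simply re-passed (in registers on amd64, pushed on the new stack on
 * i386) before the indirect call.
 */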
#if defined(__lint)

void
kmdb_enter(void)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli', then trap into kmdb; intr_clear()
	 * returns the saved flags.
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)

#elif defined(__i386)

	ENTRY_NP(kmdb_enter)
	pushl	%ebp
	movl	%esp, %ebp

	/*
	 * Save flags, do a 'cli', then trap into kmdb; intr_clear()
	 * returns the saved flags.
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	pushl	%eax
	call	intr_restore
	addl	$4, %esp

	leave
	ret
	SET_SIZE(kmdb_enter)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

void
return_instr(void)
{}

#else /* __lint */

	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)

#endif /* __lint */

#if defined(__lint)

ulong_t
getflags(void)
{
	return (0);
}

#else /* __lint */

#if defined(__amd64)

	ENTRY(getflags)
	pushfq
	popq	%rax
	ret
	SET_SIZE(getflags)

#elif defined(__i386)

	ENTRY(getflags)
	pushfl
	popl	%eax
	ret
	SET_SIZE(getflags)

#endif /* __i386 */

#endif /* __lint */