/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
 * Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990  AT&T
 *   All Rights Reserved
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * General assembly language routines.
 * It is the intent of this file to contain routines that are
 * independent of the specific kernel architecture, and those that are
 * common across kernel architectures.
 * As architectures diverge, and implementations of specific
 * architecture-dependent routines change, the routines should be moved
 * from this file into the respective ../`arch -k`/subr.s file.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/panic.h>
#include <sys/ontrap.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/reboot.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#if defined(__lint)
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/archsystm.h>
#include <sys/byteorder.h>
#include <sys/dtrace.h>
#include <sys/ftrace.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */
#include <sys/dditypes.h>

/*
 * on_fault()
 * Catch lofault faults.  Like setjmp(), except that it returns 1 if the
 * code following it causes an uncorrectable fault.  Turned off by
 * calling no_fault().
 */
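
/*
 * Illustrative C usage (a sketch, not code from this file): callers
 * bracket code that may take a recoverable fault.  On the fault path,
 * catch_fault has already cleared t_onfault/t_lofault before the
 * longjmp, so no_fault() is only needed on the success path.
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		... fault occurred; handlers already turned off ...
 *		return (EFAULT);
 *	}
 *	... code that may fault on a bad address ...
 *	no_fault();
 */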

#if defined(__lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

void
no_fault(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_fault)
	movq	%gs:CPU_THREAD, %rsi
	leaq	catch_fault(%rip), %rdx
	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	jmp	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movq	%gs:CPU_THREAD, %rsi
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#elif defined(__i386)

	ENTRY(on_fault)
	movl	%gs:CPU_THREAD, %edx
	movl	4(%esp), %eax			/* jumpbuf address */
	leal	catch_fault, %ecx
	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	pushl	%ecx
	call	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 */

#if defined(lint)

void
on_trap_trampoline(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_trap_trampoline)
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONTRAP(%rsi), %rdi
	addq	$OT_JMPBUF, %rdi
	jmp	longjmp
	SET_SIZE(on_trap_trampoline)

#elif defined(__i386)

	ENTRY(on_trap_trampoline)
	movl	%gs:CPU_THREAD, %eax
	movl	T_ONTRAP(%eax), %eax
	addl	$OT_JMPBUF, %eax
	pushl	%eax
	call	longjmp
	SET_SIZE(on_trap_trampoline)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */
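
/*
 * Illustrative C usage (a sketch; see <sys/ontrap.h> for the real
 * interface and protection codes such as OT_DATA_ACCESS):
 *
 *	on_trap_data_t otd;
 *
 *	if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
 *		... a protected trap occurred ...
 *	}
 *	... code that may trap ...
 *	no_trap();
 */
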
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_trap)
	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
	xorl	%ecx, %ecx
	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
	cmpq	%rdi, %rcx			/* if (otp == %rcx) */
	je	0f				/*	don't modify t_ontrap */

	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */

0:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
	jmp	setjmp
	SET_SIZE(on_trap)

#elif defined(__i386)

	ENTRY(on_trap)
	movl	4(%esp), %eax			/* %eax = otp */
	movl	8(%esp), %edx			/* %edx = prot */

	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
	cmpl	%eax, %ecx			/* if (otp == %ecx) */
	je	0f				/*	don't modify t_ontrap */

	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */

0:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
	movl	%eax, 4(%esp)			/* put %eax back on the stack */
	jmp	setjmp				/* let setjmp do the rest */
	SET_SIZE(on_trap)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * of type label_t.
 */
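
/*
 * Illustrative C usage (a sketch): these are the kernel-private
 * analogues of the libc routines, operating on a label_t.
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		... first return; some other path may longjmp(&jb) ...
 *	} else {
 *		... longjmp() was taken; setjmp() "returned" 1 ...
 *	}
 */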

#if defined(__lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else	/* __lint */

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif	/* LABEL_PC != 0 */

#if defined(__amd64)

	ENTRY(setjmp)
	movq	%rsp, LABEL_SP(%rdi)
	movq	%rbp, LABEL_RBP(%rdi)
	movq	%rbx, LABEL_RBX(%rdi)
	movq	%r12, LABEL_R12(%rdi)
	movq	%r13, LABEL_R13(%rdi)
	movq	%r14, LABEL_R14(%rdi)
	movq	%r15, LABEL_R15(%rdi)
	movq	(%rsp), %rdx		/* return address */
	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movq	LABEL_SP(%rdi), %rsp
	movq	LABEL_RBP(%rdi), %rbp
	movq	LABEL_RBX(%rdi), %rbx
	movq	LABEL_R12(%rdi), %r12
	movq	LABEL_R13(%rdi), %r13
	movq	LABEL_R14(%rdi), %r14
	movq	LABEL_R15(%rdi), %r15
	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
	movq	%rdx, (%rsp)
	xorl	%eax, %eax
	incl	%eax			/* return 1 */
	ret
	SET_SIZE(longjmp)

#elif defined(__i386)

	ENTRY(setjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	%ebp, LABEL_EBP(%edx)
	movl	%ebx, LABEL_EBX(%edx)
	movl	%esi, LABEL_ESI(%edx)
	movl	%edi, LABEL_EDI(%edx)
	movl	%esp, 4(%edx)
	movl	(%esp), %ecx		/* %eip (return address) */
	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
	subl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	LABEL_EBP(%edx), %ebp
	movl	LABEL_EBX(%edx), %ebx
	movl	LABEL_ESI(%edx), %esi
	movl	LABEL_EDI(%edx), %edi
	movl	4(%edx), %esp
	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
	movl	$1, %eax
	addl	$4, %esp		/* pop ret adr */
	jmp	*%ecx			/* indirect */
	SET_SIZE(longjmp)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * if a() calls b() calls caller(),
 * caller() returns return address in a().
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence.)
 */
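
/*
 * A sketch of why this works: caller() and callee() (below) simply read
 * saved return addresses.  callee() takes its own return pc from the top
 * of the stack, while caller() reaches one frame up through the saved
 * frame pointer, which is why a() and b() must use the standard
 * entry/exit sequence for the result to be meaningful.
 */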

#if defined(__lint)

caddr_t
caller(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(caller)
	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#elif defined(__i386)

	ENTRY(caller)
	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * if a() calls callee(), callee() returns the
 * return address in a();
 */

#if defined(__lint)

caddr_t
callee(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(callee)
	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#elif defined(__i386)

	ENTRY(callee)
	movl	(%esp), %eax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * return the current frame pointer
 */

#if defined(__lint)

greg_t
getfp(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getfp)
	movq	%rbp, %rax
	ret
	SET_SIZE(getfp)

#elif defined(__i386)

	ENTRY(getfp)
	movl	%ebp, %eax
	ret
	SET_SIZE(getfp)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Invalidate a single page table entry in the TLB
 */

#if defined(__lint)

/* ARGSUSED */
void
mmu_tlbflush_entry(caddr_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(mmu_tlbflush_entry)
	invlpg	(%rdi)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#elif defined(__i386)

	ENTRY(mmu_tlbflush_entry)
	movl	4(%esp), %eax
	invlpg	(%eax)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#endif	/* __i386 */
#endif	/* __lint */


/*
 * Get/Set the value of various control registers
 */

#if defined(__lint)

ulong_t
getcr0(void)
{ return (0); }

/* ARGSUSED */
void
setcr0(ulong_t value)
{}

ulong_t
getcr2(void)
{ return (0); }

ulong_t
getcr3(void)
{ return (0); }

/* ARGSUSED */
void
setcr3(ulong_t val)
{}

void
reload_cr3(void)
{}

ulong_t
getcr4(void)
{ return (0); }

/* ARGSUSED */
void
setcr4(ulong_t val)
{}

#if defined(__amd64)

ulong_t
getcr8(void)
{ return (0); }

/* ARGSUSED */
void
setcr8(ulong_t val)
{}

#endif	/* __amd64 */

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getcr0)
	movq	%cr0, %rax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movq	%rdi, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
	movq	%cr2, %rax
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movq	%cr3, %rax
	ret
	SET_SIZE(getcr3)

	ENTRY(setcr3)
	movq	%rdi, %cr3
	ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movq	%cr3, %rdi
	movq	%rdi, %cr3
	ret
	SET_SIZE(reload_cr3)

	ENTRY(getcr4)
	movq	%cr4, %rax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movq	%rdi, %cr4
	ret
	SET_SIZE(setcr4)

	ENTRY(getcr8)
	movq	%cr8, %rax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movq	%rdi, %cr8
	ret
	SET_SIZE(setcr8)

#elif defined(__i386)

	ENTRY(getcr0)
	movl	%cr0, %eax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
	movl	%cr2, %eax
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movl	%cr3, %eax
	ret
	SET_SIZE(getcr3)

	ENTRY(setcr3)
	movl	4(%esp), %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movl	%cr3, %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(reload_cr3)

	ENTRY(getcr4)
	movl	%cr4, %eax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movl	4(%esp), %eax
	movl	%eax, %cr4
	ret
	SET_SIZE(setcr4)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
uint32_t
__cpuid_insn(struct cpuid_regs *regs)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(__cpuid_insn)
	movq	%rbx, %r8
	movq	%rcx, %r9
	movq	%rdx, %r11
	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
	movq	%r8, %rbx
	movq	%r9, %rcx
	movq	%r11, %rdx
	ret
	SET_SIZE(__cpuid_insn)

#elif defined(__i386)

	ENTRY(__cpuid_insn)
	pushl	%ebp
	movl	0x8(%esp), %ebp		/* %ebp = regs */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(__cpuid_insn)

#endif	/* __i386 */
#endif	/* __lint */
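
/*
 * Illustrative C caller of __cpuid_insn() above (a sketch): the request
 * leaf is loaded into cp_eax (and cp_ecx for sub-leaves), and the
 * results are read back out of the same structure.
 *
 *	struct cpuid_regs cp;
 *
 *	cp.cp_eax = 0;		... basic leaf 0: vendor string ...
 *	cp.cp_ebx = cp.cp_ecx = cp.cp_edx = 0;
 *	(void) __cpuid_insn(&cp);
 *	... vendor string is now in cp.cp_ebx, cp.cp_edx, cp.cp_ecx ...
 */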

#if defined(__lint)

hrtime_t
tsc_read(void)
{
	return (0);
}

#else	/* __lint */

	ENTRY_NP(tsc_read)
	rdtsc
#if defined(__amd64)
	shlq	$32, %rdx
	orq	%rdx, %rax
#endif
	ret
	SET_SIZE(tsc_read)

#endif	/* __lint */

/*
 * Insert entryp after predp in a doubly linked list.
 */
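
/*
 * Queue elements are assumed to begin with their forward and backward
 * links, in that order (a sketch of the implied layout):
 *
 *	struct qelem {
 *		struct qelem	*q_forw;
 *		struct qelem	*q_back;
 *	};
 *
 * so (entryp) below addresses the forward link and CPTRSIZE(entryp)
 * the backward link.
 */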

#if defined(__lint)

/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(_insque)
	movq	(%rsi), %rax		/* predp->forw */
	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp */
	movq	%rax, (%rdi)		/* entryp->forw = predp->forw */
	movq	%rdi, (%rsi)		/* predp->forw = entryp */
	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp */
	ret
	SET_SIZE(_insque)

#elif defined(__i386)

	ENTRY(_insque)
	movl	8(%esp), %edx
	movl	4(%esp), %ecx
	movl	(%edx), %eax		/* predp->forw */
	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp */
	movl	%eax, (%ecx)		/* entryp->forw = predp->forw */
	movl	%ecx, (%edx)		/* predp->forw = entryp */
	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp */
	ret
	SET_SIZE(_insque)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Remove entryp from a doubly linked list
 */

#if defined(__lint)

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(_remque)
	movq	(%rdi), %rax		/* entry->forw */
	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#elif defined(__i386)

	ENTRY(_remque)
	movl	4(%esp), %ecx
	movl	(%ecx), %eax		/* entry->forw */
	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Returns the number of non-null bytes in the string argument.
 */

#if defined(__lint)

/* ARGSUSED */
size_t
strlen(const char *str)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

	ENTRY(strlen)
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jae	str_valid
	pushq	%rbp
	movq	%rsp, %rbp
	leaq	.str_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
#endif	/* DEBUG */
str_valid:
	cmpb	$0, (%rdi)
	movq	%rdi, %rax
	je	.null_found
	.align	4
.strlen_loop:
	incq	%rdi
	cmpb	$0, (%rdi)
	jne	.strlen_loop
.null_found:
	subq	%rax, %rdi
	movq	%rdi, %rax
	ret
	SET_SIZE(strlen)

#elif defined(__i386)

	ENTRY(strlen)
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, 4(%esp)
	jae	str_valid
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.str_panic_msg
	call	panic
#endif /* DEBUG */

str_valid:
	movl	4(%esp), %eax		/* %eax = string address */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/*	goto .not_word_aligned */
	.align	4
.word_aligned:
	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
	movl	$0x7f7f7f7f, %ecx
	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
	addl	$4, %eax		/* next word */
	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
	orl	%edx, %ecx		/* %ecx |= %edx */
	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
	je	.word_aligned		/*	goto .word_aligned */
	subl	$4, %eax		/* post-incremented */
.not_word_aligned:
	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
	je	.null_found		/*	goto .null_found */
	incl	%eax			/* next byte */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/*	goto .not_word_aligned */
	jmp	.word_aligned		/*	goto .word_aligned */
	.align	4
.null_found:
	subl	4(%esp), %eax		/* %eax -= string address */
	ret
	SET_SIZE(strlen)

#endif	/* __i386 */

#ifdef DEBUG
	.text
.str_panic_msg:
	.string "strlen: argument below kernelbase"
#endif	/* DEBUG */

#endif	/* __lint */

	/*
	 * Berkeley 4.3 introduced symbolically named interrupt levels
	 * as a way to deal with priority in a machine independent fashion.
	 * Numbered priorities are machine specific, and should be
	 * discouraged where possible.
	 *
	 * Note, for the machine specific priorities there are
	 * examples listed for devices that use a particular priority.
	 * It should not be construed that all devices of that
	 * type should be at that priority.  The examples show where
	 * the current devices fit into the priority scheme based
	 * upon time criticality.
	 *
	 * The underlying assumption of these assignments is that
	 * IPL 10 is the highest level from which a device
	 * routine can call wakeup.  Devices that interrupt from higher
	 * levels are restricted in what they can do.  If they need
	 * kernel services they should schedule a routine at a lower
	 * level (via software interrupt) to do the required
	 * processing.
	 *
	 * Examples of this higher usage:
	 *	Level	Usage
	 *	14	Profiling clock (and PROM uart polling clock)
	 *	12	Serial ports
	 *
	 * The serial ports request lower level processing on level 6.
	 *
	 * Also, almost all splN routines (where N is a number or a
	 * mnemonic) will do a RAISE(), on the assumption that they are
	 * never used to lower our priority.
	 * The exceptions are:
	 *	spl8()		Because you can't be above 15 to begin with!
	 *	splzs()		Because this is used at boot time to lower our
	 *			priority, to allow the PROM to poll the uart.
	 *	spl0()		Used to lower priority to 0.
	 */
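
/*
 * Illustrative C usage (a sketch): the spl routines return the previous
 * priority level so that critical sections can nest and be unwound with
 * splx().
 *
 *	int s;
 *
 *	s = splhi();		... raise to DISP_LEVEL ...
 *	... critical section ...
 *	splx(s);		... restore the prior level ...
 */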

#if defined(__lint)

int spl0(void)		{ return (0); }
int spl6(void)		{ return (0); }
int spl7(void)		{ return (0); }
int spl8(void)		{ return (0); }
int splhigh(void)	{ return (0); }
int splhi(void)		{ return (0); }
int splzs(void)		{ return (0); }

/* ARGSUSED */
void
splx(int level)
{}

#else	/* __lint */

#if defined(__amd64)

#define	SETPRI(level) \
	movl	$/**/level, %edi;	/* new priority */		\
	jmp	do_splx			/* redirect to do_splx */

#define	RAISE(level) \
	movl	$/**/level, %edi;	/* new priority */		\
	jmp	splr			/* redirect to splr */

#elif defined(__i386)

#define	SETPRI(level) \
	pushl	$/**/level;	/* new priority */			\
	call	do_splx;	/* invoke common splx code */		\
	addl	$4, %esp;	/* unstack arg */			\
	ret

#define	RAISE(level) \
	pushl	$/**/level;	/* new priority */			\
	call	splr;		/* invoke common splr code */		\
	addl	$4, %esp;	/* unstack args */			\
	ret

#endif	/* __i386 */

	/* locks out all interrupts, including memory errors */
	ENTRY(spl8)
	SETPRI(15)
	SET_SIZE(spl8)

	/* just below the level that profiling runs */
	ENTRY(spl7)
	RAISE(13)
	SET_SIZE(spl7)

	/* Sun specific - highest priority onboard serial i/o asy ports */
	ENTRY(splzs)
	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
	SET_SIZE(splzs)

	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)

	RAISE(DISP_LEVEL)

	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

	/* allow all interrupts */
	ENTRY(spl0)
	SETPRI(0)
	SET_SIZE(spl0)


	/* splx implementation */
	ENTRY(splx)
	jmp	do_splx		/* redirect to common splx code */
	SET_SIZE(splx)

#endif	/* __lint */

#if defined(__i386)

/*
 * Read and write the %gs register
 */

#if defined(__lint)

/*ARGSUSED*/
uint16_t
getgs(void)
{ return (0); }

/*ARGSUSED*/
void
setgs(uint16_t sel)
{}

#else	/* __lint */

	ENTRY(getgs)
	clr	%eax
	movw	%gs, %ax
	ret
	SET_SIZE(getgs)

	ENTRY(setgs)
	movw	4(%esp), %gs
	ret
	SET_SIZE(setgs)

#endif	/* __lint */
#endif	/* __i386 */

#if defined(__lint)

void
pc_reset(void)
{}

void
efi_reset(void)
{}

#else	/* __lint */

	ENTRY(wait_500ms)
	push	%ebx
	movl	$50000, %ebx
1:
	call	tenmicrosec
	decl	%ebx
	jnz	1b
	pop	%ebx
	ret
	SET_SIZE(wait_500ms)

#define	RESET_METHOD_KBC	1
#define	RESET_METHOD_PORT92	2
#define	RESET_METHOD_PCI	4

	DGDEF3(pc_reset_methods, 4, 8)
	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

	ENTRY(pc_reset)

#if defined(__i386)
	testl	$RESET_METHOD_KBC, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
#endif
	jz	1f

	/
	/ Try the classic keyboard controller-triggered reset.
	/
	movw	$0x64, %dx
	movb	$0xfe, %al
	outb	(%dx)

	/ Wait up to 500 milliseconds here for the keyboard controller
	/ to pull the reset line.  On some systems where the keyboard
	/ controller is slow to pull the reset line, the next reset method
	/ may be executed (which may be bad if those systems hang when the
	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
	/ and Ferrari 4000 (doesn't like the cf9 reset method))

	call	wait_500ms

1:
#if defined(__i386)
	testl	$RESET_METHOD_PORT92, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
#endif
	jz	3f

	/
	/ Try port 0x92 fast reset
	/
	movw	$0x92, %dx
	inb	(%dx)
	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
	je	1f
	testb	$1, %al		/ If bit 0
	jz	2f		/ is clear, jump to perform the reset
	andb	$0xfe, %al	/ otherwise,
	outb	(%dx)		/ clear bit 0 first, then
2:
	orb	$1, %al		/ Set bit 0
	outb	(%dx)		/ and reset the system
1:

	call	wait_500ms

3:
#if defined(__i386)
	testl	$RESET_METHOD_PCI, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
#endif
	jz	4f

	/ Try the PCI (soft) reset vector (should work on all modern systems,
	/ but has been shown to cause problems on 450NX systems, and some newer
	/ systems (e.g. ATI IXP400-equipped systems))
	/ When resetting via this method, 2 writes are required.  The first
	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
	/ power cycle).
	/ The reset occurs on the second write, during bit 2's transition from
	/ 0->1.
	movw	$0xcf9, %dx
	movb	$0x2, %al	/ Reset mode = hard, no power cycle
	outb	(%dx)
	movb	$0x6, %al
	outb	(%dx)

	call	wait_500ms

4:
	/
	/ port 0xcf9 failed also.  Last-ditch effort is to
	/ triple-fault the CPU.
	/ Also, use triple fault for EFI firmware
	/
	ENTRY(efi_reset)
#if defined(__amd64)
	pushq	$0x0
	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%rsp)
#elif defined(__i386)
	pushl	$0x0
	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%esp)
#endif
	int	$0x0		/ Trigger interrupt, generate triple-fault

	cli
	hlt			/ Wait forever
	/*NOTREACHED*/
	SET_SIZE(efi_reset)
	SET_SIZE(pc_reset)

#endif	/* __lint */

/*
 * C callable in and out routines
 */
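
/*
 * Illustrative C usage (a sketch): ports and values are passed as plain
 * integer arguments, so the keyboard-controller reset issued above is,
 * in C,
 *
 *	outb(0x64, 0xfe);	... write 0xfe to KBC command port ...
 *	(void) inb(0x64);	... read KBC status back ...
 */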

#if defined(__lint)

/* ARGSUSED */
void
outl(int port_address, uint32_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(outl)
	movw	%di, %dx
	movl	%esi, %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

#elif defined(__i386)

	.set	PORT, 4
	.set	VAL, 8

	ENTRY(outl)
	movw	PORT(%esp), %dx
	movl	VAL(%esp), %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
outw(int port_address, uint16_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(outw)
	movw	%di, %dx
	movw	%si, %ax
	D16 outl (%dx)		/* XX64 why not outw? */
	ret
	SET_SIZE(outw)

#elif defined(__i386)

	ENTRY(outw)
	movw	PORT(%esp), %dx
	movw	VAL(%esp), %ax
	D16 outl (%dx)
	ret
	SET_SIZE(outw)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
void
outb(int port_address, uint8_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(outb)
	movw	%di, %dx
	movb	%sil, %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#elif defined(__i386)

	ENTRY(outb)
	movw	PORT(%esp), %dx
	movb	VAL(%esp), %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint32_t
inl(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#elif defined(__i386)

	ENTRY(inl)
	movw	PORT(%esp), %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint16_t
inw(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx
	D16 inl (%dx)
	ret
	SET_SIZE(inw)

#elif defined(__i386)

	ENTRY(inw)
	subl	%eax, %eax
	movw	PORT(%esp), %dx
	D16 inl (%dx)
	ret
	SET_SIZE(inw)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
uint8_t
inb(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#elif defined(__i386)

	ENTRY(inb)
	subl	%eax, %eax
	movw	PORT(%esp), %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
void
repoutsw(int port, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repoutsw)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  D16 outsl
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	PORT, 8
	.set	ADDR, 12
	.set	COUNT, 16

	ENTRY(repoutsw)
	pushl	%esi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %esi
	movl	COUNT(%esp), %ecx
	rep
	  D16 outsl
	popl	%esi
	ret
	SET_SIZE(repoutsw)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
void
repinsw(int port_addr, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repinsw)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  D16 insl
	ret
	SET_SIZE(repinsw)

#elif defined(__i386)

	ENTRY(repinsw)
	pushl	%edi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %edi
	movl	COUNT(%esp), %ecx
	rep
	  D16 insl
	popl	%edi
	ret
	SET_SIZE(repinsw)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
void
repinsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repinsb)
	movl	%edx, %ecx
	movw	%di, %dx
	movq	%rsi, %rdi
	rep
	  insb
	ret
	SET_SIZE(repinsb)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  edi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	IO_PORT, 8
	.set	IO_ADDR, 12
	.set	IO_COUNT, 16

	ENTRY(repinsb)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insb
	popl	%edi
	ret
	SET_SIZE(repinsb)

#endif	/* __i386 */
#endif	/* __lint */


/*
 * Input a stream of 32-bit words.
 * NOTE: count is a DWORD count.
 */
#if defined(__lint)

/* ARGSUSED */
void
repinsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repinsd)
	movl	%edx, %ecx
	movw	%di, %dx
	movq	%rsi, %rdi
	rep
	  insl
	ret
	SET_SIZE(repinsd)

#elif defined(__i386)

	ENTRY(repinsd)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insl
	popl	%edi
	ret
	SET_SIZE(repinsd)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Output a stream of bytes
 * NOTE: count is a byte count
 */

#if defined(__lint)

/* ARGSUSED */
void
repoutsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repoutsb)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  outsb
	ret
	SET_SIZE(repoutsb)

#elif defined(__i386)

	ENTRY(repoutsb)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsb
	popl	%esi
	ret
	SET_SIZE(repoutsb)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * Output a stream of 32-bit words
 * NOTE: count is a DWORD count
 */

#if defined(__lint)

/* ARGSUSED */
void
repoutsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(repoutsd)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  outsl
	ret
	SET_SIZE(repoutsd)

#elif defined(__i386)

	ENTRY(repoutsd)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsl
	popl	%esi
	ret
	SET_SIZE(repoutsd)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * void int3(void)
 * void int18(void)
 * void int20(void)
 */

#if defined(__lint)

void
int3(void)
{}

void
int18(void)
{}

void
int20(void)
{}

#else	/* __lint */

	ENTRY(int3)
	int	$T_BPTFLT
	ret
	SET_SIZE(int3)

	ENTRY(int18)
	int	$T_MCE
	ret
	SET_SIZE(int18)

	ENTRY(int20)
	movl	boothowto, %eax
	andl	$RB_DEBUG, %eax
	jz	1f

	int	$T_DBGENTR
1:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(int20)

#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi), %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx, %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)

#endif	/* __i386 */
#endif	/* __lint */
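
/*
 * C equivalent of scanc() above (a sketch mirroring the inline
 * annotations):
 *
 *	int
 *	scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
 *	{
 *		uchar_t *end = &cp[size];
 *
 *		while (cp < end && (table[*cp] & mask) == 0)
 *			cp++;
 *		return (end - cp);
 *	}
 */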

/*
 * Replacement functions for ones that are normally inlined.
 * In addition to the copy in i86.il, they are defined here just in case.
 */

#if defined(__lint)

ulong_t
intr_clear(void)
{ return (0); }

ulong_t
clear_int_flag(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	popq	%rax
	CLI(%rdi)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#elif defined(__i386)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

struct cpu *
curcpup(void)
{ return 0; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax
	ret
	SET_SIZE(curcpup)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint32_t
htonl(uint32_t i)
{ return (0); }

/* ARGSUSED */
uint32_t
ntohl(uint32_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/* XX64 there must be shorter sequences for this */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#elif defined(__i386)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/* ARGSUSED */
uint16_t
htons(uint16_t i)
{ return (0); }

/* ARGSUSED */
uint16_t
ntohs(uint16_t i)
{ return (0); }


#else	/* __lint */

#if defined(__amd64)

	/* XX64 there must be better sequences for this */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/* ARGSUSED */
void
intr_restore(ulong_t i)
{ return; }

/* ARGSUSED */
void
restore_int_flag(ulong_t i)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#endif	/* __i386 */
#endif	/* __lint */
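
/*
 * Illustrative C usage of intr_clear()/intr_restore() above (a sketch):
 * the previous flags register is returned so interrupt state can be
 * restored rather than unconditionally re-enabled.
 *
 *	ulong_t flags;
 *
 *	flags = intr_clear();
 *	... code that must run with interrupts disabled ...
 *	intr_restore(flags);
 */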

#if defined(__lint)

void
sti(void)
{}

void
cli(void)
{}

#else	/* __lint */

	ENTRY(sti)
	STI
	ret
	SET_SIZE(sti)

	ENTRY(cli)
#if defined(__amd64)
	CLI(%rax)
#elif defined(__i386)
	CLI(%eax)
#endif	/* __i386 */
	ret
	SET_SIZE(cli)

#endif	/* __lint */

#if defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax
	CLI(%rdx)
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(dtrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* __lint */

	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

#endif	/* __lint */

#if defined(__lint)

kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax
	ret
	SET_SIZE(threadp)

#endif	/* __i386 */
#endif	/* __lint */

/*
 *   Checksum routine for Internet Protocol Headers
 */

#if defined(__lint)

/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	psum += sum;

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string "ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq	.ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc
	jmp	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

#elif defined(__i386)

	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subl	$64, %edi
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc
	jmp	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60


#endif	/* __i386 */
#endif	/* __lint */

/*
 * multiply two long numbers and yield a u_longlong_t result, callable from C.
 * Provided to manipulate hrtime_t values.
 */
#if defined(__lint)

/* result = a * b; */

/* ARGSUSED */
unsigned long long
mul32(uint_t a, uint_t b)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(mul32)

#elif defined(__i386)

	ENTRY(mul32)
	movl	8(%esp), %eax
	movl	4(%esp), %ecx
	mull	%ecx
	ret
	SET_SIZE(mul32)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
load_pte64(uint64_t *pte, uint64_t pte_value)
{}
#else	/* __lint */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax
	movl	8(%esp), %ecx
	movl	12(%esp), %edx
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
#endif	/* __lint */
#endif	/* notused */

#if defined(__lint)

/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)

#endif	/* __i386 */
#endif	/* __lint */


#if defined(__lint)

/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	movl	$-1, %eax
	bsfl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(highbit)
	movl	$-1, %eax
	bsrq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	movl	$-1, %eax
	bsrl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(highbit)

#endif	/* __i386 */
#endif	/* __lint */
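
/*
 * Bit numbering in the two routines above is 1-based, with 0 meaning
 * "no bit set" (a sketch of the resulting values):
 *
 *	lowbit(0x18)  == 4	... lowest set bit is bit 3, plus one ...
 *	highbit(0x18) == 5	... highest set bit is bit 4, plus one ...
 *	lowbit(0)     == 0	... bsf/bsr find nothing; the preset -1
 *				    is left in place, plus one ...
 */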

#if defined(__lint)

/*ARGSUSED*/
uint64_t
rdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
wrmsr(uint_t r, const uint64_t val)
{}

/*ARGSUSED*/
uint64_t
xrdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
xwrmsr(uint_t r, const uint64_t val)
{}

void
invalidate_cache(void)
{}

#else  /* __lint */

#define	XMSR_ACCESS_VAL		$0x9c5a203a

#if defined(__amd64)

	ENTRY(rdmsr)
	movl	%edi, %ecx
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	wrmsr
	leave
	ret
	SET_SIZE(xwrmsr)

#elif defined(__i386)

	ENTRY(rdmsr)
	movl	4(%esp), %ecx
	rdmsr
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	movl	12(%esp), %eax
	movl	16(%esp), %edx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xwrmsr)

#endif	/* __i386 */

	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)

#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
getcregs(struct cregs *crp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(getcregs)

#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
	ret
	SET_SIZE(getcregs)

#undef GETMSR

#elif defined(__i386)

	ENTRY_NP(getcregs)
	movl	4(%esp), %edx
	movw	$0, CREG_GDT+6(%edx)
	movw	$0, CREG_IDT+6(%edx)
	sgdt	CREG_GDT(%edx)		/* gdt */
	sidt	CREG_IDT(%edx)		/* idt */
	sldt	CREG_LDT(%edx)		/* ldt */
	str	CREG_TASKR(%edx)	/* task */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	testl	$X86_LARGEPAGE, x86_feature
	jz	.nocr4
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
	ret
	SET_SIZE(getcregs)

#endif	/* __i386 */
#endif	/* __lint */


/*
 * A panic trigger is a word which is updated atomically and can only be set
 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
 * previous value was 0, we succeed and return 1; otherwise return 0.
 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
 * has its own version of this function to allow it to panic correctly from
 * probe context.
 */
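
/*
 * C equivalent of the two triggers below (a sketch; atomic_swap_32()
 * stands in for the lock/xchgl pair used in the assembly):
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		return (atomic_swap_32((uint32_t *)tp, 0xdefacedd) == 0);
 *	}
 */
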
#if defined(__lint)

/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }

/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)

	ENTRY_NP(dtrace_panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)

#elif defined(__i386)

	ENTRY_NP(panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/	return (1);
	movl	$0, %eax		/ else
	ret				/	return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)

	ENTRY_NP(dtrace_panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/	return (1);
	movl	$0, %eax		/ else
	ret				/	return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)

#endif	/* __i386 */
#endif	/* __lint */

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
#if defined(__lint)

/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}

/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip |	0x60 */
	movq	%rsp, %rbp			/* | %rbp |	0x58 */
	pushfq					/* | rfl  |	0x50 */
	pushq	%r11				/* | %r11 |	0x48 */
	pushq	%r10				/* | %r10 |	0x40 */
	pushq	%rbx				/* | %rbx |	0x38 */
	pushq	%rax				/* | %rax |	0x30 */
	pushq	%r9				/* | %r9  |	0x28 */
	pushq	%r8				/* | %r8  |	0x20 */
	pushq	%rcx				/* | %rcx |	0x18 */
	pushq	%rdx				/* | %rdx |	0x10 */
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_vpanic() places it in %eax before branching here.
	 * The code that follows below will clobber %eax, so we stash
	 * the panic_trigger result in %r11d.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
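	/*
	 * The 0x0 through 0x58 offsets from %rbx used below index the
	 * values pushed at function entry (see the stack-layout diagram
	 * above); %rbx + 0x60 is the stack pointer value on entry to
	 * vpanic(), which is what gets recorded as REGOFF_RSP further down.
	 */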
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx
	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx
	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	movq	%r12, REGOFF_R12(%rsp)
	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)
	movq	$0, REGOFF_ERR(%rsp)
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx
	movq	%rcx, REGOFF_RSP(%rsp)
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)

	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip |	0x60 */
	movq	%rsp, %rbp			/* | %rbp |	0x58 */
	pushfq					/* | rfl  |	0x50 */
	pushq	%r11				/* | %r11 |	0x48 */
	pushq	%r10				/* | %r10 |	0x40 */
	pushq	%rbx				/* | %rbx |	0x38 */
	pushq	%rax				/* | %rax |	0x30 */
	pushq	%r9				/* | %r9  |	0x28 */
	pushq	%r8				/* | %r8  |	0x20 */
	pushq	%rcx				/* | %rcx |	0x18 */
	pushq	%rdx				/* | %rdx |	0x10 */
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)

#elif defined(__i386)

	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/	goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/ %esp = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else /* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif /* __GNUC_AS__ */
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else /* __GNUC_AS__ */
	mov	%cs, %edx
#endif /* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else /* __GNUC_AS__ */
	mov	%ss, %edx
#endif /* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)

	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger	/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

void
hres_tick(void)
{}

int64_t timedelta;
hrtime_t hres_last_tick;
timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
hrtime_t hrtime_base;

#else /* __lint */

	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)
	.long	0, 0

	DGDEF3(timedelta, 8, 8)
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)
	.long	0

	/*
	 * initialized to a nonzero value so that pc_gethrtime()
	 * works correctly even
	 * before the clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT

#if defined(__amd64)

	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11
	subq	(%rax), %r8
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)

#elif defined(__i386)

	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl	(%eax), %edx
	sbbl	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi, 4(%eax)

	/ get hrestime at this moment.
	/ used as base for pc_gethrestime
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	}
	/	else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32-bits
	/ of hrestime_adj is greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32-bits of hrestime_adj is
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.
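	/ Illustration only (not part of the build): with native 64-bit
	/ arithmetic, the borrow/carry sequences above and below reduce to
	/ a simple clamp of hrestime_adj to +/- max_hres_adj, sketched in C:
	/
	/	int64_t adj = hrestime_adj;
	/
	/	if (adj > (int64_t)max_hres_adj)
	/		adj = max_hres_adj;
	/	else if (adj < -(int64_t)max_hres_adj)
	/		adj = -(int64_t)max_hres_adj;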
.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx

	movl	%ecx, %eax		/ eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes, one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final value into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)

#endif /* __i386 */
#endif /* __lint */

/*
 * void prefetch_smap_w(void *)
 *
 * Prefetch ahead within a linear list of smap structures.
 * Not implemented for ia32.  Stub for compatibility.
 */

#if defined(__lint)

/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}

#else /* __lint */

	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)

#endif /* __lint */

/*
 * prefetch_page_r(page_t *)
 * issue prefetch instructions for a page_t
 */
#if defined(__lint)

/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}

#else /* __lint */

	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)

#endif /* __lint */

#if defined(__lint)

/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }

#else /* __lint */

#if defined(__amd64)

	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif /* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl
	leave
	movzbl	%dl, %eax
	ret
	SET_SIZE(bcmp)

#elif defined(__i386)

#define	ARG_S1		8
#define	ARG_S2		12
#define	ARG_LENGTH	16

	ENTRY(bcmp)
	pushl	%ebp
	movl	%esp, %ebp		/ create new stack frame
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, ARG_S1(%ebp)
	jb	0f
	cmpl	%eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl	$.bcmp_panic_msg
	call	panic
1:
#endif /* DEBUG */

	pushl	%edi			/ save register variable
	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx		/ if the same string
	je	.equal			/ goto .equal
	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi		/ if %edi < 4
	jb	.byte_check		/ goto .byte_check
	.align	4
.word_loop:
	movl	(%ecx), %edx		/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi		/ %edi -= 4
	cmpl	(%eax), %edx		/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal		/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx		/ %ecx += 4 (next word)
	leal	4(%eax), %eax		/ %eax += 4 (next word)
	cmpl	$4, %edi		/ if %edi >= 4
	jae	.word_loop		/ goto .word_loop
.byte_check:
	cmpl	$0, %edi		/ if %edi == 0
	je	.equal			/ goto .equal
	jmp	.byte_loop		/ goto .byte_loop (checks in bytes)
.word_not_equal:
	leal	4(%edi), %edi		/ %edi += 4 (restore loop decrement)
	.align	4
.byte_loop:
	movb	(%ecx), %dl		/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)		/ compare %dl with 1 byte from (%eax)
	jne	.not_equal		/ if not equal, goto .not_equal
	incl	%ecx			/ %ecx++ (next byte)
	incl	%eax			/ %eax++ (next byte)
	decl	%edi			/ %edi--
	jnz	.byte_loop		/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax		/ %eax = 0
	popl	%edi			/ restore register variable
	leave				/ restore old stack frame
	ret				/ return (0)
	.align	4
.not_equal:
	movl	$1, %eax		/ %eax = 1
	popl	%edi			/ restore register variable
	leave				/ restore old stack frame
	ret				/ return (1)
	SET_SIZE(bcmp)

#endif /* __i386 */

#ifdef DEBUG
	.text
.bcmp_panic_msg:
	.string "bcmp: arguments below kernelbase"
#endif /* DEBUG */

#endif /* __lint */

#if defined(__lint)

uint_t
bsrw_insn(uint16_t mask)
{
	uint_t index = sizeof (mask) * NBBY - 1;

	while ((mask & (1 << index)) == 0)
		index--;
	return (index);
}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)

#elif defined(__i386)

	ENTRY_NP(bsrw_insn)
	movw	4(%esp), %cx
	xorl	%eax, %eax
	bsrw	%cx, %ax
	ret
	SET_SIZE(bsrw_insn)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}

#else /* __lint */

#if defined(__i386)

	ENTRY_NP(atomic_btr32)
	movl	4(%esp), %ecx
	movl	8(%esp), %edx
	xorl	%eax, %eax
	lock
	btrl	%edx, (%ecx)
	setc	%al			/ return the bit's previous state
	ret
	SET_SIZE(atomic_btr32)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
    uint_t arg2)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	call	*%r11			/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)

#elif defined(__i386)

	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	mov	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)

#endif /* __i386 */
#endif /* __lint */
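/*
 * Illustration only (not part of the build): how a caller might run a
 * handler on a preallocated stack using the routine above.  The names
 * istack_top, handler, a1 and a2 are hypothetical, not symbols defined
 * in this file:
 *
 *	switch_sp_and_call(istack_top, handler, a1, a2);
 *
 * The original stack is restored by the leave/ret epilogue, so the switch
 * lasts only for the duration of the call.
 */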
#if defined(__lint)

void
kmdb_enter(void)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)

#elif defined(__i386)

	ENTRY_NP(kmdb_enter)
	pushl	%ebp
	movl	%esp, %ebp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	pushl	%eax
	call	intr_restore
	addl	$4, %esp

	leave
	ret
	SET_SIZE(kmdb_enter)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

void
return_instr(void)
{}

#else /* __lint */

	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)

#endif /* __lint */

#if defined(__lint)

ulong_t
getflags(void)
{
	return (0);
}

#else /* __lint */

#if defined(__amd64)

	ENTRY(getflags)
	pushfq
	popq	%rax
	ret
	SET_SIZE(getflags)

#elif defined(__i386)

	ENTRY(getflags)
	pushfl
	popl	%eax
	ret
	SET_SIZE(getflags)

#endif /* __i386 */

#endif /* __lint */

#if defined(__lint)

ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }

#else /* __lint */

#if defined(__amd64)

	ENTRY(ftrace_interrupt_disable)
	pushfq
	popq	%rax
	CLI(%rdx)
	ret
	SET_SIZE(ftrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(ftrace_interrupt_disable)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(ftrace_interrupt_disable)

#endif /* __i386 */
#endif /* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}

#else /* __lint */

#if defined(__amd64)

	ENTRY(ftrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(ftrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(ftrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(ftrace_interrupt_enable)

#endif /* __i386 */
#endif /* __lint */
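/*
 * Illustration only (not part of the build): the intended pairing of the
 * two ftrace routines above.  The cookie is simply the saved flags
 * register, so enabling restores the caller's prior interrupt state
 * rather than unconditionally setting the interrupt flag:
 *
 *	ftrace_icookie_t cookie = ftrace_interrupt_disable();
 *	... code that must run with interrupts disabled ...
 *	ftrace_interrupt_enable(cookie);
 */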