/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>
#include <sys/cpr_wakecode.h>

#if !defined(__lint)
#include <sys/segments.h>
#include "assym.h"
#endif

#ifdef DEBUG
#define	LED	1
#define	SERIAL	1
#endif	/* DEBUG */

#ifdef DEBUG
#define	COM1	0x3f8
#define	COM2	0x2f8
#define	WC_COM	COM2		/* either COM1 or COM2 */
#define	WC_LED	0x80		/* diagnostic led port ON motherboard */

/*
 * defined as offsets from the data register
 */
#define	DLL	0	/* divisor latch (lsb) */
#define	DLH	1	/* divisor latch (msb) */
#define	LCR	3	/* line control register */
#define	MCR	4	/* modem control register */


#define	DLAB	0x80	/* divisor latch access bit */
#define	B9600L	0X0c	/* lsb bit pattern for 9600 baud */
#define	B9600H	0X0	/* hsb bit pattern for 9600 baud */
#define	DTR	0x01	/* Data Terminal Ready */
#define	RTS	0x02	/* Request To Send */
#define	STOP1	0x00	/* 1 stop bit */
#define	BITS8	0x03	/* 8 bits per char */

#endif	/* DEBUG */
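
/*
 * Note on the B9600* values: the standard PC UART input clock is
 * 1.8432 MHz, which the 8250/16550 divides by 16 to yield a maximum
 * baud rate of 115200; 115200 / 9600 = 12 = 0x0c, hence B9600L = 0x0c
 * and B9600H = 0.
 */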

/*
 * This file contains the low level routines involved in getting
 * into and out of ACPI S3, including those needed for restarting
 * the non-boot cpus.
 *
 * Our assumptions:
 *
 * Our actions:
 *
 */

#if defined(lint) || defined(__lint)

/*ARGSUSED*/
int
wc_save_context(wc_cpu_t *pcpu)
{ return 0; }

#else	/* lint */

#if defined(__GNU_AS__)

	NOTHING AT ALL YET!

#else	/* !defined(__GNU_AS__) */

#if defined(__amd64)

	ENTRY_NP(wc_save_context)

	movq	(%rsp), %rdx		/ return address
	movq	%rdx, WC_RETADDR(%rdi)
	pushq	%rbp
	movq	%rsp, %rbp

	movq	%rdi, WC_VIRTADDR(%rdi)
	movq	%rdi, WC_RDI(%rdi)

	movq	%rdx, WC_RDX(%rdi)

/ stash everything else we need
	sgdt	WC_GDT(%rdi)
	sidt	WC_IDT(%rdi)
	sldt	WC_LDT(%rdi)
	str	WC_TR(%rdi)

	movq	%cr0, %rdx
	movq	%rdx, WC_CR0(%rdi)
	movq	%cr3, %rdx
	movq	%rdx, WC_CR3(%rdi)
	movq	%cr4, %rdx
	movq	%rdx, WC_CR4(%rdi)
	movq	%cr8, %rdx
	movq	%rdx, WC_CR8(%rdi)

	movq	%r8, WC_R8(%rdi)
	movq	%r9, WC_R9(%rdi)
	movq	%r10, WC_R10(%rdi)
	movq	%r11, WC_R11(%rdi)
	movq	%r12, WC_R12(%rdi)
	movq	%r13, WC_R13(%rdi)
	movq	%r14, WC_R14(%rdi)
	movq	%r15, WC_R15(%rdi)
	movq	%rax, WC_RAX(%rdi)
	movq	%rbp, WC_RBP(%rdi)
	movq	%rbx, WC_RBX(%rdi)
	movq	%rcx, WC_RCX(%rdi)
	movq	%rsi, WC_RSI(%rdi)
	movq	%rsp, WC_RSP(%rdi)

	movw	%ss, WC_SS(%rdi)
	movw	%cs, WC_CS(%rdi)
	movw	%ds, WC_DS(%rdi)
	movw	%es, WC_ES(%rdi)

	movq	$0, %rcx		/ save %fs register
	movw	%fs, %cx
	movq	%rcx, WC_FS(%rdi)

	movl	$MSR_AMD_FSBASE, %ecx
	rdmsr
	movl	%eax, WC_FSBASE(%rdi)
	movl	%edx, WC_FSBASE+4(%rdi)

	movq	$0, %rcx		/ save %gs register
	movw	%gs, %cx
	movq	%rcx, WC_GS(%rdi)

	movl	$MSR_AMD_GSBASE, %ecx	/ save gsbase msr
	rdmsr
	movl	%eax, WC_GSBASE(%rdi)
	movl	%edx, WC_GSBASE+4(%rdi)

	movl	$MSR_AMD_KGSBASE, %ecx	/ save kgsbase msr
	rdmsr
	movl	%eax, WC_KGSBASE(%rdi)
	movl	%edx, WC_KGSBASE+4(%rdi)

	movq	%gs:CPU_ID, %rax	/ save current cpu id
	movq	%rax, WC_CPU_ID(%rdi)

	pushfq
	popq	WC_EFLAGS(%rdi)

	wbinvd				/ flush the cache
	mfence

	movq	$1, %rax		/ at suspend return 1

	leave

	ret

	SET_SIZE(wc_save_context)

#elif defined(__i386)

	ENTRY_NP(wc_save_context)

	movl	4(%esp), %eax		/ wc_cpu_t *
	movl	%eax, WC_VIRTADDR(%eax)

	movl	(%esp), %edx		/ return address
	movl	%edx, WC_RETADDR(%eax)

	str	WC_TR(%eax)		/ stash everything else we need
	sgdt	WC_GDT(%eax)
	sldt	WC_LDT(%eax)
	sidt	WC_IDT(%eax)

	movl	%cr0, %edx
	movl	%edx, WC_CR0(%eax)
	movl	%cr3, %edx
	movl	%edx, WC_CR3(%eax)
	movl	%cr4, %edx
	movl	%edx, WC_CR4(%eax)

	movl	%ebx, WC_EBX(%eax)
	movl	%edi, WC_EDI(%eax)
	movl	%esi, WC_ESI(%eax)
	movl	%ebp, WC_EBP(%eax)
	movl	%esp, WC_ESP(%eax)

	movw	%ss, WC_SS(%eax)
	movw	%cs, WC_CS(%eax)
	movw	%ds, WC_DS(%eax)
	movw	%es, WC_ES(%eax)
	movw	%fs, WC_FS(%eax)
	movw	%gs, WC_GS(%eax)

	pushfl
	popl	WC_EFLAGS(%eax)

	pushl	%gs:CPU_ID		/ save current cpu id
	popl	WC_CPU_ID(%eax)

	wbinvd				/ flush the cache
	mfence

	movl	$1, %eax		/ at suspend return 1
	ret

	SET_SIZE(wc_save_context)

#endif	/* __amd64 */

#endif	/* __GNU_AS__ */

#endif	/* lint */

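
/*
 * wc_save_context() behaves like setjmp(): it returns 1 on the suspend
 * path, when the context is first saved, and 0 when execution resumes
 * after wakeup via the tail of wc_rm_start below.  Illustrative sketch
 * of a caller (hypothetical, not the actual cpr code):
 *
 *	if (wc_save_context(wc_cpu) != 0) {
 *		... still suspending, will resume above on wakeup ...
 *	} else {
 *		... just woke up from S3 ...
 *	}
 */
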
/*
 * Our assumptions:
 *	- We are running in real mode.
 *	- Interrupts are disabled.
 *
 * Our actions:
 *	- We start using our GDT by loading correct values in the
 *	  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *	  gs=KGS_SEL).
 *	- We change over to using our IDT.
 *	- We load the default LDT into the hardware LDT register.
 *	- We load the default TSS into the hardware task register.
 *	- We restore registers
 *	- We return to original caller (a la setjmp)
 */

#if defined(lint) || defined(__lint)

void
wc_rm_start(void)
{}

void
wc_rm_end(void)
{}

#else	/* lint */

#if defined(__GNU_AS__)

	NOTHING AT ALL YET!

#else	/* __GNU_AS__ */

#if defined(__amd64)

	ENTRY_NP(wc_rm_start)

	/*
	 * For vulcan as we need to do a .code32 and mentally invert the
	 * meaning of the addr16 and data16 prefixes to get 32-bit access when
	 * generating code to be executed in 16-bit mode (sigh...)
	 */

	.code32

	cli
	movw	%cs, %ax
	movw	%ax, %ds		/ establish ds ...
	movw	%ax, %ss		/ ... and ss:esp
	D16 movl $WC_STKSTART, %esp
/ using the following value blows up machines! - DO NOT USE
/	D16 movl 0xffc, %esp


#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd1, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x61, %al
	outb	(%dx)
#endif

	D16 call cominit

	/*
	 * Enable protected-mode, write protect, and alignment mask
	 * %cr0 has already been initialized to zero
	 */
	movl	%cr0, %eax
	D16 orl $[CR0_PE|CR0_WP|CR0_AM], %eax
	movl	%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp	pestart
pestart:

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd2, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x62, %al
	outb	(%dx)
#endif

	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode
	 */

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd3, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x63, %al
	outb	(%dx)
#endif

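	/*
	 * The steps that follow are the architecturally required order for
	 * entering long mode: set CR4.PAE, load %cr3 with the long mode
	 * page tables, set EFER.LME via wrmsr, and only then set CR0.PG.
	 * Enabling paging while EFER.LME is set is what actually activates
	 * long mode (EFER.LMA becomes 1).
	 */
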
	/*
	 * Add any initial cr4 bits
	 */
	movl	%cr4, %eax
	A16 D16 orl CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	D16 orl $CR4_PAE, %eax
	movl	%eax, %cr4

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd4, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x64, %al
	outb	(%dx)
#endif

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	A16 D16 movl CR3OFF, %eax
	movl	%eax, %cr3

	/*
	 * Set long mode enable in EFER (EFER.LME = 1)
	 */
	D16 movl $MSR_AMD_EFER, %ecx
	rdmsr

	D16 orl $AMD_EFER_LME, %eax
	wrmsr

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd5, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x65, %al
	outb	(%dx)
#endif

	/*
	 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
	 */
	movl	%cr0, %eax
	D16 orl $CR0_PG, %eax
	movl	%eax, %cr0

	/*
	 * The instruction after enabling paging in CR0 MUST be a branch.
	 */
	jmp	long_mode_active

long_mode_active:

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd6, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x66, %al
	outb	(%dx)
#endif

	/*
	 * Long mode is now active but since we're still running with the
	 * original 16-bit CS we're actually in 16-bit compatibility mode.
	 *
	 * We have to load an intermediate GDT and IDT here that we know are
	 * in 32-bit space before we can use the kernel's GDT and IDT, which
	 * may be in the 64-bit address space, and since we're in compatibility
	 * mode, we only have access to 16 and 32-bit instructions at the
	 * moment.
	 */
	A16 D16 lgdt TEMPGDTOFF		/* load temporary GDT */
	A16 D16 lidt TEMPIDTOFF		/* load temporary IDT */


	/*
	 * Do a far transfer to 64-bit mode.  Set the CS selector to a 64-bit
	 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
	 * to the real mode platter address of wc_long_mode_64 as until the
	 * 64-bit CS is in place we don't have access to 64-bit instructions
	 * and thus can't reference a 64-bit %rip.
	 */

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd7, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x67, %al
	outb	(%dx)
#endif

	D16 pushl $TEMP_CS64_SEL
	A16 D16 pushl LM64OFF

	D16 lret

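	/*
	 * The two pushes above build a far pointer (selector:offset) on the
	 * stack; the lret pops it into %cs:%eip, so execution continues in
	 * the 64-bit code segment at the address stored at LM64OFF in the
	 * real mode platter, i.e. the platter copy of wc_long_mode_64.
	 */
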
/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane
 */
cominit:
	/ init COM1 & COM2

#if	DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl $[COM1+LCR], %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM1+DLL], %edx	/ divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM1+DLH], %edx	/ divisor latch hsb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM1+LCR], %edx	/ select COM1
	D16 movb $[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $[COM1+MCR], %edx	/ select COM1
	D16 movb $[RTS|DTR], %al	/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl $[COM2+LCR], %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM2+DLL], %edx	/ divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM2+DLH], %edx	/ divisor latch hsb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM2+LCR], %edx	/ select COM2
	D16 movb $[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $[COM2+MCR], %edx	/ select COM2
	D16 movb $[RTS|DTR], %al	/ data term ready & req to send
	outb	(%dx)
#endif	/* DEBUG */

	D16 ret

	.code64

	.globl wc_long_mode_64
wc_long_mode_64:

#if	LED
	movw	$WC_LED, %dx
	movb	$0xd8, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x68, %al
	outb	(%dx)
#endif

	/*
	 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
	 * CS.L=1) so we now have access to 64-bit instructions.
	 *
	 * First, set the 64-bit GDT base.
	 */
	.globl rm_platter_pa
	movl	rm_platter_pa, %eax

	lgdtq	GDTROFF(%rax)		/* load 64-bit GDT */

	/*
	 * Save the CPU number in %r11; get the value here since it's saved in
	 * the real mode platter.
	 */
/ JAN
/ the following is wrong! need to figure out MP systems
/	movl	CPUNOFF(%rax), %r11d

	/*
	 * Add rm_platter_pa to %rsp to point it to the same location as seen
	 * from 64-bit mode.
	 */
	addq	%rax, %rsp

	/*
	 * Now do an lretq to load CS with the appropriate selector for the
	 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
	 * virtual address where boot originally loaded this code rather than
	 * the copy in the real mode platter's rm_code array as we've been
	 * doing so far.
	 */

#if	LED
	movw	$WC_LED, %dx
	movb	$0xd9, %al
	outb	(%dx)
#endif

/ JAN this should produce 'i' but we get 'g' instead ???
#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x69, %al
	outb	(%dx)
#endif

	pushq	$KCS_SEL
	pushq	$kernel_wc_code
	lretq

	.globl kernel_wc_code
kernel_wc_code:

#if	LED
	movw	$WC_LED, %dx
	movb	$0xda, %al
	outb	(%dx)
#endif

/ JAN this should produce 'j' but we get 'g' instead ???
#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x6a, %al
	outb	(%dx)
#endif

	/*
	 * Complete the balance of the setup we need to before executing
	 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
	 */
	.globl rm_platter_va
	movq	rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdb, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movw	$0x6b, %ax
	outb	(%dx)
#endif

	/*
	 * restore the rest of the registers
	 */

	lidtq	WC_IDT(%rbx)

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdc, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movw	$0x6c, %ax
	outb	(%dx)
#endif

	/*
	 * restore the rest of the registers
	 */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	bt	$X86FSET_NX, x86_featureset(%rip)
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	movq	WC_CR4(%rbx), %rax	/ restore full cr4 (with Global Enable)
	movq	%rax, %cr4

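	/*
	 * The saved GDT still marks the TSS descriptor busy (bit 9 of the
	 * second longword of the descriptor, hence the 0xfffffdff mask
	 * below).  ltr takes a #GP on a busy TSS, so the bit is cleared
	 * before the task register is reloaded.
	 */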
	lldt	WC_LDT(%rbx)
	movzwq	WC_TR(%rbx), %rax	/ clear TSS busy bit
	addq	WC_GDT+2(%rbx), %rax
	andl	$0xfffffdff, 4(%rax)
	movq	4(%rax), %rcx
	ltr	WC_TR(%rbx)

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdd, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movw	$0x6d, %ax
	outb	(%dx)
#endif

/ restore %fsbase %gsbase %kgsbase registers using wrmsr instruction
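	/*
	 * Loading %fs and %gs below reloads their hidden base from the GDT
	 * descriptors, clobbering any 64-bit base, so the saved FSBASE and
	 * GSBASE MSRs are written back afterwards; KERNEL_GS_BASE is
	 * likewise restored for the next swapgs.
	 */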

	movq	WC_FS(%rbx), %rcx	/ restore fs register
	movw	%cx, %fs

	movl	$MSR_AMD_FSBASE, %ecx
	movl	WC_FSBASE(%rbx), %eax
	movl	WC_FSBASE+4(%rbx), %edx
	wrmsr

	movq	WC_GS(%rbx), %rcx	/ restore gs register
	movw	%cx, %gs

	movl	$MSR_AMD_GSBASE, %ecx	/ restore gsbase msr
	movl	WC_GSBASE(%rbx), %eax
	movl	WC_GSBASE+4(%rbx), %edx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx	/ restore kgsbase msr
	movl	WC_KGSBASE(%rbx), %eax
	movl	WC_KGSBASE+4(%rbx), %edx
	wrmsr

	movq	WC_CR0(%rbx), %rdx
	movq	%rdx, %cr0
	movq	WC_CR3(%rbx), %rdx
	movq	%rdx, %cr3
	movq	WC_CR8(%rbx), %rdx
	movq	%rdx, %cr8

#if	LED
	movw	$WC_LED, %dx
	movb	$0xde, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x6e, %al
	outb	(%dx)
#endif

	/*
	 * if we are not running on the boot CPU restore stack contents by
	 * calling i_cpr_restore_stack(curthread, save_stack);
	 */
	movq	%rsp, %rbp
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%rbx)
	je	2f

	movq	%gs:CPU_THREAD, %rdi
	movq	WC_SAVED_STACK(%rbx), %rsi
	call	i_cpr_restore_stack
2:

	movq	WC_RSP(%rbx), %rsp	/ restore stack pointer

	/*
	 * APIC initialization
	 */
	movq	%rsp, %rbp

	/*
	 * skip iff function pointer is NULL
	 */
	cmpq	$0, ap_mlsetup
	je	3f
	call	*ap_mlsetup
3:

	call	*cpr_start_cpu_func

/ restore %rbx to the value it had before we called the functions above
	movq	rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

	movq	WC_R8(%rbx), %r8
	movq	WC_R9(%rbx), %r9
	movq	WC_R10(%rbx), %r10
	movq	WC_R11(%rbx), %r11
	movq	WC_R12(%rbx), %r12
	movq	WC_R13(%rbx), %r13
	movq	WC_R14(%rbx), %r14
	movq	WC_R15(%rbx), %r15
/	movq	WC_RAX(%rbx), %rax
	movq	WC_RBP(%rbx), %rbp
	movq	WC_RCX(%rbx), %rcx
/	movq	WC_RDX(%rbx), %rdx
	movq	WC_RDI(%rbx), %rdi
	movq	WC_RSI(%rbx), %rsi


/ assume that %cs does not need to be restored
/ %ds, %es & %ss are ignored in 64bit mode
	movw	WC_SS(%rbx), %ss
	movw	WC_DS(%rbx), %ds
	movw	WC_ES(%rbx), %es

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdf, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x6f, %al
	outb	(%dx)
#endif


	movq	WC_RBP(%rbx), %rbp
	movq	WC_RSP(%rbx), %rsp

#if	LED
	movw	$WC_LED, %dx
	movb	$0xe0, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x70, %al
	outb	(%dx)
#endif


	movq	WC_RCX(%rbx), %rcx

	pushq	WC_EFLAGS(%rbx)		/ restore flags
	popfq

#if	LED
	movw	$WC_LED, %dx
	movb	$0xe1, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x71, %al
	outb	(%dx)
#endif

/*
 * cannot use outb after this point, because doing so would mean using
 * %dx which would modify %rdx which is restored here
 */

	movq	%rbx, %rax
	movq	WC_RDX(%rax), %rdx
	movq	WC_RBX(%rax), %rbx

	leave

	movq	WC_RETADDR(%rax), %rax
	movq	%rax, (%rsp)		/ return to caller of wc_save_context

	xorl	%eax, %eax		/ at wakeup return 0
	ret


	SET_SIZE(wc_rm_start)

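/*
 * asmspin(cnt): crude busy-wait; copies the count from %edi into %ecx
 * and executes `loop', which decrements %ecx and branches back until it
 * reaches zero.
 */
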
	ENTRY_NP(asmspin)

	movl	%edi, %ecx
A1:
	loop	A1

	SET_SIZE(asmspin)

	.globl wc_rm_end
wc_rm_end:
	nop

#elif defined(__i386)

	ENTRY_NP(wc_rm_start)

/entry:	jmp	entry			/ stop here for HDT

	cli
	movw	%cs, %ax
	movw	%ax, %ds		/ establish ds ...
	movw	%ax, %ss		/ ... and ss:esp
	D16 movl $WC_STKSTART, %esp

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd1, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x61, %al
	outb	(%dx)
#endif


	D16 call vgainit
	D16 call kbdinit
	D16 call cominit

#if	LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd2, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x62, %al
	outb	(%dx)
#endif

	D16 A16 movl $WC_CPU, %ebx	/ base addr of wc_cpu_t

#if	LED
	D16 movb $0xd3, %al
	outb	$WC_LED
#endif

#if	SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x63, %al
	outb	(%dx)
#endif

	D16 A16 movl %cs:WC_DS(%ebx), %edx	/ %ds post prot/paging transit

#if	LED
	D16 movb $0xd4, %al
	outb	$WC_LED
#endif

	D16 A16 lgdt %cs:WC_GDT(%ebx)	/ restore gdt and idtr
	D16 A16 lidt %cs:WC_IDT(%ebx)

#if	LED
	D16 movb $0xd5, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_CR4(%ebx), %eax	/ restore cr4
	D16 andl $-1!CR4_PGE, %eax	/ don't set Global Enable yet
	movl	%eax, %cr4

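	/*
	 * Global Enable is deliberately left clear here: loading %cr3 does
	 * not flush global TLB entries, so global pages are only turned
	 * back on (via the full saved %cr4 in kernel_wc_code below) once
	 * the kernel's page tables are in place; toggling CR4.PGE flushes
	 * the entire TLB, including global entries.
	 */
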
#if	LED
	D16 movb $0xd6, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_CR3(%ebx), %eax	/ set PDPT
	movl	%eax, %cr3

#if	LED
	D16 movb $0xd7, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_CR0(%ebx), %eax	/ enable prot/paging, etc.
	movl	%eax, %cr0

#if	LED
	D16 movb $0xd8, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_VIRTADDR(%ebx), %ebx	/ virtaddr of wc_cpu_t

#if	LED
	D16 movb $0xd9, %al
	outb	$WC_LED
#endif

#if	LED
	D16 movb $0xda, %al
	outb	$WC_LED
#endif

	jmp	flush			/ flush prefetch queue
flush:
	D16 pushl $KCS_SEL
	D16 pushl $kernel_wc_code
	D16 lret			/ re-appear at kernel_wc_code


/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane for debug output
 */
cominit:
#if	DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl $[COM1+LCR], %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM1+DLL], %edx	/ divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM1+DLH], %edx	/ divisor latch hsb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM1+LCR], %edx	/ select COM1
	D16 movb $[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $[COM1+MCR], %edx	/ select COM1
	D16 movb $[RTS|DTR], %al	/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl $[COM2+LCR], %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM2+DLL], %edx	/ divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM2+DLH], %edx	/ divisor latch hsb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $[COM2+LCR], %edx	/ select COM2
	D16 movb $[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $[COM2+MCR], %edx	/ select COM2
	D16 movb $[RTS|DTR], %al	/ data term ready & req to send
	outb	(%dx)
#endif	/* DEBUG */

	D16 ret

	.globl wc_rm_end
wc_rm_end:
	nop

	.globl kernel_wc_code
kernel_wc_code:
	/ At this point we are with kernel's cs and proper eip.
	/ We will be executing not from the copy in real mode platter,
	/ but from the original code where boot loaded us.
	/ By this time GDT and IDT are loaded as is cr0, cr3 and cr4.
	/ %ebx is wc_cpu
	/ %dx is our ds

#if	LED
	D16 movb $0xdb, %al
	outb	$WC_LED
#endif

/ got here OK

	movw	%dx, %ds		/ $KDS_SEL

#if	LED
	movb	$0xdc, %al
	outb	$WC_LED
#endif

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	bt	$X86FSET_NX, x86_featureset
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	movl	WC_CR4(%ebx), %eax	/ restore full cr4 (with Global Enable)
	movl	%eax, %cr4


	lldt	WC_LDT(%ebx)		/ $LDT_SEL

	movzwl	WC_TR(%ebx), %eax	/ clear TSS busy bit
	addl	WC_GDT+2(%ebx), %eax
	andl	$-1!0x200, 4(%eax)
	ltr	WC_TR(%ebx)		/ $UTSS_SEL

	movw	WC_SS(%ebx), %ss	/ restore segment registers
	movw	WC_ES(%ebx), %es
	movw	WC_FS(%ebx), %fs
	movw	WC_GS(%ebx), %gs

	/*
	 * set the stack pointer to point into the identity mapped page
	 * temporarily, so we can make function calls
	 */
	.globl rm_platter_va
	movl	rm_platter_va, %eax
	movl	$WC_STKSTART, %esp
	addl	%eax, %esp
	movl	%esp, %ebp

	/*
	 * if we are not running on the boot CPU restore stack contents by
	 * calling i_cpr_restore_stack(curthread, save_stack);
	 */
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%ebx)
	je	2f

	pushl	WC_SAVED_STACK(%ebx)
	pushl	%gs:CPU_THREAD
	call	i_cpr_restore_stack
	addl	$0x10, %esp
2:

	movl	WC_ESP(%ebx), %esp
	movl	%esp, %ebp

	movl	WC_RETADDR(%ebx), %eax	/ return to caller of wc_save_context
	movl	%eax, (%esp)

	/*
	 * APIC initialization, skip iff function pointer is NULL
	 */
	cmpl	$0, ap_mlsetup
	je	3f
	call	*ap_mlsetup
3:

	call	*cpr_start_cpu_func

	pushl	WC_EFLAGS(%ebx)		/ restore flags
	popfl

	movl	WC_EDI(%ebx), %edi	/ restore general registers
	movl	WC_ESI(%ebx), %esi
	movl	WC_EBP(%ebx), %ebp
	movl	WC_EBX(%ebx), %ebx

/exit:	jmp	exit			/ stop here for HDT

	xorl	%eax, %eax		/ at wakeup return 0
	ret

	SET_SIZE(wc_rm_start)


#endif	/* defined(__amd64) */

#endif	/* !defined(__GNU_AS__) */

#endif	/* lint */
