1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26#include <sys/asm_linkage.h> 27#include <sys/asm_misc.h> 28#include <sys/regset.h> 29#include <sys/privregs.h> 30#include <sys/x86_archext.h> 31#include <sys/cpr_wakecode.h> 32 33#if !defined(__lint) 34#include <sys/segments.h> 35#include "assym.h" 36#endif 37 38#ifdef DEBUG 39#define LED 1 40#define SERIAL 1 41#endif /* DEBUG */ 42 43#ifdef DEBUG 44#define COM1 0x3f8 45#define COM2 0x2f8 46#define WC_COM COM2 /* either COM1 or COM2 */ 47#define WC_LED 0x80 /* diagnostic led port ON motherboard */ 48 49/* 50 * defined as offsets from the data register 51 */ 52#define DLL 0 /* divisor latch (lsb) */ 53#define DLH 1 /* divisor latch (msb) */ 54#define LCR 3 /* line control register */ 55#define MCR 4 /* modem control register */ 56 57 58#define DLAB 0x80 /* divisor latch access bit */ 59#define B9600L 0X0c /* lsb bit pattern for 9600 baud */ 60#define B9600H 0X0 /* hsb bit pattern for 9600 baud */ 61#define DTR 0x01 /* Data Terminal Ready */ 62#define RTS 0x02 /* Request To Send */ 63#define STOP1 0x00 /* 1 stop bit 
 */
#define	BITS8		0x03		/* 8 bits per char */

#endif	/* DEBUG */

/*
 * This file contains the low level routines involved in getting
 * into and out of ACPI S3, including those needed for restarting
 * the non-boot cpus.
 *
 * Our assumptions:
 *
 * Our actions:
 *
 */

#if defined(lint) || defined(__lint)

/*ARGSUSED*/
int
wc_save_context(wc_cpu_t *pcpu)
{ return 0; }

#else	/* lint */

#if defined(__GNU_AS__)

	NOTHING AT ALL YET!

#else	/* !defined(__GNU_AS__) */

#if defined(__amd64)

/*
 * int wc_save_context(wc_cpu_t *pcpu)
 *
 * Save the calling CPU's context (general registers, control registers,
 * segment selectors, descriptor-table registers, fsbase/gsbase/kgsbase
 * MSRs, cpu id and flags) into *pcpu before suspend.  Returns 1 on this,
 * the suspend, path; the wakeup path in wc_rm_start below later rewrites
 * the saved return address and returns 0 to the same caller (a la setjmp).
 *
 * Entry:	%rdi = pcpu (wc_cpu_t *)
 */
	ENTRY_NP(wc_save_context)

	movq	(%rsp), %rdx		/ return address
	movq	%rdx, WC_RETADDR(%rdi)
	pushq	%rbp
	movq	%rsp, %rbp

	movq	%rdi, WC_VIRTADDR(%rdi)
	movq	%rdi, WC_RDI(%rdi)

	/
	/ NOTE(review): %rdx was already overwritten with the return address
	/ above, so WC_RDX holds that value rather than the caller's %rdx.
	/
	movq	%rdx, WC_RDX(%rdi)

/ stash everything else we need
	sgdt	WC_GDT(%rdi)
	sidt	WC_IDT(%rdi)
	sldt	WC_LDT(%rdi)
	str	WC_TR(%rdi)

	movq	%cr0, %rdx
	movq	%rdx, WC_CR0(%rdi)
	movq	%cr3, %rdx
	movq	%rdx, WC_CR3(%rdi)
	movq	%cr4, %rdx
	movq	%rdx, WC_CR4(%rdi)
	movq	%cr8, %rdx
	movq	%rdx, WC_CR8(%rdi)

	movq	%r8, WC_R8(%rdi)
	movq	%r9, WC_R9(%rdi)
	movq	%r10, WC_R10(%rdi)
	movq	%r11, WC_R11(%rdi)
	movq	%r12, WC_R12(%rdi)
	movq	%r13, WC_R13(%rdi)
	movq	%r14, WC_R14(%rdi)
	movq	%r15, WC_R15(%rdi)
	movq	%rax, WC_RAX(%rdi)
	movq	%rbp, WC_RBP(%rdi)	/ frame pointer set up above
	movq	%rbx, WC_RBX(%rdi)
	movq	%rcx, WC_RCX(%rdi)
	movq	%rsi, WC_RSI(%rdi)
	movq	%rsp, WC_RSP(%rdi)

	movw	%ss, WC_SS(%rdi)
	movw	%cs, WC_CS(%rdi)
	movw	%ds, WC_DS(%rdi)
	movw	%es, WC_ES(%rdi)

	movq	$0, %rcx		/ save %fs register
	movw	%fs, %cx
	movq	%rcx, WC_FS(%rdi)

	movl	$MSR_AMD_FSBASE, %ecx	/ save fsbase msr
	rdmsr
	movl	%eax, WC_FSBASE(%rdi)
	movl	%edx, WC_FSBASE+4(%rdi)

	movq	$0, %rcx		/ save %gs register
	movw	%gs, %cx
	movq	%rcx, WC_GS(%rdi)

	movl	$MSR_AMD_GSBASE, %ecx	/ save gsbase msr
	rdmsr
	movl	%eax, WC_GSBASE(%rdi)
	movl	%edx, WC_GSBASE+4(%rdi)

	movl	$MSR_AMD_KGSBASE, %ecx	/ save kgsbase msr
	rdmsr
	movl	%eax, WC_KGSBASE(%rdi)
	movl	%edx, WC_KGSBASE+4(%rdi)

	movq	%gs:CPU_ID, %rax	/ save current cpu id
	movq	%rax, WC_CPU_ID(%rdi)

	pushfq
	popq	WC_EFLAGS(%rdi)

	wbinvd				/ flush the cache
	mfence

	movq	$1, %rax		/ at suspend return 1

	leave

	ret

	SET_SIZE(wc_save_context)

#elif defined(__i386)

/*
 * int wc_save_context(wc_cpu_t *pcpu)
 *
 * 32-bit variant: save the calling CPU's context into *pcpu before
 * suspend.  Returns 1 on the suspend path; the wakeup code returns 0
 * to the same caller (a la setjmp).
 *
 * Entry:	4(%esp) = pcpu (wc_cpu_t *)
 */
	ENTRY_NP(wc_save_context)

	movl	4(%esp), %eax		/ wc_cpu_t *
	movl	%eax, WC_VIRTADDR(%eax)

	movl	(%esp), %edx		/ return address
	movl	%edx, WC_RETADDR(%eax)

	str	WC_TR(%eax)		/ stash everything else we need
	sgdt	WC_GDT(%eax)
	sldt	WC_LDT(%eax)
	sidt	WC_IDT(%eax)

	movl	%cr0, %edx
	movl	%edx, WC_CR0(%eax)
	movl	%cr3, %edx
	movl	%edx, WC_CR3(%eax)
	movl	%cr4, %edx
	movl	%edx, WC_CR4(%eax)

	movl	%ebx, WC_EBX(%eax)
	movl	%edi, WC_EDI(%eax)
	movl	%esi, WC_ESI(%eax)
	movl	%ebp, WC_EBP(%eax)
	movl	%esp, WC_ESP(%eax)

	movw	%ss, WC_SS(%eax)
	movw	%cs, WC_CS(%eax)
	movw	%ds, WC_DS(%eax)
	movw	%es, WC_ES(%eax)
	movw	%fs, WC_FS(%eax)
	movw	%gs, WC_GS(%eax)

	pushfl
	popl	WC_EFLAGS(%eax)

	pushl	%gs:CPU_ID		/ save current cpu id
	popl	WC_CPU_ID(%eax)

	wbinvd				/ flush the cache
	mfence

	movl	$1, %eax		/ at suspend return 1
	ret

	SET_SIZE(wc_save_context)

#endif	/* __amd64 */

#endif	/* __GNU_AS__ */

#endif /* lint */


/*
 * Our assumptions:
 *	- We are running in real mode.
 *	- Interrupts are disabled.
 *
 * Our actions:
 *	- We start using our GDT by loading correct values in the
 *	    selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *	    gs=KGS_SEL).
 *	- We change over to using our IDT.
 *	- We load the default LDT into the hardware LDT register.
 *	- We load the default TSS into the hardware task register.
 *	- We restore registers
 *	- We return to original caller (a la setjmp)
 */

#if defined(lint) || defined(__lint)

void
wc_rm_start(void)
{}

void
wc_rm_end(void)
{}

#else	/* lint */

#if defined(__GNU_AS__)

	NOTHING AT ALL YET!

#else	/* __GNU_AS__ */

#if defined(__amd64)

	ENTRY_NP(wc_rm_start)

	/*
	 * For vulcan as we need to do a .code32 and mentally invert the
	 * meaning of the addr16 and data16 prefixes to get 32-bit access when
	 * generating code to be executed in 16-bit mode (sigh...)
	 */

	.code32

	cli
	movw	%cs, %ax
	movw	%ax, %ds		/ establish ds ...
	movw	%ax, %ss		/ ... and ss:esp
	D16 movl	$WC_STKSTART, %esp
/ using the following value blows up machines! - DO NOT USE
/	D16 movl	0xffc, %esp


#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd1, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x61, %al
	outb	(%dx)
#endif

	D16 call	cominit

	/*
	 * Enable protected-mode, write protect, and alignment mask
	 * %cr0 has already been initialised to zero
	 */
	movl	%cr0, %eax
	D16 orl	$[CR0_PE|CR0_WP|CR0_AM], %eax
	movl	%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp	pestart
pestart:

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd2, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x62, %al
	outb	(%dx)
#endif

	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode
	 */

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd3, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x63, %al
	outb	(%dx)
#endif

	/*
	 * Add any initial cr4 bits
	 */
	movl	%cr4, %eax
	A16 D16 orl	CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	D16 orl	$CR4_PAE, %eax
	movl	%eax, %cr4

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd4, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x64, %al
	outb	(%dx)
#endif

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	A16 D16 movl	CR3OFF, %eax
	movl	%eax, %cr3

	/*
	 * Set long mode enable in EFER (EFER.LME = 1)
	 */
	D16 movl	$MSR_AMD_EFER, %ecx
	rdmsr

	D16 orl	$AMD_EFER_LME, %eax
	wrmsr

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd5, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x65, %al
	outb	(%dx)
#endif

	/*
	 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
	 */
	movl	%cr0, %eax
	D16 orl	$CR0_PG, %eax
	movl	%eax, %cr0

	/*
	 * The instruction after enabling paging in CR0 MUST be a branch.
	 */
	jmp	long_mode_active

long_mode_active:

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd6, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x66, %al
	outb	(%dx)
#endif

	/*
	 * Long mode is now active but since we're still running with the
	 * original 16-bit CS we're actually in 16-bit compatibility mode.
	 *
	 * We have to load an intermediate GDT and IDT here that we know are
	 * in 32-bit space before we can use the kernel's GDT and IDT, which
	 * may be in the 64-bit address space, and since we're in compatibility
	 * mode, we only have access to 16 and 32-bit instructions at the
	 * moment.
	 */
	A16 D16 lgdt	TEMPGDTOFF	/* load temporary GDT */
	A16 D16 lidt	TEMPIDTOFF	/* load temporary IDT */


	/*
	 * Do a far transfer to 64-bit mode.  Set the CS selector to a 64-bit
	 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
	 * to the real mode platter address of wc_long_mode_64 as until the
	 * 64-bit CS is in place we don't have access to 64-bit instructions
	 * and thus can't reference a 64-bit %rip.
	 */

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd7, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x67, %al
	outb	(%dx)
#endif

	D16 pushl	$TEMP_CS64_SEL
	A16 D16 pushl	LM64OFF

	D16 lret


/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane
 */
cominit:
	/ init COM1 & COM2

#if	DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl	$[COM1+LCR], %edx
	D16 movb	$DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM1+DLL], %edx	/ divisor latch lsb
	D16 movb	$B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM1+DLH], %edx	/ divisor latch hsb
	D16 movb	$B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM1+LCR], %edx	/ select COM1
	D16 movb	$[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl	$[COM1+MCR], %edx	/ select COM1
	D16 movb	$[RTS|DTR], %al		/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl	$[COM2+LCR], %edx
	D16 movb	$DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM2+DLL], %edx	/ divisor latch lsb
	D16 movb	$B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM2+DLH], %edx	/ divisor latch hsb
	D16 movb	$B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM2+LCR], %edx	/ select COM2
	D16 movb	$[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl	$[COM2+MCR], %edx	/ select COM2
	D16 movb	$[RTS|DTR], %al		/ data term ready & req to send
	outb	(%dx)
#endif	/* DEBUG */

	D16 ret

	.code64

	.globl wc_long_mode_64
wc_long_mode_64:

#if	LED
	movw	$WC_LED, %dx
	movb	$0xd8, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x68, %al
	outb	(%dx)
#endif

	/*
	 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
	 * CS.L=1) so we now have access to 64-bit instructions.
	 *
	 * First, set the 64-bit GDT base.
	 */
	.globl	rm_platter_pa
	movl	rm_platter_pa, %eax

	lgdtq	GDTROFF(%rax)		/* load 64-bit GDT */

	/*
	 * Save the CPU number in %r11; get the value here since it's saved in
	 * the real mode platter.
	 */
/ JAN
/ the following is wrong! need to figure out MP systems
/	movl	CPUNOFF(%rax), %r11d

	/*
	 * Add rm_platter_pa to %rsp to point it to the same location as seen
	 * from 64-bit mode.
	 */
	addq	%rax, %rsp

	/*
	 * Now do an lretq to load CS with the appropriate selector for the
	 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
	 * virtual address where boot originally loaded this code rather than
	 * the copy in the real mode platter's rm_code array as we've been
	 * doing so far.
	 */

#if	LED
	movw	$WC_LED, %dx
	movb	$0xd9, %al
	outb	(%dx)
#endif

/ JAN this should produce 'i' but we get 'g' instead ???
#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x69, %al
	outb	(%dx)
#endif

	pushq	$KCS_SEL
	pushq	$kernel_wc_code
	lretq

	.globl kernel_wc_code
kernel_wc_code:

#if	LED
	movw	$WC_LED, %dx
	movb	$0xda, %al
	outb	(%dx)
#endif

/ JAN this should produce 'j' but we get 'g' instead ???
#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x6a, %al
	outb	(%dx)
#endif

	/*
	 * Complete the balance of the setup we need to before executing
	 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
	 */
	.globl	rm_platter_va
	movq	rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdb, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	/ NOTE(review): movw/%ax here (and at 0x6c, 0x6d below) differs from
	/ the movb/%al pattern used elsewhere; outb still emits %al (0x6b).
	movw	$0x6b, %ax
	outb	(%dx)
#endif

	/*
	 * restore the rest of the registers
	 */

	lidtq	WC_IDT(%rbx)

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdc, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movw	$0x6c, %ax
	outb	(%dx)
#endif

	/*
	 * restore the rest of the registers
	 */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	movl	x86_feature, %ecx
	andl	$X86_NX, %ecx
	jz	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	movq	WC_CR4(%rbx), %rax	/ restore full cr4 (with Global Enable)
	movq	%rax, %cr4

	lldt	WC_LDT(%rbx)
	movzwq	WC_TR(%rbx), %rax	/ clear TSS busy bit
	addq	WC_GDT+2(%rbx), %rax
	andl	$0xfffffdff, 4(%rax)
	/ NOTE(review): the following load looks dead - %rcx is overwritten
	/ before its next use below; confirm it isn't a leftover debug read.
	movq	4(%rax), %rcx
	ltr	WC_TR(%rbx)

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdd, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movw	$0x6d, %ax
	outb	(%dx)
#endif

/ restore %fsbase %gsbase %kgsbase registers using wrmsr instruction

	movq	WC_FS(%rbx), %rcx	/ restore fs register
	movw	%cx, %fs

	movl	$MSR_AMD_FSBASE, %ecx	/ restore fsbase msr
	movl	WC_FSBASE(%rbx), %eax
	movl	WC_FSBASE+4(%rbx), %edx
	wrmsr

	movq	WC_GS(%rbx), %rcx	/ restore gs register
	movw	%cx, %gs

	movl	$MSR_AMD_GSBASE, %ecx	/ restore gsbase msr
	movl	WC_GSBASE(%rbx), %eax
	movl	WC_GSBASE+4(%rbx), %edx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx	/ restore kgsbase msr
	movl	WC_KGSBASE(%rbx), %eax
	movl	WC_KGSBASE+4(%rbx), %edx
	wrmsr

	movq	WC_CR0(%rbx), %rdx
	movq	%rdx, %cr0
	movq	WC_CR3(%rbx), %rdx
	movq	%rdx, %cr3
	movq	WC_CR8(%rbx), %rdx
	movq	%rdx, %cr8

#if	LED
	movw	$WC_LED, %dx
	movb	$0xde, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x6e, %al
	outb	(%dx)
#endif

	/*
	 * if we are not running on the boot CPU restore stack contents by
	 * calling i_cpr_restore_stack(curthread, save_stack);
	 */
	movq	%rsp, %rbp
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%rbx)
	je	2f

	movq	%gs:CPU_THREAD, %rdi
	movq	WC_SAVED_STACK(%rbx), %rsi
	call	i_cpr_restore_stack
2:

	movq	WC_RSP(%rbx), %rsp	/ restore stack pointer

	/*
	 * APIC initialization
	 */
	movq	%rsp, %rbp

	/*
	 * skip iff function pointer is NULL
	 */
	cmpq	$0, ap_mlsetup
	je	3f
	call	*ap_mlsetup
3:

	call	*cpr_start_cpu_func

/ restore %rbx to the value it had before we called the functions above
	movq	rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

	movq	WC_R8(%rbx), %r8
	movq	WC_R9(%rbx), %r9
	movq	WC_R10(%rbx), %r10
	movq	WC_R11(%rbx), %r11
	movq	WC_R12(%rbx), %r12
	movq	WC_R13(%rbx), %r13
	movq	WC_R14(%rbx), %r14
	movq	WC_R15(%rbx), %r15
/	movq	WC_RAX(%rbx), %rax
	movq	WC_RBP(%rbx), %rbp
	movq	WC_RCX(%rbx), %rcx
/	movq	WC_RDX(%rbx), %rdx
	movq	WC_RDI(%rbx), %rdi
	movq	WC_RSI(%rbx), %rsi


/ assume that %cs does not need to be restored
/ %ds, %es & %ss are ignored in 64bit mode
	movw	WC_SS(%rbx), %ss
	movw	WC_DS(%rbx), %ds
	movw	WC_ES(%rbx), %es

#if	LED
	movw	$WC_LED, %dx
	movb	$0xdf, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x6f, %al
	outb	(%dx)
#endif


	movq	WC_RBP(%rbx), %rbp
	movq	WC_RSP(%rbx), %rsp

#if	LED
	movw	$WC_LED, %dx
	movb	$0xe0, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x70, %al
	outb	(%dx)
#endif


	movq	WC_RCX(%rbx), %rcx

	pushq	WC_EFLAGS(%rbx)		/ restore flags
	popfq

#if	LED
	movw	$WC_LED, %dx
	movb	$0xe1, %al
	outb	(%dx)
#endif

#if	SERIAL
	movw	$WC_COM, %dx
	movb	$0x71, %al
	outb	(%dx)
#endif

/*
 * can not use outb after this point, because doing so would mean using
 * %dx which would modify %rdx which is restored here
 */

	movq	%rbx, %rax
	movq	WC_RDX(%rax), %rdx
	movq	WC_RBX(%rax), %rbx

	leave

	movq	WC_RETADDR(%rax), %rax
	movq	%rax, (%rsp)		/ return to caller of wc_save_context

	xorl	%eax, %eax		/ at wakeup return 0
	ret


	SET_SIZE(wc_rm_start)

/*
 * void asmspin(int count)
 *
 * Busy-wait: spins %edi iterations using the legacy loop instruction.
 * NOTE(review): no ret here - execution falls through past SET_SIZE;
 * confirm this is intended.
 */
	ENTRY_NP(asmspin)

	movl	%edi, %ecx
A1:
	loop	A1

	SET_SIZE(asmspin)

	.globl wc_rm_end
wc_rm_end:
	nop

#elif defined(__i386)

	ENTRY_NP(wc_rm_start)

/entry:	jmp	entry		/ stop here for HDT

	cli
	movw	%cs, %ax
	movw	%ax, %ds		/ establish ds ...
	movw	%ax, %ss		/ ... and ss:esp
	D16 movl	$WC_STKSTART, %esp

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd1, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x61, %al
	outb	(%dx)
#endif


	D16 call	vgainit
	D16 call	kbdinit
	D16 call	cominit

#if	LED
	D16 movl	$WC_LED, %edx
	D16 movb	$0xd2, %al
	outb	(%dx)
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x62, %al
	outb	(%dx)
#endif

	D16 A16 movl	$WC_CPU, %ebx	/ base add of wc_cpu_t

#if	LED
	D16 movb	$0xd3, %al
	outb	$WC_LED
#endif

#if	SERIAL
	D16 movl	$WC_COM, %edx
	D16 movb	$0x63, %al
	outb	(%dx)
#endif

	D16 A16 movl	%cs:WC_DS(%ebx), %edx	/ %ds post prot/paging transit

#if	LED
	D16 movb	$0xd4, %al
	outb	$WC_LED
#endif

	D16 A16 lgdt	%cs:WC_GDT(%ebx)	/ restore gdt and idtr
	D16 A16 lidt	%cs:WC_IDT(%ebx)

#if	LED
	D16 movb	$0xd5, %al
	outb	$WC_LED
#endif

	D16 A16 movl	%cs:WC_CR4(%ebx), %eax	/ restore cr4
	D16 andl	$-1!CR4_PGE, %eax	/ don't set Global Enable yet
	movl	%eax, %cr4

#if	LED
	D16 movb	$0xd6, %al
	outb	$WC_LED
#endif

	D16 A16 movl	%cs:WC_CR3(%ebx), %eax	/ set PDPT
	movl	%eax, %cr3

#if	LED
	D16 movb	$0xd7, %al
	outb	$WC_LED
#endif

	D16 A16 movl	%cs:WC_CR0(%ebx), %eax	/ enable prot/paging, etc.
	movl	%eax, %cr0

#if	LED
	D16 movb	$0xd8, %al
	outb	$WC_LED
#endif

	D16 A16 movl	%cs:WC_VIRTADDR(%ebx), %ebx	/ virtaddr of wc_cpu_t

#if	LED
	D16 movb	$0xd9, %al
	outb	$WC_LED
#endif

#if	LED
	D16 movb	$0xda, %al
	outb	$WC_LED
#endif

	jmp	flush			/ flush prefetch queue
flush:
	D16 pushl	$KCS_SEL
	D16 pushl	$kernel_wc_code
	D16 lret			/ re-appear at kernel_wc_code


/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane for debug output
 */
cominit:
#if	DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl	$[COM1+LCR], %edx
	D16 movb	$DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM1+DLL], %edx	/ divisor latch lsb
	D16 movb	$B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM1+DLH], %edx	/ divisor latch hsb
	D16 movb	$B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM1+LCR], %edx	/ select COM1
	D16 movb	$[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl	$[COM1+MCR], %edx	/ select COM1
	D16 movb	$[RTS|DTR], %al		/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl	$[COM2+LCR], %edx
	D16 movb	$DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM2+DLL], %edx	/ divisor latch lsb
	D16 movb	$B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM2+DLH], %edx	/ divisor latch hsb
	D16 movb	$B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl	$[COM2+LCR], %edx	/ select COM2
	D16 movb	$[STOP1|BITS8], %al	/ 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl	$[COM2+MCR], %edx	/ select COM2
	D16 movb	$[RTS|DTR], %al		/ data term ready & req to send
	outb	(%dx)
#endif	/* DEBUG */

	D16 ret

	.globl wc_rm_end
wc_rm_end:
	nop

	.globl kernel_wc_code
kernel_wc_code:
	/ At this point we are with kernel's cs and proper eip.
	/ We will be executing not from the copy in real mode platter,
	/ but from the original code where boot loaded us.
	/ By this time GDT and IDT are loaded as is cr0, cr3 and cr4.
	/ %ebx is wc_cpu
	/ %dx is our ds

#if	LED
	D16 movb	$0xdb, %al
	outb	$WC_LED
#endif

/ got here OK

	movw	%dx, %ds		/ $KDS_SEL

#if	LED
	movb	$0xdc, %al
	outb	$WC_LED
#endif

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	movl	x86_feature, %ecx
	andl	$X86_NX, %ecx
	jz	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	movl	WC_CR4(%ebx), %eax	/ restore full cr4 (with Global Enable)
	movl	%eax, %cr4


	lldt	WC_LDT(%ebx)		/ $LDT_SEL

	movzwl	WC_TR(%ebx), %eax	/ clear TSS busy bit
	addl	WC_GDT+2(%ebx), %eax
	andl	$-1!0x200, 4(%eax)
	ltr	WC_TR(%ebx)		/ $UTSS_SEL

	movw	WC_SS(%ebx), %ss	/ restore segment registers
	movw	WC_ES(%ebx), %es
	movw	WC_FS(%ebx), %fs
	movw	WC_GS(%ebx), %gs

	/*
	 * set the stack pointer to point into the identity mapped page
	 * temporarily, so we can make function calls
	 */
	.globl	rm_platter_va
	movl	rm_platter_va, %eax
	movl	$WC_STKSTART, %esp
	addl	%eax, %esp
	movl	%esp, %ebp

	/*
	 * if we are not running on the boot CPU restore stack contents by
	 * calling i_cpr_restore_stack(curthread, save_stack);
	 */
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%ebx)
	je	2f

	pushl	WC_SAVED_STACK(%ebx)
	pushl	%gs:CPU_THREAD
	call	i_cpr_restore_stack
	addl	$0x10, %esp
2:

	movl	WC_ESP(%ebx), %esp
	movl	%esp, %ebp

	movl	WC_RETADDR(%ebx), %eax	/ return to caller of wc_save_context
	movl	%eax, (%esp)

	/*
	 * APIC initialization, skip iff function pointer is NULL
	 */
	cmpl	$0, ap_mlsetup
	je	3f
	call	*ap_mlsetup
3:

	call	*cpr_start_cpu_func

	pushl	WC_EFLAGS(%ebx)		/ restore flags
	popfl

	movl	WC_EDI(%ebx), %edi	/ restore general registers
	movl	WC_ESI(%ebx), %esi
	movl	WC_EBP(%ebx), %ebp
	movl	WC_EBX(%ebx), %ebx

/exit:	jmp	exit		/ stop here for HDT

	xorl	%eax, %eax		/ at wakeup return 0
	ret

	SET_SIZE(wc_rm_start)


#endif	/* defined(__amd64) */

#endif	/* !defined(__GNU_AS__) */

#endif /* lint */
