/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>
#include <sys/cpr_wakecode.h>

#if !defined(__lint)
#include <sys/segments.h>
#include "assym.h"
#endif

#ifdef DEBUG
#define	LED	1
#define	SERIAL	1
#endif	/* DEBUG */

#ifdef DEBUG
#define	COM1	0x3f8
#define	COM2	0x2f8
#define	WC_COM	COM2	/* either COM1 or COM2 */
#define	WC_LED	0x80	/* diagnostic LED port on the motherboard */

/*
 * defined as offsets from the data register
 */
#define	DLL	0	/* divisor latch (lsb) */
#define	DLH	1	/* divisor latch (msb) */
#define	LCR	3	/* line control register */
#define	MCR	4	/* modem control register */


#define	DLAB	0x80	/* divisor latch access bit */
#define	B9600L	0x0c	/* lsb bit pattern for 9600 baud */
#define	B9600H	0x0	/* msb bit pattern for 9600 baud */
#define	DTR	0x01	/* Data Terminal Ready */
#define	RTS	0x02	/* Request To Send */
#define	STOP1	0x00	/* 1 stop bit */
#define	BITS8	0x03	/* 8 bits per char */

#endif	/* DEBUG */

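/*
 * When built with DEBUG, the LED and SERIAL blocks below write a progress
 * byte to the WC_LED diagnostic port and an ASCII character to WC_COM at
 * each stage of the wakeup path, so progress can be followed on the
 * motherboard diagnostic LED port and a serial console before the asy
 * driver has resumed.
 */
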
/*
 * This file contains the low level routines involved in getting
 * into and out of ACPI S3, including those needed for restarting
 * the non-boot cpus.
 *
 * Our assumptions:
 *
 * Our actions:
 *
 */

#if defined(lint) || defined(__lint)

/*ARGSUSED*/
int
wc_save_context(wc_cpu_t *pcpu)
{ return 0; }

#else	/* lint */

#if defined(__amd64)

	ENTRY_NP(wc_save_context)

	movq	(%rsp), %rdx		/ return address
	movq	%rdx, WC_RETADDR(%rdi)
	pushq	%rbp
	movq	%rsp, %rbp

	movq	%rdi, WC_VIRTADDR(%rdi)
	movq	%rdi, WC_RDI(%rdi)

	movq	%rdx, WC_RDX(%rdi)

/ stash everything else we need
	sgdt	WC_GDT(%rdi)
	sidt	WC_IDT(%rdi)
	sldt	WC_LDT(%rdi)
	str	WC_TR(%rdi)

	movq	%cr0, %rdx
	movq	%rdx, WC_CR0(%rdi)
	movq	%cr3, %rdx
	movq	%rdx, WC_CR3(%rdi)
	movq	%cr4, %rdx
	movq	%rdx, WC_CR4(%rdi)
	movq	%cr8, %rdx
	movq	%rdx, WC_CR8(%rdi)

	movq	%r8, WC_R8(%rdi)
	movq	%r9, WC_R9(%rdi)
	movq	%r10, WC_R10(%rdi)
	movq	%r11, WC_R11(%rdi)
	movq	%r12, WC_R12(%rdi)
	movq	%r13, WC_R13(%rdi)
	movq	%r14, WC_R14(%rdi)
	movq	%r15, WC_R15(%rdi)
	movq	%rax, WC_RAX(%rdi)
	movq	%rbp, WC_RBP(%rdi)
	movq	%rbx, WC_RBX(%rdi)
	movq	%rcx, WC_RCX(%rdi)
	movq	%rsi, WC_RSI(%rdi)
	movq	%rsp, WC_RSP(%rdi)

	movw	%ss, WC_SS(%rdi)
	movw	%cs, WC_CS(%rdi)
	movw	%ds, WC_DS(%rdi)
	movw	%es, WC_ES(%rdi)

	movq	$0, %rcx		/ save %fs register
	movw	%fs, %cx
	movq	%rcx, WC_FS(%rdi)

	movl	$MSR_AMD_FSBASE, %ecx
	rdmsr
	movl	%eax, WC_FSBASE(%rdi)
	movl	%edx, WC_FSBASE+4(%rdi)

	movq	$0, %rcx		/ save %gs register
	movw	%gs, %cx
	movq	%rcx, WC_GS(%rdi)

	movl	$MSR_AMD_GSBASE, %ecx	/ save gsbase msr
	rdmsr
	movl	%eax, WC_GSBASE(%rdi)
	movl	%edx, WC_GSBASE+4(%rdi)

	movl	$MSR_AMD_KGSBASE, %ecx	/ save kgsbase msr
	rdmsr
	movl	%eax, WC_KGSBASE(%rdi)
	movl	%edx, WC_KGSBASE+4(%rdi)

	movq	%gs:CPU_ID, %rax	/ save current cpu id
	movq	%rax, WC_CPU_ID(%rdi)

	pushfq
	popq	WC_EFLAGS(%rdi)

	wbinvd				/ flush the cache
	mfence

	movq	$1, %rax		/ at suspend return 1

	leave

	ret

	SET_SIZE(wc_save_context)

#elif defined(__i386)

	ENTRY_NP(wc_save_context)

	movl	4(%esp), %eax		/ wc_cpu_t *
	movl	%eax, WC_VIRTADDR(%eax)

	movl	(%esp), %edx		/ return address
	movl	%edx, WC_RETADDR(%eax)

	str	WC_TR(%eax)		/ stash everything else we need
	sgdt	WC_GDT(%eax)
	sldt	WC_LDT(%eax)
	sidt	WC_IDT(%eax)

	movl	%cr0, %edx
	movl	%edx, WC_CR0(%eax)
	movl	%cr3, %edx
	movl	%edx, WC_CR3(%eax)
	movl	%cr4, %edx
	movl	%edx, WC_CR4(%eax)

	movl	%ebx, WC_EBX(%eax)
	movl	%edi, WC_EDI(%eax)
	movl	%esi, WC_ESI(%eax)
	movl	%ebp, WC_EBP(%eax)
	movl	%esp, WC_ESP(%eax)

	movw	%ss, WC_SS(%eax)
	movw	%cs, WC_CS(%eax)
	movw	%ds, WC_DS(%eax)
	movw	%es, WC_ES(%eax)
	movw	%fs, WC_FS(%eax)
	movw	%gs, WC_GS(%eax)

	pushfl
	popl	WC_EFLAGS(%eax)

	pushl	%gs:CPU_ID		/ save current cpu id
	popl	WC_CPU_ID(%eax)

	wbinvd				/ flush the cache
	mfence

	movl	$1, %eax		/ at suspend return 1
	ret

	SET_SIZE(wc_save_context)

#endif	/* __amd64 */

#endif /* lint */


/*
 * Our assumptions:
 *	- We are running in real mode.
 *	- Interrupts are disabled.
 *
 * Our actions:
 *	- We start using our GDT by loading correct values in the
 *	  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *	  gs=KGS_SEL).
 *	- We change over to using our IDT.
 *	- We load the default LDT into the hardware LDT register.
 *	- We load the default TSS into the hardware task register.
 *	- We restore registers.
 *	- We return to the original caller (a la setjmp).
 */

#if defined(lint) || defined(__lint)

void
wc_rm_start(void)
{}

void
wc_rm_end(void)
{}

#else	/* lint */

#if defined(__amd64)

	ENTRY_NP(wc_rm_start)

	/*
	 * For the Sun Studio 10 assembler we needed to do a .code32 and
	 * mentally invert the meaning of the addr16 and data16 prefixes to
	 * get 32-bit access when generating code to be executed in 16-bit
	 * mode (sigh...)
	 *
	 * This code, despite always being built with GNU as, has inherited
	 * the conceptual damage.
	 */

	.code32

	cli
	movw	%cs, %ax
	movw	%ax, %ds		/ establish ds ...
	movw	%ax, %ss		/ ... and ss:esp
	D16 movl $WC_STKSTART, %esp
/ using the following value blows up machines! - DO NOT USE
/	D16 movl 0xffc, %esp


#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd1, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x61, %al
	outb	(%dx)
#endif

	D16 call cominit

	/*
	 * Enable protected mode, write protect, and alignment mask.
	 * %cr0 has already been initialised to zero.
	 */
	movl	%cr0, %eax
	D16 orl $_CONST(CR0_PE|CR0_WP|CR0_AM), %eax
	movl	%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp	pestart
pestart:

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd2, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x62, %al
	outb	(%dx)
#endif

	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode
	 */

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd3, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x63, %al
	outb	(%dx)
#endif

	/*
	 * Add any initial cr4 bits
	 */
	movl	%cr4, %eax
	A16 D16 orl CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	D16 orl $CR4_PAE, %eax
	movl	%eax, %cr4

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd4, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x64, %al
	outb	(%dx)
#endif

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	A16 D16 movl CR3OFF, %eax
	movl	%eax, %cr3

	/*
	 * Set long mode enable in EFER (EFER.LME = 1)
	 */
	D16 movl $MSR_AMD_EFER, %ecx
	rdmsr

	D16 orl $AMD_EFER_LME, %eax
	wrmsr

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd5, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x65, %al
	outb	(%dx)
#endif

	/*
	 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
	 */
	movl	%cr0, %eax
	D16 orl $CR0_PG, %eax
	movl	%eax, %cr0

	/*
	 * The instruction after enabling paging in CR0 MUST be a branch.
	 */
	jmp	long_mode_active

long_mode_active:

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd6, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x66, %al
	outb	(%dx)
#endif

	/*
	 * Long mode is now active, but since we're still running with the
	 * original 16-bit CS we're actually in 16-bit compatibility mode.
	 *
	 * We have to load an intermediate GDT and IDT here that we know are
	 * in 32-bit space before we can use the kernel's GDT and IDT, which
	 * may be in the 64-bit address space, and since we're in compatibility
	 * mode, we only have access to 16- and 32-bit instructions at the
	 * moment.
	 */
	A16 D16 lgdt TEMPGDTOFF		/* load temporary GDT */
	A16 D16 lidt TEMPIDTOFF		/* load temporary IDT */


	/*
	 * Do a far transfer to 64-bit mode.  Set the CS selector to a 64-bit
	 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
	 * to the real mode platter address of wc_long_mode_64 as until the
	 * 64-bit CS is in place we don't have access to 64-bit instructions
	 * and thus can't reference a 64-bit %rip.
	 */

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd7, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x67, %al
	outb	(%dx)
#endif

	D16 pushl $TEMP_CS64_SEL
	A16 D16 pushl LM64OFF

	D16 lret


/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane
 */
cominit:
	/ init COM1 & COM2

#if DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl $_CONST(COM1+LCR), %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM1+DLL), %edx / divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM1+DLH), %edx / divisor latch msb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM1+LCR), %edx / select COM1
	D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $_CONST(COM1+MCR), %edx / select COM1
	D16 movb $_CONST(RTS|DTR), %al	/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl $_CONST(COM2+LCR), %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM2+DLL), %edx / divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM2+DLH), %edx / divisor latch msb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM2+LCR), %edx / select COM2
	D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $_CONST(COM2+MCR), %edx / select COM2
	D16 movb $_CONST(RTS|DTR), %al	/ data term ready & req to send
	outb	(%dx)
#endif	/* DEBUG */

	D16 ret

	.code64

	.globl wc_long_mode_64
wc_long_mode_64:

#if LED
	movw	$WC_LED, %dx
	movb	$0xd8, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x68, %al
	outb	(%dx)
#endif

	/*
	 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
	 * CS.L=1) so we now have access to 64-bit instructions.
	 *
	 * First, set the 64-bit GDT base.
	 */
	.globl rm_platter_pa
	movl	rm_platter_pa, %eax

	lgdtq	GDTROFF(%rax)		/* load 64-bit GDT */

	/*
	 * Save the CPU number in %r11; get the value here since it's saved in
	 * the real mode platter.
	 */
/ JAN
/ the following is wrong! need to figure out MP systems
/	movl	CPUNOFF(%rax), %r11d

	/*
	 * Add rm_platter_pa to %rsp to point it to the same location as seen
	 * from 64-bit mode.
	 */
	addq	%rax, %rsp

	/*
	 * Now do an lretq to load CS with the appropriate selector for the
	 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
	 * virtual address where boot originally loaded this code rather than
	 * the copy in the real mode platter's rm_code array as we've been
	 * doing so far.
	 */

#if LED
	movw	$WC_LED, %dx
	movb	$0xd9, %al
	outb	(%dx)
#endif

/ JAN this should produce 'i' but we get 'g' instead ???
#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x69, %al
	outb	(%dx)
#endif

	pushq	$KCS_SEL
	pushq	$kernel_wc_code
	lretq

	.globl kernel_wc_code
kernel_wc_code:

#if LED
	movw	$WC_LED, %dx
	movb	$0xda, %al
	outb	(%dx)
#endif

/ JAN this should produce 'j' but we get 'g' instead ???
#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x6a, %al
	outb	(%dx)
#endif

	/*
	 * Complete the balance of the setup we need to do before executing
	 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
	 */
	.globl rm_platter_va
	movq	rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

#if LED
	movw	$WC_LED, %dx
	movb	$0xdb, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movw	$0x6b, %ax
	outb	(%dx)
#endif

	/*
	 * restore the IDT
	 */

	lidtq	WC_IDT(%rbx)

#if LED
	movw	$WC_LED, %dx
	movb	$0xdc, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movw	$0x6c, %ax
	outb	(%dx)
#endif

	/*
	 * reload the data segment selectors
	 */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	btl	$X86FSET_NX, x86_featureset(%rip)
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	movq	WC_CR4(%rbx), %rax	/ restore full cr4 (with Global Enable)
	movq	%rax, %cr4

	lldt	WC_LDT(%rbx)
	movzwq	WC_TR(%rbx), %rax	/ clear TSS busy bit
	addq	WC_GDT+2(%rbx), %rax
	andl	$0xfffffdff, 4(%rax)
	movq	4(%rax), %rcx
	ltr	WC_TR(%rbx)

#if LED
	movw	$WC_LED, %dx
	movb	$0xdd, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movw	$0x6d, %ax
	outb	(%dx)
#endif

/ restore %fsbase, %gsbase and %kgsbase registers using wrmsr instruction

	movq	WC_FS(%rbx), %rcx	/ restore fs register
	movw	%cx, %fs

	movl	$MSR_AMD_FSBASE, %ecx
	movl	WC_FSBASE(%rbx), %eax
	movl	WC_FSBASE+4(%rbx), %edx
	wrmsr

	movq	WC_GS(%rbx), %rcx	/ restore gs register
	movw	%cx, %gs

	movl	$MSR_AMD_GSBASE, %ecx	/ restore gsbase msr
	movl	WC_GSBASE(%rbx), %eax
	movl	WC_GSBASE+4(%rbx), %edx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx	/ restore kgsbase msr
	movl	WC_KGSBASE(%rbx), %eax
	movl	WC_KGSBASE+4(%rbx), %edx
	wrmsr

	movq	WC_CR0(%rbx), %rdx
	movq	%rdx, %cr0
	movq	WC_CR3(%rbx), %rdx
	movq	%rdx, %cr3
	movq	WC_CR8(%rbx), %rdx
	movq	%rdx, %cr8

#if LED
	movw	$WC_LED, %dx
	movb	$0xde, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x6e, %al
	outb	(%dx)
#endif

	/*
	 * If we are not running on the boot CPU, restore the stack contents
	 * by calling i_cpr_restore_stack(curthread, save_stack);
	 */
	movq	%rsp, %rbp
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%rbx)
	je	2f

	movq	%gs:CPU_THREAD, %rdi
	movq	WC_SAVED_STACK(%rbx), %rsi
	call	i_cpr_restore_stack
2:

	movq	WC_RSP(%rbx), %rsp	/ restore stack pointer

	/*
	 * APIC initialization
	 */
	movq	%rsp, %rbp

	/*
	 * skip iff function pointer is NULL
	 */
	cmpq	$0, ap_mlsetup
	je	3f
	call	*ap_mlsetup
3:

	call	*cpr_start_cpu_func

/ restore %rbx to the value it had before we called the functions above
	movq	rm_platter_va, %rbx
	addq	$WC_CPU, %rbx

	movq	WC_R8(%rbx), %r8
	movq	WC_R9(%rbx), %r9
	movq	WC_R10(%rbx), %r10
	movq	WC_R11(%rbx), %r11
	movq	WC_R12(%rbx), %r12
	movq	WC_R13(%rbx), %r13
	movq	WC_R14(%rbx), %r14
	movq	WC_R15(%rbx), %r15
/	movq	WC_RAX(%rbx), %rax
	movq	WC_RBP(%rbx), %rbp
	movq	WC_RCX(%rbx), %rcx
/	movq	WC_RDX(%rbx), %rdx
	movq	WC_RDI(%rbx), %rdi
	movq	WC_RSI(%rbx), %rsi


/ assume that %cs does not need to be restored
/ %ds, %es & %ss are ignored in 64bit mode
	movw	WC_SS(%rbx), %ss
	movw	WC_DS(%rbx), %ds
	movw	WC_ES(%rbx), %es

#if LED
	movw	$WC_LED, %dx
	movb	$0xdf, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x6f, %al
	outb	(%dx)
#endif


	movq	WC_RBP(%rbx), %rbp
	movq	WC_RSP(%rbx), %rsp

#if LED
	movw	$WC_LED, %dx
	movb	$0xe0, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x70, %al
	outb	(%dx)
#endif


	movq	WC_RCX(%rbx), %rcx

	pushq	WC_EFLAGS(%rbx)		/ restore flags
	popfq

#if LED
	movw	$WC_LED, %dx
	movb	$0xe1, %al
	outb	(%dx)
#endif

#if SERIAL
	movw	$WC_COM, %dx
	movb	$0x71, %al
	outb	(%dx)
#endif

/*
 * We cannot use outb after this point, because doing so would use %dx and
 * thus modify %rdx, which is restored below.
 */

	movq	%rbx, %rax
	movq	WC_RDX(%rax), %rdx
	movq	WC_RBX(%rax), %rbx

	leave

	movq	WC_RETADDR(%rax), %rax
	movq	%rax, (%rsp)		/ return to caller of wc_save_context

	xorl	%eax, %eax		/ at wakeup return 0
	ret


	SET_SIZE(wc_rm_start)

/*
 * Simple delay loop: spin for the number of iterations passed in %edi.
 */
	ENTRY_NP(asmspin)

	movl	%edi, %ecx
A1:
	loop	A1

	SET_SIZE(asmspin)

	.globl wc_rm_end
wc_rm_end:
	nop

#elif defined(__i386)

	ENTRY_NP(wc_rm_start)

/entry:	jmp entry			/ stop here for HDT

	cli
	movw	%cs, %ax
	movw	%ax, %ds		/ establish ds ...
	movw	%ax, %ss		/ ... and ss:esp
	D16 movl $WC_STKSTART, %esp

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd1, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x61, %al
	outb	(%dx)
#endif


	D16 call vgainit
	D16 call kbdinit
	D16 call cominit

#if LED
	D16 movl $WC_LED, %edx
	D16 movb $0xd2, %al
	outb	(%dx)
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x62, %al
	outb	(%dx)
#endif

	D16 A16 movl $WC_CPU, %ebx	/ base addr of wc_cpu_t

#if LED
	D16 movb $0xd3, %al
	outb	$WC_LED
#endif

#if SERIAL
	D16 movl $WC_COM, %edx
	D16 movb $0x63, %al
	outb	(%dx)
#endif

	D16 A16 movl %cs:WC_DS(%ebx), %edx	/ %ds post prot/paging transit

#if LED
	D16 movb $0xd4, %al
	outb	$WC_LED
#endif

	D16 A16 lgdt %cs:WC_GDT(%ebx)	/ restore gdt and idtr
	D16 A16 lidt %cs:WC_IDT(%ebx)

#if LED
	D16 movb $0xd5, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_CR4(%ebx), %eax	/ restore cr4
	D16 andl $_BITNOT(CR4_PGE), %eax	/ don't set Global Enable yet
	movl	%eax, %cr4

#if LED
	D16 movb $0xd6, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_CR3(%ebx), %eax	/ set PDPT
	movl	%eax, %cr3

#if LED
	D16 movb $0xd7, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_CR0(%ebx), %eax	/ enable prot/paging, etc.
	movl	%eax, %cr0

#if LED
	D16 movb $0xd8, %al
	outb	$WC_LED
#endif

	D16 A16 movl %cs:WC_VIRTADDR(%ebx), %ebx	/ virtaddr of wc_cpu_t

#if LED
	D16 movb $0xd9, %al
	outb	$WC_LED
#endif

#if LED
	D16 movb $0xda, %al
	outb	$WC_LED
#endif

	jmp	flush			/ flush prefetch queue
flush:
	D16 pushl $KCS_SEL
	D16 pushl $kernel_wc_code
	D16 lret			/ re-appear at kernel_wc_code


/*
 * Support routine to re-initialize VGA subsystem
 */
vgainit:
	D16 ret

/*
 * Support routine to re-initialize keyboard (which is USB - help!)
 */
kbdinit:
	D16 ret

/*
 * Support routine to re-initialize COM ports to something sane for debug
 * output
 */
cominit:
#if DEBUG
/*
 * on debug kernels we need to initialize COM1 & COM2 here, so that
 * we can get debug output before the asy driver has resumed
 */

/ select COM1
	D16 movl $_CONST(COM1+LCR), %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM1+DLL), %edx / divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM1+DLH), %edx / divisor latch msb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM1+LCR), %edx / select COM1
	D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $_CONST(COM1+MCR), %edx / select COM1
	D16 movb $_CONST(RTS|DTR), %al	/ data term ready & req to send
	outb	(%dx)

/ select COM2
	D16 movl $_CONST(COM2+LCR), %edx
	D16 movb $DLAB, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM2+DLL), %edx / divisor latch lsb
	D16 movb $B9600L, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM2+DLH), %edx / divisor latch msb
	D16 movb $B9600H, %al		/ divisor latch
	outb	(%dx)

	D16 movl $_CONST(COM2+LCR), %edx / select COM2
	D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
	outb	(%dx)

	D16 movl $_CONST(COM2+MCR), %edx / select COM2
	D16 movb $_CONST(RTS|DTR), %al	/ data term ready & req to send
	outb	(%dx)
#endif	/* DEBUG */

	D16 ret

	.globl wc_rm_end
wc_rm_end:
	nop

	.globl kernel_wc_code
kernel_wc_code:
	/ At this point we are running with the kernel's %cs and proper %eip.
	/ We will be executing not from the copy in the real mode platter,
	/ but from the original location where boot loaded us.
	/ By this time GDT and IDT are loaded, as are cr0, cr3 and cr4.
	/ %ebx is wc_cpu
	/ %dx is our ds

#if LED
	D16 movb $0xdb, %al
	outb	$WC_LED
#endif

/ got here OK

	movw	%dx, %ds		/ $KDS_SEL

#if LED
	movb	$0xdc, %al
	outb	$WC_LED
#endif

	/*
	 * Before proceeding, enable usage of the page table NX bit if
	 * that's how the page tables are set up.
	 */
	bt	$X86FSET_NX, x86_featureset
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	movl	WC_CR4(%ebx), %eax	/ restore full cr4 (with Global Enable)
	movl	%eax, %cr4


	lldt	WC_LDT(%ebx)		/ $LDT_SEL

	movzwl	WC_TR(%ebx), %eax	/ clear TSS busy bit
	addl	WC_GDT+2(%ebx), %eax
	andl	$_BITNOT(0x200), 4(%eax)
	ltr	WC_TR(%ebx)		/ $UTSS_SEL

	movw	WC_SS(%ebx), %ss	/ restore segment registers
	movw	WC_ES(%ebx), %es
	movw	WC_FS(%ebx), %fs
	movw	WC_GS(%ebx), %gs

	/*
	 * set the stack pointer to point into the identity mapped page
	 * temporarily, so we can make function calls
	 */
	.globl rm_platter_va
	movl	rm_platter_va, %eax
	movl	$WC_STKSTART, %esp
	addl	%eax, %esp
	movl	%esp, %ebp

	/*
	 * If we are not running on the boot CPU, restore the stack contents
	 * by calling i_cpr_restore_stack(curthread, save_stack);
	 */
	call	i_cpr_bootcpuid
	cmpl	%eax, WC_CPU_ID(%ebx)
	je	2f

	pushl	WC_SAVED_STACK(%ebx)
	pushl	%gs:CPU_THREAD
	call	i_cpr_restore_stack
	addl	$0x10, %esp
2:

	movl	WC_ESP(%ebx), %esp
	movl	%esp, %ebp

	movl	WC_RETADDR(%ebx), %eax	/ return to caller of wc_save_context
	movl	%eax, (%esp)

	/*
	 * APIC initialization, skip iff function pointer is NULL
	 */
	cmpl	$0, ap_mlsetup
	je	3f
	call	*ap_mlsetup
3:

	call	*cpr_start_cpu_func

	pushl	WC_EFLAGS(%ebx)		/ restore flags
	popfl

	movl	WC_EDI(%ebx), %edi	/ restore general registers
	movl	WC_ESI(%ebx), %esi
	movl	WC_EBP(%ebx), %ebp
	movl	WC_EBX(%ebx), %ebx

/exit:	jmp exit			/ stop here for HDT

	xorl	%eax, %eax		/ at wakeup return 0
	ret

	SET_SIZE(wc_rm_start)


#endif	/* defined(__amd64) */

#endif	/* lint */