1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25#include <sys/asm_linkage.h>
26#include <sys/asm_misc.h>
27#include <sys/regset.h>
28#include <sys/privregs.h>
29#include <sys/x86_archext.h>
30#include <sys/cpr_wakecode.h>
31
32#if !defined(__lint)
33#include <sys/segments.h>
34#include "assym.h"
35#endif
36
37#ifdef  DEBUG
38#define LED     1
39#define SERIAL  1
40#endif	/*	DEBUG	*/
41
42#ifdef	DEBUG
43#define	COM1	0x3f8
44#define	COM2	0x2f8
45#define	WC_COM	COM2	/* either COM1 or COM2			*/
#define	WC_LED	0x80    /* diagnostic LED port on the motherboard	*/
47
48/*
49 * defined as offsets from the data register
50 */
51#define	DLL	0	/* divisor latch (lsb) */
52#define	DLH	1	/* divisor latch (msb) */
53#define	LCR	3	/* line control register		*/
54#define	MCR	4	/* modem control register		*/
55
56
57#define	DLAB	0x80    /* divisor latch access bit		*/
#define	B9600L	0x0c	/* lsb bit pattern for 9600 baud	*/
#define	B9600H	0x0	/* msb bit pattern for 9600 baud	*/
60#define	DTR	0x01    /* Data Terminal Ready			*/
61#define	RTS	0x02    /* Request To Send			*/
62#define	STOP1	0x00	/* 1 stop bit				*/
63#define	BITS8	0x03    /* 8 bits per char			*/
64
65#endif	/*	DEBUG	*/
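
/*
 * Debug progress markers (a summary of the convention used below): on DEBUG
 * builds each stage of the wakeup path writes one byte to the diagnostic
 * LED port (0xd1, 0xd2, ... 0xe1) and one ASCII byte to the chosen COM
 * port ('a' == 0x61 through 'q' == 0x71), so a hang during resume can be
 * localized by the last marker seen.
 */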
66
67/*
 *	This file contains the low-level routines involved in getting
 *	into and out of ACPI S3, including those needed for restarting
 *	the non-boot CPUs.
 *
 *	The assumptions the wakeup code makes and the actions it takes are
 *	documented with wc_rm_start below.
 */
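
/*
 * Rough calling contract (an illustrative sketch only; the actual callers
 * live in the cpr suspend code, and "cpu" below simply stands for the
 * wc_cpu_t pointer argument): wc_save_context() behaves much like
 * setjmp() -- it returns 1 on the suspend path, and once the wakeup code
 * below has restored the saved context it returns a second time with 0:
 *
 *	if (wc_save_context(cpu))
 *		... suspend path: the system now enters S3 ...
 *	else
 *		... resume path: we just woke up ...
 */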
77
78#if defined(lint) || defined(__lint)
79
80/*ARGSUSED*/
81int
82wc_save_context(wc_cpu_t *pcpu)
83{ return 0; }
84
85#else	/* lint */
86
87#if defined(__amd64)
88
89	ENTRY_NP(wc_save_context)
90
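	/*
	 * %rdi holds the wc_cpu_t pointer passed by our caller (the first
	 * argument in the amd64 ABI); everything below is saved at WC_*
	 * offsets within that structure.
	 */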
91	movq	(%rsp), %rdx		/ return address
92	movq	%rdx, WC_RETADDR(%rdi)
93	pushq	%rbp
94	movq	%rsp,%rbp
95
96	movq    %rdi, WC_VIRTADDR(%rdi)
97	movq    %rdi, WC_RDI(%rdi)
98
99	movq    %rdx, WC_RDX(%rdi)
100
101/ stash everything else we need
102	sgdt	WC_GDT(%rdi)
103	sidt	WC_IDT(%rdi)
104	sldt	WC_LDT(%rdi)
105	str	WC_TR(%rdi)
106
107	movq	%cr0, %rdx
108	movq	%rdx, WC_CR0(%rdi)
109	movq	%cr3, %rdx
110	movq	%rdx, WC_CR3(%rdi)
111	movq	%cr4, %rdx
112	movq	%rdx, WC_CR4(%rdi)
113	movq	%cr8, %rdx
114	movq	%rdx, WC_CR8(%rdi)
115
116	movq    %r8, WC_R8(%rdi)
117	movq    %r9, WC_R9(%rdi)
118	movq    %r10, WC_R10(%rdi)
119	movq    %r11, WC_R11(%rdi)
120	movq    %r12, WC_R12(%rdi)
121	movq    %r13, WC_R13(%rdi)
122	movq    %r14, WC_R14(%rdi)
123	movq    %r15, WC_R15(%rdi)
124	movq    %rax, WC_RAX(%rdi)
125	movq    %rbp, WC_RBP(%rdi)
126	movq    %rbx, WC_RBX(%rdi)
127	movq    %rcx, WC_RCX(%rdi)
128	movq    %rsi, WC_RSI(%rdi)
129	movq    %rsp, WC_RSP(%rdi)
130
131	movw	%ss, WC_SS(%rdi)
132	movw	%cs, WC_CS(%rdi)
133	movw	%ds, WC_DS(%rdi)
134	movw	%es, WC_ES(%rdi)
135
136	movq	$0, %rcx		/ save %fs register
137	movw    %fs, %cx
138	movq    %rcx, WC_FS(%rdi)
139
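	/*
	 * rdmsr returns the 64-bit MSR contents in %edx:%eax (high:low),
	 * so each segment-base MSR is saved as two 32-bit halves.
	 */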
140	movl    $MSR_AMD_FSBASE, %ecx
141	rdmsr
142	movl    %eax, WC_FSBASE(%rdi)
143	movl    %edx, WC_FSBASE+4(%rdi)
144
145	movq	$0, %rcx		/ save %gs register
146	movw    %gs, %cx
147	movq    %rcx, WC_GS(%rdi)
148
149	movl    $MSR_AMD_GSBASE, %ecx	/ save gsbase msr
150	rdmsr
151	movl    %eax, WC_GSBASE(%rdi)
152	movl    %edx, WC_GSBASE+4(%rdi)
153
154	movl    $MSR_AMD_KGSBASE, %ecx	/ save kgsbase msr
155	rdmsr
156	movl    %eax, WC_KGSBASE(%rdi)
157	movl    %edx, WC_KGSBASE+4(%rdi)
158
159	movq	%gs:CPU_ID, %rax	/ save current cpu id
160	movq	%rax, WC_CPU_ID(%rdi)
161
162	pushfq
163	popq	WC_EFLAGS(%rdi)
164
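	/*
	 * The CPU caches are not preserved across S3, so write back any
	 * dirty lines to ensure the context saved above reaches memory.
	 */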
165	wbinvd				/ flush the cache
166	mfence
167
168	movq	$1, %rax		/ at suspend return 1
169
170	leave
171
172	ret
173
174	SET_SIZE(wc_save_context)
175
176#elif defined(__i386)
177
178	ENTRY_NP(wc_save_context)
179
180	movl	4(%esp), %eax		/ wc_cpu_t *
181	movl	%eax, WC_VIRTADDR(%eax)
182
183	movl	(%esp), %edx		/ return address
184	movl	%edx, WC_RETADDR(%eax)
185
186	str	WC_TR(%eax)		/ stash everything else we need
187	sgdt	WC_GDT(%eax)
188	sldt	WC_LDT(%eax)
189	sidt	WC_IDT(%eax)
190
191	movl	%cr0, %edx
192	movl	%edx, WC_CR0(%eax)
193	movl	%cr3, %edx
194	movl	%edx, WC_CR3(%eax)
195	movl	%cr4, %edx
196	movl	%edx, WC_CR4(%eax)
197
198	movl	%ebx, WC_EBX(%eax)
199	movl	%edi, WC_EDI(%eax)
200	movl	%esi, WC_ESI(%eax)
201	movl	%ebp, WC_EBP(%eax)
202	movl	%esp, WC_ESP(%eax)
203
204	movw	%ss, WC_SS(%eax)
205	movw	%cs, WC_CS(%eax)
206	movw	%ds, WC_DS(%eax)
207	movw	%es, WC_ES(%eax)
208	movw	%fs, WC_FS(%eax)
209	movw	%gs, WC_GS(%eax)
210
211	pushfl
212	popl	WC_EFLAGS(%eax)
213
214	pushl	%gs:CPU_ID		/ save current cpu id
215	popl	WC_CPU_ID(%eax)
216
217	wbinvd				/ flush the cache
218	mfence
219
220	movl	$1, %eax		/ at suspend return 1
221	ret
222
223	SET_SIZE(wc_save_context)
224
225#endif	/* __amd64 */
226
227#endif /* lint */
228
229
230/*
231 *	Our assumptions:
232 *		- We are running in real mode.
233 *		- Interrupts are disabled.
234 *
235 *	Our actions:
236 *		- We start using our GDT by loading correct values in the
237 *		  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
238 *		  gs=KGS_SEL).
239 *		- We change over to using our IDT.
240 *		- We load the default LDT into the hardware LDT register.
241 *		- We load the default TSS into the hardware task register.
242 *		- We restore registers
243 *		- We return to original caller (a la setjmp)
244 */
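
/*
 * Note (summarizing what the code and comments below imply): everything
 * between wc_rm_start and wc_rm_end is copied into the real mode platter
 * (its rm_code array) and is entered there, in real mode, at wakeup.
 * That is why the early instructions carry explicit A16/D16 prefixes and
 * reference their data through platter offsets until paging and the
 * kernel's GDT/IDT are back in place.
 */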
245
246#if defined(lint) || defined(__lint)
247
248void
249wc_rm_start(void)
250{}
251
252void
253wc_rm_end(void)
254{}
255
256#else	/* lint */
257
258#if defined(__amd64)
259
260	ENTRY_NP(wc_rm_start)
261
262	/*
263	 * For the Sun Studio 10 assembler we needed to do a .code32 and
264	 * mentally invert the meaning of the addr16 and data16 prefixes to
265	 * get 32-bit access when generating code to be executed in 16-bit
266	 * mode (sigh...)
267	 *
268	 * This code, despite always being built with GNU as, has inherited
269	 * the conceptual damage.
270	 */
271
272	.code32
273
274	cli
275	movw		%cs, %ax
276	movw		%ax, %ds		/ establish ds ...
277	movw		%ax, %ss		/ ... and ss:esp
278	D16 movl	$WC_STKSTART, %esp
279/ using the following value blows up machines! - DO NOT USE
280/	D16 movl	0xffc, %esp
281
282
283#if     LED
284	D16 movl        $WC_LED, %edx
285	D16 movb        $0xd1, %al
286	outb    (%dx)
287#endif
288
289#if     SERIAL
290	D16 movl        $WC_COM, %edx
291	D16 movb        $0x61, %al
292	outb    (%dx)
293#endif
294
295	D16 call	cominit
296
297	/*
	 * Enable protected mode, write protect, and alignment mask.
	 * %cr0 has already been initialized to zero.
	 */
301	movl		%cr0, %eax
302	D16 orl		$_CONST(CR0_PE|CR0_WP|CR0_AM), %eax
303	movl		%eax, %cr0
304
305	/*
306	 * Do a jmp immediately after writing to cr0 when enabling protected
307	 * mode to clear the real mode prefetch queue (per Intel's docs)
308	 */
309	jmp		pestart
310pestart:
311
312#if     LED
313	D16 movl        $WC_LED, %edx
314	D16 movb        $0xd2, %al
315	outb    (%dx)
316#endif
317
318#if     SERIAL
319	D16 movl        $WC_COM, %edx
320	D16 movb        $0x62, %al
321	outb    (%dx)
322#endif
323
324	/*
325	 * 16-bit protected mode is now active, so prepare to turn on long
326	 * mode
327	 */
328
329#if     LED
330	D16 movl        $WC_LED, %edx
331	D16 movb        $0xd3, %al
332	outb    (%dx)
333#endif
334
335#if     SERIAL
336	D16 movl        $WC_COM, %edx
337	D16 movb        $0x63, %al
338	outb    (%dx)
339#endif
340
341	/*
342 	 * Add any initial cr4 bits
343	 */
344	movl		%cr4, %eax
345	A16 D16 orl	CR4OFF, %eax
346
347	/*
348	 * Enable PAE mode (CR4.PAE)
349	 */
350	D16 orl		$CR4_PAE, %eax
351	movl		%eax, %cr4
352
353#if     LED
354	D16 movl        $WC_LED, %edx
355	D16 movb        $0xd4, %al
356	outb    (%dx)
357#endif
358
359#if     SERIAL
360	D16 movl        $WC_COM, %edx
361	D16 movb        $0x64, %al
362	outb    (%dx)
363#endif
364
365	/*
366	 * Point cr3 to the 64-bit long mode page tables.
367	 *
368	 * Note that these MUST exist in 32-bit space, as we don't have
369	 * a way to load %cr3 with a 64-bit base address for the page tables
370	 * until the CPU is actually executing in 64-bit long mode.
371	 */
372	A16 D16 movl	CR3OFF, %eax
373	movl		%eax, %cr3
374
375	/*
376	 * Set long mode enable in EFER (EFER.LME = 1)
377	 */
378	D16 movl	$MSR_AMD_EFER, %ecx
379	rdmsr
380
381	D16 orl		$AMD_EFER_LME, %eax
382	wrmsr
383
384#if     LED
385	D16 movl        $WC_LED, %edx
386	D16 movb        $0xd5, %al
387	outb    (%dx)
388#endif
389
390#if     SERIAL
391	D16 movl        $WC_COM, %edx
392	D16 movb        $0x65, %al
393	outb    (%dx)
394#endif
395
396	/*
397	 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
398	 */
399	movl		%cr0, %eax
400	D16 orl		$CR0_PG, %eax
401	movl		%eax, %cr0
402
403	/*
404	 * The instruction after enabling paging in CR0 MUST be a branch.
405	 */
406	jmp		long_mode_active
407
408long_mode_active:
409
410#if     LED
411	D16 movl        $WC_LED, %edx
412	D16 movb        $0xd6, %al
413	outb    (%dx)
414#endif
415
416#if     SERIAL
417	D16 movl        $WC_COM, %edx
418	D16 movb        $0x66, %al
419	outb    (%dx)
420#endif
421
422	/*
423	 * Long mode is now active but since we're still running with the
	 * original 16-bit CS we're actually in 16-bit compatibility mode.
	 *
	 * We have to load an intermediate GDT and IDT here that we know are
	 * in 32-bit space before we can use the kernel's GDT and IDT, which
	 * may be in the 64-bit address space, and since we're in compatibility
	 * mode, we only have access to 16- and 32-bit instructions at the
430	 * moment.
431	 */
432	A16 D16 lgdt	TEMPGDTOFF	/* load temporary GDT */
433	A16 D16 lidt	TEMPIDTOFF	/* load temporary IDT */
434
435
436	/*
	 * Do a far transfer to 64-bit mode.  Set the CS selector to a 64-bit
	 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
	 * to the real mode platter address of wc_long_mode_64; until the
	 * 64-bit CS is in place we don't have access to 64-bit instructions
	 * and thus can't reference a 64-bit %rip.
442	 */
443
444#if     LED
445	D16 movl        $WC_LED, %edx
446	D16 movb        $0xd7, %al
447	outb    (%dx)
448#endif
449
450#if     SERIAL
451	D16 movl        $WC_COM, %edx
452	D16 movb        $0x67, %al
453	outb    (%dx)
454#endif
455
456	D16 	pushl 	$TEMP_CS64_SEL
457	A16 D16 pushl	LM64OFF
458
459	D16 lret
460
461
462/*
463 * Support routine to re-initialize VGA subsystem
464 */
465vgainit:
466	D16 ret
467
468/*
469 * Support routine to re-initialize keyboard (which is USB - help!)
470 */
471kbdinit:
472	D16 ret
473
474/*
475 * Support routine to re-initialize COM ports to something sane
476 */
477cominit:
478	/ init COM1 & COM2
479
480#if     DEBUG
481/*
482 * on debug kernels we need to initialize COM1 & COM2 here, so that
483 * we can get debug output before the asy driver has resumed
484 */
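
/*
 * The sequence below follows the standard 16550-style UART setup: set
 * DLAB in the LCR, write the baud-rate divisor (0x000c, i.e. 9600 baud)
 * into DLL/DLH, rewrite the LCR with DLAB clear to select 8 data bits
 * and 1 stop bit, then raise DTR and RTS in the MCR.
 */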
485
486/ select COM1
487	D16 movl	$_CONST(COM1+LCR), %edx
488	D16 movb	$DLAB, %al		/ divisor latch
489	outb	(%dx)
490
491	D16 movl	$_CONST(COM1+DLL), %edx	/ divisor latch lsb
492	D16 movb	$B9600L, %al		/ divisor latch
493	outb	(%dx)
494
	D16 movl	$_CONST(COM1+DLH), %edx	/ divisor latch msb
496	D16 movb	$B9600H, %al		/ divisor latch
497	outb	(%dx)
498
499	D16 movl	$_CONST(COM1+LCR), %edx	/ select COM1
500	D16 movb	$_CONST(STOP1|BITS8), %al	/ 1 stop bit, 8bit word len
501	outb	(%dx)
502
503	D16 movl	$_CONST(COM1+MCR), %edx	/ select COM1
504	D16 movb	$_CONST(RTS|DTR), %al		/ data term ready & req to send
505	outb	(%dx)
506
507/ select COM2
508	D16 movl	$_CONST(COM2+LCR), %edx
509	D16 movb	$DLAB, %al		/ divisor latch
510	outb	(%dx)
511
512	D16 movl	$_CONST(COM2+DLL), %edx	/ divisor latch lsb
513	D16 movb	$B9600L, %al		/ divisor latch
514	outb	(%dx)
515
	D16 movl	$_CONST(COM2+DLH), %edx	/ divisor latch msb
517	D16 movb	$B9600H, %al		/ divisor latch
518	outb	(%dx)
519
	D16 movl	$_CONST(COM2+LCR), %edx	/ select COM2
521	D16 movb	$_CONST(STOP1|BITS8), %al	/ 1 stop bit, 8bit word len
522	outb	(%dx)
523
	D16 movl	$_CONST(COM2+MCR), %edx	/ select COM2
525	D16 movb	$_CONST(RTS|DTR), %al		/ data term ready & req to send
526	outb	(%dx)
527#endif	/*	DEBUG	*/
528
529	D16 ret
530
531	.code64
532
533	.globl wc_long_mode_64
534wc_long_mode_64:
535
536#if     LED
537	movw        $WC_LED, %dx
538	movb        $0xd8, %al
539	outb    (%dx)
540#endif
541
542#if     SERIAL
543	movw        $WC_COM, %dx
544	movb        $0x68, %al
545	outb    (%dx)
546#endif
547
548	/*
549	 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
550	 * CS.L=1) so we now have access to 64-bit instructions.
551	 *
552	 * First, set the 64-bit GDT base.
553	 */
554	.globl	rm_platter_pa
555	movl	rm_platter_pa, %eax
556
557	lgdtq	GDTROFF(%rax)		/* load 64-bit GDT */
558
559	/*
560	 * Save the CPU number in %r11; get the value here since it's saved in
561	 * the real mode platter.
562	 */
563/ JAN
564/ the following is wrong! need to figure out MP systems
565/	movl	CPUNOFF(%rax), %r11d
566
567	/*
568	 * Add rm_platter_pa to %rsp to point it to the same location as seen
569	 * from 64-bit mode.
570	 */
571	addq	%rax, %rsp
572
573	/*
574	 * Now do an lretq to load CS with the appropriate selector for the
575	 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
576	 * virtual address where boot originally loaded this code rather than
577	 * the copy in the real mode platter's rm_code array as we've been
578	 * doing so far.
579	 */
580
581#if     LED
582	movw        $WC_LED, %dx
583	movb        $0xd9, %al
584	outb    (%dx)
585#endif
586
587/ JAN this should produce 'i' but we get 'g' instead ???
588#if     SERIAL
589	movw        $WC_COM, %dx
590	movb        $0x69, %al
591	outb    (%dx)
592#endif
593
594	pushq	$KCS_SEL
595	pushq	$kernel_wc_code
596	lretq
597
598	.globl kernel_wc_code
599kernel_wc_code:
600
601#if     LED
602	movw        $WC_LED, %dx
603	movb        $0xda, %al
604	outb    (%dx)
605#endif
606
607/ JAN this should produce 'j' but we get 'g' instead ???
608#if     SERIAL
609	movw        $WC_COM, %dx
610	movb        $0x6a, %al
611	outb    (%dx)
612#endif
613
614	/*
	 * Complete the balance of the setup we need to do before executing
	 * 64-bit kernel code (namely init rsp, IDT, LDT, TSS, FS and GS).
617	 */
618	.globl  rm_platter_va
619	movq    rm_platter_va, %rbx
620	addq	$WC_CPU, %rbx
621
622#if     LED
623	movw        $WC_LED, %dx
624	movb        $0xdb, %al
625	outb    (%dx)
626#endif
627
628#if     SERIAL
629	movw        $WC_COM, %dx
630	movw        $0x6b, %ax
631	outb    (%dx)
632#endif
633
634	/*
	 * restore the saved IDT
636	 */
637
638	lidtq	WC_IDT(%rbx)
639
640#if     LED
641	movw        $WC_LED, %dx
642	movb        $0xdc, %al
643	outb    (%dx)
644#endif
645
646#if     SERIAL
647	movw        $WC_COM, %dx
648	movw        $0x6c, %ax
649	outb    (%dx)
650#endif
651
652	/*
653	 * restore the rest of the registers
654	 */
655
656	movw    $KDS_SEL, %ax
657	movw    %ax, %ds
658	movw    %ax, %es
659	movw    %ax, %ss
660
661	/*
662	 * Before proceeding, enable usage of the page table NX bit if
663	 * that's how the page tables are set up.
664	 */
665	bt      $X86FSET_NX, x86_featureset(%rip)
666	jnc     1f
667	movl    $MSR_AMD_EFER, %ecx
668	rdmsr
669	orl     $AMD_EFER_NXE, %eax
670	wrmsr
6711:
672
673	movq	WC_CR4(%rbx), %rax	/ restore full cr4 (with Global Enable)
674	movq	%rax, %cr4
675
676	lldt	WC_LDT(%rbx)
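
	/*
	 * The saved TSS descriptor is still marked busy, and ltr faults on
	 * a busy descriptor, so clear the busy bit (bit 9 of the second
	 * 32-bit word of the descriptor) before reloading the task register.
	 */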
677	movzwq	WC_TR(%rbx), %rax	/ clear TSS busy bit
678	addq	WC_GDT+2(%rbx), %rax
679	andl	$0xfffffdff, 4(%rax)
680	movq	4(%rax), %rcx
681	ltr	WC_TR(%rbx)
682
683#if     LED
684	movw        $WC_LED, %dx
685	movb        $0xdd, %al
686	outb    (%dx)
687#endif
688
689#if     SERIAL
690	movw        $WC_COM, %dx
691	movw        $0x6d, %ax
692	outb    (%dx)
693#endif
694
/ restore the fsbase, gsbase and kgsbase MSRs using wrmsr
696
697	movq    WC_FS(%rbx), %rcx	/ restore fs register
698	movw    %cx, %fs
699
700	movl    $MSR_AMD_FSBASE, %ecx
701	movl    WC_FSBASE(%rbx), %eax
702	movl    WC_FSBASE+4(%rbx), %edx
703	wrmsr
704
705	movq    WC_GS(%rbx), %rcx	/ restore gs register
706	movw    %cx, %gs
707
708	movl    $MSR_AMD_GSBASE, %ecx	/ restore gsbase msr
709	movl    WC_GSBASE(%rbx), %eax
710	movl    WC_GSBASE+4(%rbx), %edx
711	wrmsr
712
713	movl    $MSR_AMD_KGSBASE, %ecx	/ restore kgsbase msr
714	movl    WC_KGSBASE(%rbx), %eax
715	movl    WC_KGSBASE+4(%rbx), %edx
716	wrmsr
717
718	movq	WC_CR0(%rbx), %rdx
719	movq	%rdx, %cr0
720	movq	WC_CR3(%rbx), %rdx
721	movq	%rdx, %cr3
722	movq	WC_CR8(%rbx), %rdx
723	movq	%rdx, %cr8
724
725#if     LED
726	movw        $WC_LED, %dx
727	movb        $0xde, %al
728	outb    (%dx)
729#endif
730
731#if     SERIAL
732	movw        $WC_COM, %dx
733	movb        $0x6e, %al
734	outb    (%dx)
735#endif
736
737	/*
	 * if we are not running on the boot CPU, restore the stack contents by
739	 * calling i_cpr_restore_stack(curthread, save_stack);
740	 */
741	movq    %rsp, %rbp
742	call	i_cpr_bootcpuid
743	cmpl	%eax, WC_CPU_ID(%rbx)
744	je	2f
745
746	movq	%gs:CPU_THREAD, %rdi
747	movq	WC_SAVED_STACK(%rbx), %rsi
748	call	i_cpr_restore_stack
7492:
750
751	movq    WC_RSP(%rbx), %rsp	/ restore stack pointer
752
753	/*
754	 * APIC initialization
755	 */
756	movq    %rsp, %rbp
757
758	/*
	 * skip if the function pointer is NULL
760	 */
761	cmpq	$0, ap_mlsetup
762	je	3f
763	call	*ap_mlsetup
7643:
765
766	call    *cpr_start_cpu_func
767
/ restore %rbx to the value it had before we called the functions above
769	movq    rm_platter_va, %rbx
770	addq	$WC_CPU, %rbx
771
772	movq    WC_R8(%rbx), %r8
773	movq    WC_R9(%rbx), %r9
774	movq    WC_R10(%rbx), %r10
775	movq    WC_R11(%rbx), %r11
776	movq    WC_R12(%rbx), %r12
777	movq    WC_R13(%rbx), %r13
778	movq    WC_R14(%rbx), %r14
779	movq    WC_R15(%rbx), %r15
780/	movq    WC_RAX(%rbx), %rax
781	movq    WC_RBP(%rbx), %rbp
782	movq    WC_RCX(%rbx), %rcx
783/	movq    WC_RDX(%rbx), %rdx
784	movq    WC_RDI(%rbx), %rdi
785	movq    WC_RSI(%rbx), %rsi
786
787
788/ assume that %cs does not need to be restored
/ %ds, %es & %ss are ignored in 64-bit mode
790	movw	WC_SS(%rbx), %ss
791	movw	WC_DS(%rbx), %ds
792	movw	WC_ES(%rbx), %es
793
794#if     LED
795	movw        $WC_LED, %dx
796	movb        $0xdf, %al
797	outb    (%dx)
798#endif
799
800#if     SERIAL
801	movw        $WC_COM, %dx
802	movb        $0x6f, %al
803	outb    (%dx)
804#endif
805
806
807	movq    WC_RBP(%rbx), %rbp
808	movq    WC_RSP(%rbx), %rsp
809
810#if     LED
811	movw        $WC_LED, %dx
812	movb        $0xe0, %al
813	outb    (%dx)
814#endif
815
816#if     SERIAL
817	movw        $WC_COM, %dx
818	movb        $0x70, %al
819	outb    (%dx)
820#endif
821
822
823	movq    WC_RCX(%rbx), %rcx
824
825	pushq	WC_EFLAGS(%rbx)			/ restore flags
826	popfq
827
828#if     LED
829	movw        $WC_LED, %dx
830	movb        $0xe1, %al
831	outb    (%dx)
832#endif
833
834#if     SERIAL
835	movw        $WC_COM, %dx
836	movb        $0x71, %al
837	outb    (%dx)
838#endif
839
840/*
 * cannot use outb after this point, because doing so would mean using
 * %dx, which would modify %rdx, which is restored below
843 */
844
845	movq	%rbx, %rax
846	movq    WC_RDX(%rax), %rdx
847	movq    WC_RBX(%rax), %rbx
848
849	leave
850
851	movq	WC_RETADDR(%rax), %rax
852	movq	%rax, (%rsp)		/ return to caller of wc_save_context
853
854	xorl	%eax, %eax			/ at wakeup return 0
855	ret
856
857
858	SET_SIZE(wc_rm_start)
859
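/*
 * asmspin() is a simple busy-wait helper: it spins for the iteration
 * count passed in %edi (the first argument).
 */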
860	ENTRY_NP(asmspin)
861
862	movl	%edi, %ecx
863A1:
	loop	A1
	ret

866	SET_SIZE(asmspin)
867
868	.globl wc_rm_end
869wc_rm_end:
870	nop
871
872#elif defined(__i386)
873
874	ENTRY_NP(wc_rm_start)
875
876/entry:	jmp		entry			/ stop here for HDT
877
878	cli
879	movw		%cs, %ax
880	movw		%ax, %ds		/ establish ds ...
881	movw		%ax, %ss		/ ... and ss:esp
882	D16 movl	$WC_STKSTART, %esp
883
884#if     LED
885	D16 movl        $WC_LED, %edx
886	D16 movb        $0xd1, %al
887	outb    (%dx)
888#endif
889
890#if     SERIAL
891	D16 movl        $WC_COM, %edx
892	D16 movb        $0x61, %al
893	outb    (%dx)
894#endif
895
896
897	D16 call	vgainit
898	D16 call	kbdinit
899	D16 call	cominit
900
901#if     LED
902	D16 movl        $WC_LED, %edx
903	D16 movb        $0xd2, %al
904	outb    (%dx)
905#endif
906
907#if     SERIAL
908	D16 movl        $WC_COM, %edx
909	D16 movb        $0x62, %al
910	outb    (%dx)
911#endif
912
	D16 A16 movl	$WC_CPU, %ebx		/ base addr of wc_cpu_t
914
915#if     LED
916	D16 movb        $0xd3, %al
917	outb    $WC_LED
918#endif
919
920#if     SERIAL
921	D16 movl        $WC_COM, %edx
922	D16 movb        $0x63, %al
923	outb    (%dx)
924#endif
925
926	D16 A16 movl	%cs:WC_DS(%ebx), %edx	/ %ds post prot/paging transit
927
928#if     LED
929	D16 movb        $0xd4, %al
930	outb    $WC_LED
931#endif
932
933	D16 A16 lgdt	%cs:WC_GDT(%ebx)	/ restore gdt and idtr
934	D16 A16 lidt	%cs:WC_IDT(%ebx)
935
936#if     LED
937	D16 movb        $0xd5, %al
938	outb    $WC_LED
939#endif
940
941	D16 A16 movl	%cs:WC_CR4(%ebx), %eax	/ restore cr4
942	D16 andl	$_BITNOT(CR4_PGE), %eax / don't set Global Enable yet
943	movl		%eax, %cr4
944
945#if     LED
946	D16 movb        $0xd6, %al
947	outb    $WC_LED
948#endif
949
950	D16 A16 movl	%cs:WC_CR3(%ebx), %eax	/ set PDPT
951	movl		%eax, %cr3
952
953#if     LED
954	D16 movb        $0xd7, %al
955	outb    $WC_LED
956#endif
957
958	D16 A16 movl	%cs:WC_CR0(%ebx), %eax	/ enable prot/paging, etc.
959	movl		%eax, %cr0
960
961#if     LED
962	D16 movb        $0xd8, %al
963	outb    $WC_LED
964#endif
965
966	D16 A16 movl	%cs:WC_VIRTADDR(%ebx), %ebx	/ virtaddr of wc_cpu_t
967
968#if     LED
969	D16 movb        $0xd9, %al
970	outb    $WC_LED
971#endif
972
973#if     LED
974	D16 movb        $0xda, %al
975	outb    $WC_LED
976#endif
977
978	jmp		flush			/ flush prefetch queue
979flush:
980	D16 pushl	$KCS_SEL
981	D16 pushl	$kernel_wc_code
982	D16 lret				/ re-appear at kernel_wc_code
983
984
985/*
986 * Support routine to re-initialize VGA subsystem
987 */
988vgainit:
989	D16 ret
990
991/*
992 * Support routine to re-initialize keyboard (which is USB - help!)
993 */
994kbdinit:
995	D16 ret
996
997/*
998 * Support routine to re-initialize COM ports to something sane for debug output
999 */
1000cominit:
1001#if     DEBUG
1002/*
1003 * on debug kernels we need to initialize COM1 & COM2 here, so that
1004 * we can get debug output before the asy driver has resumed
1005 */
1006
1007/ select COM1
1008	D16 movl	$_CONST(COM1+LCR), %edx
1009	D16 movb	$DLAB, %al		/ divisor latch
1010	outb	(%dx)
1011
1012	D16 movl	$_CONST(COM1+DLL), %edx	/ divisor latch lsb
1013	D16 movb	$B9600L, %al		/ divisor latch
1014	outb	(%dx)
1015
	D16 movl	$_CONST(COM1+DLH), %edx	/ divisor latch msb
1017	D16 movb	$B9600H, %al		/ divisor latch
1018	outb	(%dx)
1019
1020	D16 movl	$_CONST(COM1+LCR), %edx	/ select COM1
1021	D16 movb	$_CONST(STOP1|BITS8), %al	/ 1 stop bit, 8bit word len
1022	outb	(%dx)
1023
1024	D16 movl	$_CONST(COM1+MCR), %edx	/ select COM1
	D16 movb	$_CONST(RTS|DTR), %al		/ data term ready & req to send
1026	outb	(%dx)
1027
1028/ select COM2
1029	D16 movl	$_CONST(COM2+LCR), %edx
1030	D16 movb	$DLAB, %al		/ divisor latch
1031	outb	(%dx)
1032
1033	D16 movl	$_CONST(COM2+DLL), %edx	/ divisor latch lsb
1034	D16 movb	$B9600L, %al		/ divisor latch
1035	outb	(%dx)
1036
	D16 movl	$_CONST(COM2+DLH), %edx	/ divisor latch msb
1038	D16 movb	$B9600H, %al		/ divisor latch
1039	outb	(%dx)
1040
	D16 movl	$_CONST(COM2+LCR), %edx	/ select COM2
1042	D16 movb	$_CONST(STOP1|BITS8), %al	/ 1 stop bit, 8bit word len
1043	outb	(%dx)
1044
	D16 movl	$_CONST(COM2+MCR), %edx	/ select COM2
	D16 movb	$_CONST(RTS|DTR), %al		/ data term ready & req to send
1047	outb	(%dx)
1048#endif	/*	DEBUG	*/
1049
1050	D16 ret
1051
1052	.globl wc_rm_end
1053wc_rm_end:
1054	nop
1055
1056	.globl	kernel_wc_code
1057kernel_wc_code:
	/ At this point we are running with the kernel's %cs and the proper %eip.
	/ We are now executing from the original code where boot loaded us,
	/ not from the copy in the real mode platter.
	/ By this time the GDT and IDT are loaded, as are cr0, cr3 and cr4.
	/ %ebx is the wc_cpu_t pointer
	/ %dx holds our %ds
1064
1065#if     LED
1066	D16 movb        $0xdb, %al
1067	outb	$WC_LED
1068#endif
1069
1070/ got here OK
1071
1072	movw	%dx, %ds		/ $KDS_SEL
1073
1074#if     LED
1075	movb	$0xdc, %al
1076	outb	$WC_LED
1077#endif
1078
1079	/*
1080	 * Before proceeding, enable usage of the page table NX bit if
1081	 * that's how the page tables are set up.
1082	 */
1083	bt      $X86FSET_NX, x86_featureset
1084	jnc     1f
1085	movl    $MSR_AMD_EFER, %ecx
1086	rdmsr
1087	orl     $AMD_EFER_NXE, %eax
1088	wrmsr
10891:
1090
1091	movl	WC_CR4(%ebx), %eax	/ restore full cr4 (with Global Enable)
1092	movl	%eax, %cr4
1093
1094
1095	lldt	WC_LDT(%ebx)		/ $LDT_SEL
1096
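	/ as in the 64-bit path above, ltr faults on a busy TSS descriptor,
	/ so the busy bit is cleared before the task register is reloaded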
1097	movzwl	WC_TR(%ebx), %eax	/ clear TSS busy bit
1098	addl	WC_GDT+2(%ebx), %eax
1099	andl	$_BITNOT(0x200), 4(%eax)
1100	ltr	WC_TR(%ebx)		/ $UTSS_SEL
1101
1102	movw	WC_SS(%ebx), %ss	/ restore segment registers
1103	movw	WC_ES(%ebx), %es
1104	movw	WC_FS(%ebx), %fs
1105	movw	WC_GS(%ebx), %gs
1106
1107	/*
1108	 * set the stack pointer to point into the identity mapped page
1109	 * temporarily, so we can make function calls
1110	 */
1111	.globl  rm_platter_va
1112	movl    rm_platter_va, %eax
1113	movl	$WC_STKSTART, %esp
1114	addl	%eax, %esp
1115	movl	%esp, %ebp
1116
1117	/*
	 * if we are not running on the boot CPU, restore the stack contents by
1119	 * calling i_cpr_restore_stack(curthread, save_stack);
1120	 */
1121	call	i_cpr_bootcpuid
1122	cmpl	%eax, WC_CPU_ID(%ebx)
1123	je	2f
1124
1125	pushl	WC_SAVED_STACK(%ebx)
1126	pushl	%gs:CPU_THREAD
1127	call	i_cpr_restore_stack
1128	addl	$0x10, %esp
11292:
1130
1131	movl	WC_ESP(%ebx), %esp
1132	movl	%esp, %ebp
1133
1134	movl	WC_RETADDR(%ebx), %eax	/ return to caller of wc_save_context
1135	movl	%eax, (%esp)
1136
1137	/*
	 * APIC initialization, skip if the function pointer is NULL
1139	 */
1140	cmpl	$0, ap_mlsetup
1141	je	3f
1142	call	*ap_mlsetup
11433:
1144
1145	call    *cpr_start_cpu_func
1146
1147	pushl	WC_EFLAGS(%ebx)		/ restore flags
1148	popfl
1149
1150	movl	WC_EDI(%ebx), %edi	/ restore general registers
1151	movl	WC_ESI(%ebx), %esi
1152	movl	WC_EBP(%ebx), %ebp
1153	movl	WC_EBX(%ebx), %ebx
1154
1155/exit:	jmp	exit			/ stop here for HDT
1156
1157	xorl	%eax, %eax		/ at wakeup return 0
1158	ret
1159
1160	SET_SIZE(wc_rm_start)
1161
1162
1163#endif	/* defined(__amd64) */
1164
1165#endif /* lint */
1166
1167