/*-
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#define LOCORE

#include <machine/asmacros.h>
#include <machine/param.h>
#include <machine/specialreg.h>

	.align 4
	.code16
wakeup_16:
	nop
	cli

	/*
	 * Set up segment registers for real mode and a small stack for
	 * any calls we make.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss
	movw	$PAGE_SIZE,%sp

	/* Re-initialize video BIOS if the reset_video tunable is set. */
	cmp	$0,reset_video
	je	wakeup_16_gdt
	lcall	$0xc000,$3
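	/* C000:0003 is the video option ROM's initialization entry point. */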

	/*
	 * Set up segment registers for real mode again in case the
	 * previous BIOS call clobbered them.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss

wakeup_16_gdt:
	/* Load GDT for real mode */
	lgdt	physical_gdt

	/* Restore CR2, CR3 and CR4 */
	mov	previous_cr2,%eax
	mov	%eax,%cr2
	mov	previous_cr3,%eax
	mov	%eax,%cr3
	mov	previous_cr4,%eax
	mov	%eax,%cr4

	/* Transfer some values to protected mode */
#define NVALUES	9
#define TRANSFER_STACK32(val, idx)	\
	mov	val,%eax;		\
	mov	%eax,wakeup_32stack+(idx+1)+(idx*4);
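	/*
	 * Each "pushl $imm32" at wakeup_32stack below assembles to five
	 * bytes (one opcode byte plus a 32-bit immediate), so slot idx's
	 * immediate operand lives at wakeup_32stack + idx * 5 + 1, which
	 * is what (idx + 1) + (idx * 4) computes.
	 */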

	TRANSFER_STACK32(previous_ss,		(NVALUES - 9))
	TRANSFER_STACK32(previous_fs,		(NVALUES - 8))
	TRANSFER_STACK32(previous_ds,		(NVALUES - 7))
	TRANSFER_STACK32(physical_gdt+2,	(NVALUES - 6))
	TRANSFER_STACK32(where_to_recover,	(NVALUES - 5))
	TRANSFER_STACK32(previous_idt+2,	(NVALUES - 4))
	TRANSFER_STACK32(previous_ldt,		(NVALUES - 3))
	TRANSFER_STACK32(previous_gdt+2,	(NVALUES - 2))
	TRANSFER_STACK32(previous_tr,		(NVALUES - 1))
	TRANSFER_STACK32(previous_cr0,		(NVALUES - 0))

	mov	physical_esp,%esi	/* to be used in 32bit code */

	/* Enable protected mode */
	mov	%cr0,%eax
	orl	$(CR0_PE),%eax
	mov	%eax,%cr0

wakeup_sw32:
	/* Switch to protected mode by far (intersegment) jump */
	ljmpl	$0x8,$0x12345678	/* Code location, to be replaced */
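	/*
	 * Selector 0x8 is the code descriptor in the GDT loaded above;
	 * the 0x12345678 placeholder offset is patched at run time
	 * (presumably by the wakeup setup code in acpi_wakeup.c) with
	 * the physical address of wakeup_32.
	 */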

	.code32
wakeup_32:
	/*
	 * Switched to protected mode w/o paging
	 *	%esi:	KERNEL stack pointer (physical address)
	 */

	nop

	/* Set up segment registers for protected mode */
	movw	$0x10,%ax		/* KDSEL to segment registers */
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	$0x18,%ax		/* KPSEL to %fs */
	movw	%ax,%fs
	movl	%esi,%esp		/* physical address stack pointer */

wakeup_32stack:
	/* Operands are overwritten in 16bit code */
	pushl	$0xabcdef09		/* ss + dummy */
	pushl	$0xabcdef08		/* fs + gs */
	pushl	$0xabcdef07		/* ds + es */
	pushl	$0xabcdef06		/* gdt:base (physical address) */
	pushl	$0xabcdef05		/* recover address */
	pushl	$0xabcdef04		/* idt:base */
	pushl	$0xabcdef03		/* ldt + idt:limit */
	pushl	$0xabcdef02		/* gdt:base */
	pushl	$0xabcdef01		/* TR + gdt:limit */
	pushl	$0xabcdef00		/* CR0 */

	movl	%esp,%ebp
#define CR0_REGISTER		0(%ebp)
#define TASK_REGISTER		4(%ebp)
#define PREVIOUS_GDT		6(%ebp)
#define PREVIOUS_LDT		12(%ebp)
#define PREVIOUS_IDT		14(%ebp)
#define RECOVER_ADDR		20(%ebp)
#define PHYSICAL_GDT_BASE	24(%ebp)
#define PREVIOUS_DS		28(%ebp)
#define PREVIOUS_ES		30(%ebp)
#define PREVIOUS_FS		32(%ebp)
#define PREVIOUS_GS		34(%ebp)
#define PREVIOUS_SS		36(%ebp)
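	/*
	 * The offsets above pick apart the ten dwords pushed at
	 * wakeup_32stack; 16-bit fields (selectors and descriptor-table
	 * limits) are packed two to a dword, so some offsets are only
	 * word aligned.
	 */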

	/* Fixup TSS type field */
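	/*
	 * Masking byte 5 of the TSS descriptor with 0xf9 clears the busy
	 * bit in its type field (busy 0xb -> available 0x9), so that the
	 * ltr further down does not fault on a busy TSS.
	 */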
#define TSS_TYPEFIX_MASK	0xf9
	xorl	%esi,%esi
	movl	PHYSICAL_GDT_BASE,%ebx
	movw	TASK_REGISTER,%si
	leal	(%ebx,%esi),%eax	/* get TSS segment descriptor */
	andb	$TSS_TYPEFIX_MASK,5(%eax)

	/* Prepare to return to sleep/wakeup code point */
	lgdt	PREVIOUS_GDT
	lidt	PREVIOUS_IDT

	xorl	%eax,%eax
	movl	%eax,%ebx
	movl	%eax,%ecx
	movl	%eax,%edx
	movl	%eax,%esi
	movl	%eax,%edi
	movl	PREVIOUS_DS,%ebx
	movl	PREVIOUS_FS,%ecx
	movl	PREVIOUS_SS,%edx
	movw	TASK_REGISTER,%si
	shll	$16,%esi
	movw	PREVIOUS_LDT,%si
	movl	RECOVER_ADDR,%edi

	/* Restore the previous CR0; this re-enables paging, etc. */
	movl	CR0_REGISTER,%eax
	movl	%eax,%cr0

	/* Flush the prefetch queue */
	jmp	1f
1:	jmp	1f
1:
	/*
	 * Now that we are in kernel virtual memory addressing
	 *	%ebx:	ds + es
	 *	%ecx:	fs + gs
	 *	%edx:	ss + dummy
	 *	%esi:	LDTR + TR
	 *	%edi:	recover address
	 */

	nop

	movl	%esi,%eax		/* LDTR + TR */
	lldt	%ax			/* load LDT register */
	shrl	$16,%eax
	ltr	%ax			/* load task register */

	/* Restore segment registers */
	movl	%ebx,%eax		/* ds + es */
	movw	%ax,%ds
	shrl	$16,%eax
	movw	%ax,%es
	movl	%ecx,%eax		/* fs + gs */
	movw	%ax,%fs
	shrl	$16,%eax
	movw	%ax,%gs
	movl	%edx,%eax		/* ss */
	movw	%ax,%ss

	/* Jump to acpi_restorecpu() */
	jmp	*%edi

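/*
 * The variables below are filled in before suspend (presumably by the
 * wakeup setup code in acpi_wakeup.c) and read back by the code above
 * once the BIOS resumes us in real mode.
 */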
/* used in real mode */
physical_gdt:		.word 0
			.long 0
physical_esp:		.long 0
previous_cr2:		.long 0
previous_cr3:		.long 0
previous_cr4:		.long 0
reset_video:		.long 0

/* transfer from real mode to protected mode */
previous_cr0:		.long 0
previous_tr:		.word 0
previous_gdt:		.word 0
			.long 0
previous_ldt:		.word 0
previous_idt:		.word 0
			.long 0
where_to_recover:	.long 0
previous_ds:		.word 0
previous_es:		.word 0
previous_fs:		.word 0
previous_gs:		.word 0
previous_ss:		.word 0
dummy:			.word 0
