/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(__lint)

int silence_lint = 0;

#else

#include <sys/segments.h>
#include <sys/controlregs.h>

/*
 * Do a call into BIOS.  This goes down to 16 bit real mode and back again.
 */

/*
 * instruction prefix to change the operand size of the following instruction
 */
#define DATASZ	.byte 0x66;
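
/*
 * For example, "DATASZ movl $B16DATA_SEL, %eax" below keeps its 32 bit
 * operand while executing in a 16 bit segment; without the 0x66 prefix
 * those same bytes would decode as a 16 bit immediate move and the
 * instruction stream would fall out of sync.
 */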

#if defined(__amd64)
#define	MOVCR(x, y)	movq  x,%rax; movq  %rax, y
#define LOAD_XAX(sym)	leaq	sym, %rax
#elif defined(__i386)
#define	MOVCR(x, y)	movl  x,%eax; movl  %eax, y
#define LOAD_XAX(sym)	leal	sym, %eax
#endif

	.globl	_start
_start:

#if defined(__i386)

	/*
	 * Save caller registers
	 */
	movl	%ebp, save_ebp
	movl	%esp, save_esp
	movl	%ebx, save_ebx
	movl	%esi, save_esi
	movl	%edi, save_edi

	/* get registers argument into esi */
	movl	8(%esp), %esi

	/* put interrupt number in %bl */
	movl	4(%esp), %ebx

	/* Switch to a low memory stack */
	movl	$_start, %esp

	/* allocate space for args on stack */
	subl	$18, %esp
	movl	%esp, %edi

#elif defined(__amd64)

	/*
	 * Save caller registers
	 */
	movq	%rbp, save_rbp
	movq	%rsp, save_rsp
	movq	%rbx, save_rbx
	movq	%rsi, save_rsi
	movq	%r12, save_r12
	movq	%r13, save_r13
	movq	%r14, save_r14
	movq	%r15, save_r15

	/* Switch to a low memory stack */
	movq	$_start, %rsp

	/* put interrupt number in %bl */
	movq	%rdi, %rbx

	/* allocate space for args on stack */
	subq	$18, %rsp
	movq	%rsp, %rdi

#endif

	/* copy args from high memory to stack in low memory */
	cld
	movl	$18, %ecx
	rep
	movsb
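
	/*
	 * The 18 byte argument block holds nine 16 bit values: ax, bx,
	 * cx, dx, si, di, bp, es and ds, in exactly the order they are
	 * popped once we reach real mode below.
	 */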

	/*
	 * Save system registers
	 */
	sidt	save_idt
	sgdt	save_gdt
	str	save_tr
	movw	%cs, save_cs
	movw	%ds, save_ds
	movw	%ss, save_ss
	movw	%es, save_es
	movw	%fs, save_fs
	movw	%gs, save_gs
	MOVCR(	%cr4, save_cr4)
	MOVCR(	%cr3, save_cr3)
	MOVCR(	%cr0, save_cr0)

#if defined(__amd64)
	/*
	 * save/clear the extension parts of the fs/gs base registers and cr8
	 */
	movl	$MSR_AMD_FSBASE, %ecx
	rdmsr
	movl	%eax, save_fsbase
	movl	%edx, save_fsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_AMD_GSBASE, %ecx
	rdmsr
	movl	%eax, save_gsbase
	movl	%edx, save_gsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx
	rdmsr
	movl	%eax, save_kgsbase
	movl	%edx, save_kgsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movq	%cr8, %rax
	movq	%rax, save_cr8
#endif

	/*
	 * patch the run time addresses into the hand assembled 16 bit
	 * ljmp and lgdt instructions below
	 */
	LOAD_XAX(enter_real)
	movw	%ax, enter_real_ljmp

	LOAD_XAX(enter_protected)
	movw	%ax, enter_protected_ljmp

	LOAD_XAX(gdt_info)
	movw	%ax, gdt_info_load

	/*
	 * insert BIOS interrupt number into later instruction
	 */
	movb    %bl, int_instr+1
	jmp     1f
1:
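
	/*
	 * The jmp 1f just above is the usual idiom after self modifying
	 * code: taking a branch keeps the CPU from executing a stale
	 * prefetched copy of the int instruction we just patched.
	 */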

	/*
	 * zero out all the registers to make sure they're 16 bit clean
	 */
#if defined(__amd64)
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	xorq	%r11, %r11
	xorq	%r12, %r12
	xorq	%r13, %r13
	xorq	%r14, %r14
	xorq	%r15, %r15
#endif
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%ebp, %ebp
	xorl	%esi, %esi
	xorl	%edi, %edi

	/*
	 * Load our own GDT/IDT
	 */
	lgdt	gdt_info
	lidt	idt_info

#if defined(__amd64)
	/*
	 * Shut down 64 bit mode. First get into compatibility mode.
	 */
	movq	%rsp, %rax
	pushq	$B32DATA_SEL
	pushq	%rax
	pushf
	pushq	$B32CODE_SEL
	pushq	$1f
	iretq
1:
	.code32
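
	/*
	 * The iretq above popped RIP, CS, RFLAGS, RSP and SS from the
	 * frame pushed just before it; it's a convenient way to load
	 * %cs with a 32 bit (compatibility mode) code selector while
	 * still in 64 bit mode.
	 */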

	/*
	 * disable long mode by:
	 * - shutting down paging (bit 31 of cr0)
	 * - flushing the TLB
	 * - disabling LME (long mode enable) in EFER (extended feature reg)
	 */
	movl	%cr0, %eax
	btcl	$31, %eax		/* disable paging */
	movl	%eax, %cr0
	ljmp	$B32CODE_SEL, $1f
1:

	xorl	%eax, %eax
	movl	%eax, %cr3		/* flushes TLB */

	movl	$MSR_AMD_EFER, %ecx	/* Extended Feature Enable */
	rdmsr
	btcl	$8, %eax		/* bit 8 Long Mode Enable bit */
	wrmsr
#endif

	/*
	 * ok.. now enter 16 bit mode, so we can shut down protected mode
	 *
	 * We'll have to act like we're still in a 32 bit section.
	 * So the code from this point has DATASZ in front of it to get 32 bit
	 * operands. If DATASZ is missing the operands will be 16 bit.
	 *
	 * Now shut down paging and protected (i.e. segmentation) modes.
	 */
	ljmp	$B16CODE_SEL, $enter_16_bit
enter_16_bit:

	/*
	 * Make sure hidden parts of segment registers are 16 bit clean
	 */
	DATASZ	movl	$B16DATA_SEL, %eax
		movw    %ax, %ss
		movw    %ax, %ds
		movw    %ax, %es
		movw    %ax, %fs
		movw    %ax, %gs


	DATASZ	movl	$0x0, %eax	/* put us in real mode */
	DATASZ	movl	%eax, %cr0
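
	/*
	 * The far jump below is hand assembled (opcode 0xea plus a 16 bit
	 * offset and a 16 bit segment) because its target offset has to
	 * be patched in at run time; that was done above by the
	 * LOAD_XAX(enter_real)/movw sequence.
	 */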
	.byte	0xea			/* ljmp */
enter_real_ljmp:
	.value	0			/* addr (16 bit) */
	.value	0x0			/* value for %cs */
enter_real:

	/*
	 * zero out the remaining segment registers
	 */
	DATASZ	xorl	%eax, %eax
		movw    %ax, %ss
		movw    %ax, %ds
		movw    %ax, %es
		movw    %ax, %fs
		movw    %ax, %gs

	/*
	 * load the arguments to the BIOS call from the stack
	 */
	popl	%eax	/* really executes a 16 bit pop */
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	pop	%es
	pop	%ds

	/*
	 * do the actual BIOS call
	 */
	sti
int_instr:
	int	$0x10		/* this int number is overwritten */
	cli			/* ensure interrupts remain disabled */

	/*
	 * save results of the BIOS call
	 */
	pushf
	push	%ds
	push	%es
	pushl	%ebp		/* still executes as 16 bit */
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	pushl	%eax

	/*
	 * Restore protected mode and 32 bit execution
	 */
	push	$0			/* make sure %ds is zero before lgdt */
	pop	%ds
	.byte	0x0f, 0x01, 0x16	/* lgdt */
gdt_info_load:
	.value	0	/* temp GDT in currently addressable mem */
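
	/*
	 * Like the far jumps, the lgdt above is hand assembled (0x0f 0x01
	 * 0x16 plus a 16 bit absolute address) because the address of
	 * gdt_info was patched into gdt_info_load at run time. Zeroing
	 * %ds just before it makes that 16 bit address resolve from
	 * segment 0.
	 */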

	DATASZ	movl	$0x1, %eax
	DATASZ	movl	%eax, %cr0

	.byte	0xea			/* ljmp */
enter_protected_ljmp:
	.value	0			/* addr (still in 16 bit) */
	.value	B32CODE_SEL		/* %cs value */
enter_protected:

	/*
	 * We are now back in a 32 bit code section, fix data/stack segments
	 */
	.code32
	movw	$B32DATA_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %ss

	/*
	 * Re-enable paging. Note we only use 32 bit mov's to restore these
	 * control registers. That's OK as the upper 32 bits are always zero.
	 */
	movl	save_cr4, %eax
	movl	%eax, %cr4
	movl	save_cr3, %eax
	movl	%eax, %cr3

#if defined(__amd64)
	/*
	 * re-enable long mode
	 */
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	btsl	$8, %eax
	wrmsr
#endif

	movl	save_cr0, %eax
	movl	%eax, %cr0
	jmp	enter_paging
enter_paging:
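
	/*
	 * The jmp above follows the long standing rule of branching
	 * right after the mov to %cr0 that re-enables paging, so no
	 * stale prefetched instructions run under the old translation.
	 */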


#if defined(__amd64)
	/*
	 * transition back to 64 bit mode
	 */
	pushl	$B64CODE_SEL
	pushl	$longmode
	lret
longmode:
	.code64
#endif
	/*
	 * restore the caller's GDT, IDT, task register and segment registers
	 */
	lgdt	save_gdt
	lidt	save_idt

	/*
	 * Before loading the task register we need to clear the busy bit
	 * in its TSS descriptor in the GDT. The busy bit is bit 1 of the
	 * type byte, which is at offset 5 in the descriptor.
	 */
#if defined(__i386)
	movzwl	save_tr, %eax
	addl	save_gdt+2, %eax
	btcl	$1, 5(%eax)
#elif defined(__amd64)
	movzwq	save_tr, %rax
	addq	save_gdt+2, %rax
	btcl	$1, 5(%rax)
#endif
	ltr	save_tr
	movw	save_ds, %ds
	movw	save_ss, %ss
	movw	save_es, %es
	movw	save_fs, %fs
	movw	save_gs, %gs

#if defined(__i386)
	pushl	save_cs
	pushl	$.newcs
	lret
#elif defined(__amd64)
	pushq	save_cs
	pushq	$.newcs
	lretq
#endif
.newcs:

#if defined(__amd64)
	/*
	 * restore the hidden kernel segment base register values
	 */
	movl	save_fsbase, %eax
	movl	save_fsbase + 4, %edx
	movl	$MSR_AMD_FSBASE, %ecx
	wrmsr

	movl	save_gsbase, %eax
	movl	save_gsbase + 4, %edx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	movl	save_kgsbase, %eax
	movl	save_kgsbase + 4, %edx
	movl	$MSR_AMD_KGSBASE, %ecx
	wrmsr

	movq	save_cr8, %rax
	cmpq	$0, %rax
	je	1f
	movq	%rax, %cr8
1:
#endif

	/*
	 * copy results to caller's location, then restore remaining registers
	 */
#if defined(__i386)
	movl    save_esp, %edi
	movl	8(%edi), %edi
	movl	%esp, %esi
	movl	$18, %ecx
	rep
	movsb
	movw	18(%esp), %ax	/* return the flags from the BIOS call */
	andl	$0xffff, %eax
	movl    save_ebx, %ebx
	movl    save_esi, %esi
	movl    save_edi, %edi
	movl    save_ebp, %ebp
	movl    save_esp, %esp
	ret

#elif defined(__amd64)
	movq    save_rsi, %rdi
	movq	%rsp, %rsi
	movq	$18, %rcx
	rep
	movsb
	movw	18(%rsp), %ax	/* return the flags from the BIOS call */
	andq	$0xffff, %rax
	movq    save_r12, %r12
	movq    save_r13, %r13
	movq    save_r14, %r14
	movq    save_r15, %r15
	movq    save_rbx, %rbx
	movq    save_rbp, %rbp
	movq    save_rsp, %rsp
	ret

#endif


/*
 * Caller's registers to restore
 */
	.align 4
save_esi:
	.long	0
save_edi:
	.long	0
save_ebx:
	.long	0
save_ebp:
	.long	0
save_esp:
	.long	0

	.align 8
#if defined(__amd64)
save_rsi:
	.quad	0
save_rbx:
	.quad	0
save_rbp:
	.quad	0
save_rsp:
	.quad	0
save_r12:
	.quad	0
save_r13:
	.quad	0
save_r14:
	.quad	0
save_r15:
	.quad	0
save_kgsbase:
	.quad	0
save_gsbase:
	.quad	0
save_fsbase:
	.quad	0
save_cr8:
	.quad	0
#endif	/* __amd64 */

save_idt:
	.quad	0
	.quad	0

save_gdt:
	.quad	0
	.quad	0

save_cr0:
	.quad	0
save_cr3:
	.quad	0
save_cr4:
	.quad	0
save_cs:
	.quad	0
save_ss:
	.value	0
save_ds:
	.value	0
save_es:
	.value	0
save_fs:
	.value	0
save_gs:
	.value	0
save_tr:
	.value	0

idt_info:
	.value 0x3ff
	.quad 0
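
/*
 * A limit of 0x3ff with a base of 0 describes the real mode interrupt
 * vector table: 256 four byte vectors starting at physical address 0.
 */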


/*
 * We need to trampoline thru a gdt we have in low memory.
 */
#include "../boot/boot_gdt.s"
#endif /* __lint */