/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif
/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
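
	/*
	 * Illustrative example, with hypothetical numbers: leaq _text(%rip)
	 * yields the physical address we are actually running at, while
	 * $_text - __START_KERNEL_map is the physical address we were
	 * compiled to run at.  If the kernel was linked for 0x1000000 but
	 * loaded at 0x5000000, %rbp holds the delta 0x4000000, which is
	 * added to the affected page table entries below.
	 */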

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address
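
	/*
	 * Why these checks work: PMD_PAGE_MASK is ~(PMD_PAGE_SIZE - 1), so
	 * $~PMD_PAGE_MASK keeps only the offset bits within a 2M page; a
	 * nonzero result means the load address is not 2M aligned.  And
	 * shifting _text right by MAX_PHYSMEM_BITS leaves a nonzero value
	 * iff the address has bits set above the supported physical range.
	 */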

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
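
	/*
	 * Summary of the loop above: %rbx points at the second
	 * early_dynamic_pgts page (used as a PMD), %rdi holds the PMD index
	 * of _text, %rax the first 2M mapping entry, and %rcx the number of
	 * 2M pages needed to cover _text.._end.  Each pass writes one
	 * identity-mapping PMD entry; the index wraps modulo PTRS_PER_PMD,
	 * which is where the harmless "nonsense entries" mentioned above
	 * come from.
	 */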

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
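
	/*
	 * Both boot paths load %rax with a top-level page table address
	 * relative to __START_KERNEL_map (early_level4_pgt here,
	 * init_level4_pgt for secondaries) and converge at the "1:" label
	 * below, where phys_base is added before the result is written to
	 * %cr3.
	 */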
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4-level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

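	/*
	 * Background for the NX test above: CPUID leaf 0x80000001 returns
	 * the NX (No Execute) feature in bit 20 of %edx, so the btl checks
	 * that bit before enabling EFER.NX and propagating _PAGE_BIT_NX
	 * into early_pmd_flags.
	 */
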
	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

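	/*
	 * Note on the wrmsr above: it takes the 64-bit MSR value split
	 * across %edx:%eax (high:low dwords), which is why initial_gs is
	 * loaded as two 32-bit halves rather than with a single movq.
	 */
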
	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer's Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
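
	/*
	 * For clarity, the frame consumed by the lretq above looks like
	 * this just before it executes (lretq pops %rip, then %cs):
	 *
	 *	  (%rsp)  initial_code	-> new %rip
	 *	 8(%rsp)  __KERNEL_CS	-> new %cs
	 *	16(%rsp)  0		fake return address
	 */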

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq stack_start(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* SMP bootup changes these two */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)

	GLOBAL(stack_start)
	.quad  init_thread_union+THREAD_SIZE-8
	.word  0
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
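
/*
 * Layout note for the .rept block above: it emits one stub per exception
 * vector.  Vectors whose bit is clear in EXCEPTION_ERRCODE_MASK get a
 * dummy error code pushed first, so every stub leaves an identical frame.
 * The .fill pads each stub with int3 (0xcc) bytes out to
 * EARLY_IDT_HANDLER_SIZE, letting the early IDT point at
 * early_idt_handler_array + vector * EARLY_IDT_HANDLER_SIZE.
 */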

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	cmpl $2,(%rsp)		# X86_TRAP_NMI
	je .Lis_nmi		# Ignore NMI

	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	incl early_recursion_flag(%rip)

	pushq %rax		# 64(%rsp)
	pushq %rcx		# 56(%rsp)
	pushq %rdx		# 48(%rsp)
	pushq %rsi		# 40(%rsp)
	pushq %rdi		# 32(%rsp)
	pushq %r8		# 24(%rsp)
	pushq %r9		# 16(%rsp)
	pushq %r10		#  8(%rsp)
	pushq %r11		#  0(%rsp)

	cmpl $__KERNEL_CS,96(%rsp)
	jne 11f

	cmpl $14,72(%rsp)	# Page fault?
	jnz 10f
	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			# All good

10:
	leaq 88(%rsp),%rdi	# Pointer to %rip
	call early_fixup_exception
	andl %eax,%eax
	jnz 20f			# Found an exception entry

11:
#ifdef CONFIG_EARLY_PRINTK
	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
	movl 80(%rsp),%r8d	# error code
	movl 72(%rsp),%esi	# vector number
	movl 96(%rsp),%edx	# %cs
	movq 88(%rsp),%rcx	# %rip
	xorl %eax,%eax
	leaq early_idt_msg(%rip),%rdi
	call early_printk
	cmpl $2,early_recursion_flag(%rip)
	jz  1f
	call dump_stack
#ifdef CONFIG_KALLSYMS
	leaq early_idt_ripmsg(%rip),%rdi
	movq 88(%rsp),%rsi	# %rip again (88(%rsp) is the %rip slot, per above)
	call __print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp 1b

20:	# Exception table entry found or page table generated
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rdi
	popq %rsi
	popq %rdx
	popq %rcx
	popq %rax
	decl early_recursion_flag(%rip)
.Lis_nmi:
	addq $16,%rsp		# drop vector number and error code
	INTERRUPT_RETURN
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
early_recursion_flag:
	.long 0

#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
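
/*
 * For example, PMDS(0, __PAGE_KERNEL_LARGE_EXEC, 3) expands to three
 * consecutive 2M entries ((i << PMD_SHIFT) == i * PMD_SIZE):
 *
 *	.quad 0 * PMD_SIZE + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 1 * PMD_SIZE + __PAGE_KERNEL_LARGE_EXEC
 *	.quad 2 * PMD_SIZE + __PAGE_KERNEL_LARGE_EXEC
 */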

	__INITDATA
NEXT_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
	.fill	512,8,0
#else
NEXT_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
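
/*
 * Unpacking the index math in the comments above: __START_KERNEL_map is
 * 0xffffffff80000000, i.e. 2G below the top of the canonical address
 * space, so pgd_index(__START_KERNEL_map) = 511 and
 * pud_index(__START_KERNEL_map) = L3_START_KERNEL = 510.  The two .quad
 * entries above therefore land in PUD slots 510 and 511, matching the
 * level3_kernel_pgt fixups performed in startup_64.
 */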

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
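
/*
 * With the default 512M KERNEL_IMAGE_SIZE and 2M PMDs (both configuration
 * dependent), the PMDS() invocation above emits 512M/2M = 256 entries,
 * filling half of the level2_kernel_pgt page.
 */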

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE