xref: /linux/arch/x86/kernel/head_64.S (revision 79997eda0d31bc68203c95ecb978773ee6ce7a1f)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)
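
/*
 * A worked example of the index arithmetic above (the concrete constants
 * are an assumption based on the default 4-level layout, not taken from
 * this file): with __PAGE_OFFSET_BASE_L4 = 0xffff888000000000, l4_index()
 * yields (0xffff888000000000 >> 39) & 511 = 273, so L4_PAGE_OFFSET = 273.
 * With __START_KERNEL_map = 0xffffffff80000000, L4_START_KERNEL = 511 and
 * L3_START_KERNEL = 510.
 */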

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity-mapped page table
	 * for us.  These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	(__end_init_task - PTREGS_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi

	/* Set up GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
	movl	%edx, %eax
	shrq	$32,  %rdx
	wrmsr
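
	/*
	 * Note on the WRMSR above: it takes the 64-bit value in EDX:EAX,
	 * which is why the movl/shrq pair splits the address of
	 * fixed_percpu_data. E.g. a (hypothetical) base of
	 * 0xffffffff82a00000 would be written with EAX = 0x82a00000 and
	 * EDX = 0xffffffff.
	 */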

	call	startup_64_setup_env

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq
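
	/*
	 * The lretq above pops the just-pushed RIP and then the new CS
	 * selector: a far return like this is the standard way to reload
	 * %cs in 64-bit mode, where a direct move to %cs is not available.
	 */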

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64

	/* Form the CR3 value, making sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp 1f
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity-mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15, which holds the boot_params pointer on the boot CPU */
	xorq	%r15, %r15

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif

	/* Form the CR3 value, making sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:
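
	/*
	 * Both entry paths merge at the 1: label above: the boot CPU
	 * arrives via the "jmp 1f" in startup_64 with RAX based on
	 * early_top_pgt, while secondary CPUs fall through with RAX based
	 * on init_top_pgt.
	 */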

#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	movq	%cr4, %rcx
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif

	/* Enable PAE mode, PSE, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PSE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Set up the early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	movq	%rax, %rdi
	call	sev_verify_cbit

	/*
	 * Switch to the new page table.
	 *
	 * For the boot CPU this switches to early_top_pgt which still has
	 * the identity mappings present. The secondary CPUs will switch to
	 * the init_top_pgt here, away from the trampoline_pgd, and unmap
	 * the identity-mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	movq	%cr4, %rcx
	movq	%rcx, %rax
	xorq	$X86_CR4_PGE, %rcx
	movq	%rcx, %cr4
	movq	%rax, %cr4
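
	/*
	 * Note: a CR3 write alone does not evict global (_PAGE_GLOBAL) TLB
	 * entries; toggling CR4.PGE off and back on, as done above, flushes
	 * the entire TLB including global entries.
	 */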

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR // above
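
	/*
	 * The movq/indirect-jmp pair above is what actually moves RIP into
	 * the kernel's virtual mapping: "$1f" loads the label's absolute
	 * (linked, virtual) address, whereas a plain near jump would be
	 * RIP-relative and would keep executing from the old
	 * (identity-mapped) addresses.
	 */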

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC and then
	 * used to look up the CPU number.  For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (read the APIC ID from the APIC)
	 * Bits 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. The CPU number is provided
	 * in bits 0-23. This is also the boot CPU case (CPU number 0).
	 */
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr

	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorq	%rcx, %rcx
	leaq	cpuid_to_apicid(%rip), %rbx

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr
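
	/*
	 * The lookup loop above is the assembly equivalent of this C
	 * sketch (added for illustration):
	 *
	 *	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
	 *		if (cpuid_to_apicid[cpu] == apicid)
	 *			goto setup_cpu;
	 */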

	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per-CPU offset for the given CPU#, which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Set up a boot time stack - any secondary CPU will have lost its
	 * stack by now because the CR3 switch above unmaps the real-mode
	 * stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp
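
	/*
	 * The 10 bytes written to the stack above form the operand LGDT
	 * expects in 64-bit mode: a 16-bit limit (GDT_SIZE-1) followed by
	 * the 64-bit linear base address of this CPU's gdt_page.
	 */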

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-CPU areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Set up and load the IDT */
	call	early_setup_idt

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi
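
	/*
	 * EDX (saved to EDI above) holds the extended feature flags from
	 * CPUID leaf 0x80000001; bit 20 is the NX capability tested below
	 * before setting EFER.NX.
	 */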

	/* Set up EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison, and to skip
	 * EFER writes if no change was made (for TDX guests)
	 */
	movl    %eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guests) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
	/* Set up CR0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Zero EFLAGS after setting %rsp */
	pushq $0
	popfq

	/* Pass the boot_params pointer as the first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer's Manual, Vol 2, states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16.
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU
 * hot-unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
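
/*
 * Each stub emitted by the .rept above is padded with 0xcc (int3) out to
 * EARLY_IDT_HANDLER_SIZE bytes, so the entry point for vector N is simply
 * early_idt_handler_array + N * EARLY_IDT_HANDLER_SIZE.
 */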

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call    do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

/* Automate the creation of 1:1 (identity) mapping PMD entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
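
/*
 * As an illustration (an example expansion with a stand-in "perm"
 * argument, not emitted by this file as-is), PMDS(0, perm, 3) produces
 * three consecutive 2M-page PMD entries:
 *
 *	.quad 0 + (0 << PMD_SHIFT) + perm	# maps [0, 2M)
 *	.quad 0 + (1 << PMD_SHIFT) + perm	# maps [2M, 4M)
 *	.quad 0 + (2 << PMD_SHIFT) + perm	# maps [4M, 6M)
 */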

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether the CPU
	 * supports it or whether it is enabled.  But, the CPU should
	 * ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
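
/*
 * Arithmetic note: with PMD_SIZE = 2 MiB, the PMDS() line above emits
 * KERNEL_IMAGE_SIZE/PMD_SIZE entries - 512 for the 1 GiB KERNEL_IMAGE_SIZE
 * (RANDOMIZE_BASE), 256 for the 512 MiB case.
 */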

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2 MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(smpboot_control,		.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)