/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/nospec-branch.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/smp.h>
#include <asm/thread_info.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_END_OF_STACK
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity-mapped page table
	 * for us.  These identity-mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %RSI holds the physical address of the boot_params structure
	 * provided by the bootloader. Preserve it in %R15 so C function calls
	 * will not clobber it.
	 *
	 * We come here either directly from a 64-bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	mov	%rsi, %r15

	/* Set up the stack for verify_cpu() */
	leaq	__top_init_kernel_stack(%rip), %rsp

	/* Setup GSBASE to allow stack canary access for C code */
	movl	$MSR_GS_BASE, %ecx
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
	movl	%edx, %eax
	shrq	$32,  %rdx
	wrmsr
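	/*
	 * WRMSR writes the MSR selected by %ecx with %edx:%eax, so the
	 * sequence above is, in C-like pseudocode, roughly
	 *
	 *	MSR[MSR_GS_BASE] = ((u64)edx << 32) | eax;
	 *
	 * installing the address of fixed_percpu_data as the GS base.
	 */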

	call	startup_64_setup_gdt_idt

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq
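	/*
	 * LRETQ pops the new RIP and then the new CS from the stack, so
	 * the two pushes above turn it into a far return that reloads
	 * %cs with __KERNEL_CS and continues at .Lon_kernel_cs. This
	 * idiom is used because long mode has no direct far jump with an
	 * immediate segment:offset operand.
	 */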

.Lon_kernel_cs:
	UNWIND_HINT_END_OF_STACK

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs
	 * to be done now, since it also sets up the SEV-SNP CPUID table, which
	 * must happen before any CPUID instructions are executed in subsequent
	 * code. Pass the boot_params pointer as the first argument.
	 */
	movq	%r15, %rdi
	call	sme_enable
#endif

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	movq	%r15, %rsi
	call	__startup_64

	/* Form the CR3 value being sure to include the CR3 modifier */
	leaq	early_top_pgt(%rip), %rcx
	addq	%rcx, %rax
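	/*
	 * In C-like pseudocode, %rax now holds roughly
	 *
	 *	cr3 = __pa(early_top_pgt) + sme_modifier;
	 *
	 * __startup_64() returned the modifier in %rax (zero unless SME
	 * is active), and the RIP-relative LEA above yields the runtime
	 * address of early_top_pgt, which equals its physical address
	 * because we still run identity-mapped here.
	 */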

#ifdef CONFIG_AMD_MEM_ENCRYPT
	mov	%rax, %rdi

	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 */
	call	sev_verify_cbit
#endif

	/*
	 * Switch to early_top_pgt which still has the identity mappings
	 * present.
	 */
	movq	%rax, %cr3

	/* Branch to the common startup code at its kernel virtual address */
	ANNOTATE_RETPOLINE_SAFE
	jmp	*0f(%rip)
SYM_CODE_END(startup_64)

	__INITRODATA
0:	.quad	common_startup_64
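	/*
	 * The quad above holds the absolute virtual address of
	 * common_startup_64. Loading the branch target from memory via
	 * "jmp *0f(%rip)" is what moves execution to the kernel virtual
	 * mapping; a direct jump would be RIP-relative and stay within
	 * the identity mapping.
	 */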

	.text
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
	/*
	 * At this point the CPU runs in 64-bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/* Clear %R15 which holds the boot_params pointer on the boot CPU */
	xorl	%r15d, %r15d

	/* Derive the runtime physical address of init_top_pgt[] */
	movq	phys_base(%rip), %rax
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
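	/*
	 * This open-codes __pa(init_top_pgt): phys_base holds the
	 * physical address that the kernel's virtual base
	 * __START_KERNEL_map maps to, so in C-like pseudocode:
	 *
	 *	rax = phys_base + (init_top_pgt - __START_KERNEL_map);
	 */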
174d6a41f18SArd Biesheuvel
1753ecacdbdSJoerg Roedel	/*
1765868f365STom Lendacky	 * Retrieve the modifier (SME encryption mask if SME is active) to be
1775868f365STom Lendacky	 * added to the initial pgdir entry that will be programmed into CR3.
1785868f365STom Lendacky	 */
179469693d8SMichael Roth#ifdef CONFIG_AMD_MEM_ENCRYPT
180d6a41f18SArd Biesheuvel	addq	sme_me_mask(%rip), %rax
181469693d8SMichael Roth#endif
18282826395SArd Biesheuvel	/*
18382826395SArd Biesheuvel	 * Switch to the init_top_pgt here, away from the trampoline_pgd and
18482826395SArd Biesheuvel	 * unmap the identity mapped ranges.
18582826395SArd Biesheuvel	 */
18682826395SArd Biesheuvel	movq	%rax, %cr3
1875868f365STom Lendacky
18882826395SArd BiesheuvelSYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
18982826395SArd Biesheuvel	UNWIND_HINT_END_OF_STACK
19082826395SArd Biesheuvel	ANNOTATE_NOENDBR
1918170e6beSH. Peter Anvin
192dada8587SArd Biesheuvel	/*
193dada8587SArd Biesheuvel	 * Create a mask of CR4 bits to preserve. Omit PGE in order to flush
194dada8587SArd Biesheuvel	 * global 1:1 translations from the TLBs.
195dada8587SArd Biesheuvel	 *
196dada8587SArd Biesheuvel	 * From the SDM:
197dada8587SArd Biesheuvel	 * "If CR4.PGE is changing from 0 to 1, there were no global TLB
198dada8587SArd Biesheuvel	 *  entries before the execution; if CR4.PGE is changing from 1 to 0,
199dada8587SArd Biesheuvel	 *  there will be no global TLB entries after the execution."
200dada8587SArd Biesheuvel	 */
201dada8587SArd Biesheuvel	movl	$(X86_CR4_PAE | X86_CR4_LA57), %edx
20277a512e3SSean Christopherson#ifdef CONFIG_X86_MCE
20377a512e3SSean Christopherson	/*
20477a512e3SSean Christopherson	 * Preserve CR4.MCE if the kernel will enable #MC support.
20577a512e3SSean Christopherson	 * Clearing MCE may fault in some environments (that also force #MC
20677a512e3SSean Christopherson	 * support). Any machine check that occurs before #MC support is fully
20777a512e3SSean Christopherson	 * configured will crash the system regardless of the CR4.MCE value set
20877a512e3SSean Christopherson	 * here.
20977a512e3SSean Christopherson	 */
210dada8587SArd Biesheuvel	orl	$X86_CR4_MCE, %edx
211dada8587SArd Biesheuvel#endif
21277a512e3SSean Christopherson	movq	%cr4, %rcx
213dada8587SArd Biesheuvel	andl	%edx, %ecx
21477a512e3SSean Christopherson
215dada8587SArd Biesheuvel	/* Even if ignored in long mode, set PSE uniformly on all logical CPUs. */
216dada8587SArd Biesheuvel	btsl	$X86_CR4_PSE_BIT, %ecx
2178170e6beSH. Peter Anvin	movq	%rcx, %cr4
218250c2277SThomas Gleixner
219c9f09539SJoerg Roedel	/*
220dada8587SArd Biesheuvel	 * Set CR4.PGE to re-enable global translations.
221f154f290SJoerg Roedel	 */
222dada8587SArd Biesheuvel	btsl	$X86_CR4_PGE_BIT, %ecx
223f154f290SJoerg Roedel	movq	%rcx, %cr4
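	/*
	 * Net effect of the two CR4 writes above, in C-like pseudocode:
	 *
	 *	cr4 = (cr4 & (PAE | LA57 | MCE)) | PSE;	// PGE clear: flush
	 *	cr4 |= PGE;				// globals back on
	 *
	 * Toggling PGE off and on flushes all TLB entries, including
	 * global ones, per the SDM text quoted above.
	 */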

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
	 * used to look up the CPU number.  For booting a single CPU, the
	 * CPU number is encoded in smpboot_control.
	 *
	 * Bit 31	STARTUP_READ_APICID (Read APICID from APIC)
	 * Bit 0-23	CPU# if STARTUP_xx flags are not set
	 */
	movl	smpboot_control(%rip), %ecx
	testl	$STARTUP_READ_APICID, %ecx
	jnz	.Lread_apicid
	/*
	 * No control bit set, single CPU bringup. CPU number is provided
	 * in bit 0-23. This is also the boot CPU case (CPU number 0).
	 */
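	/*
	 * The decode below, in C-like pseudocode:
	 *
	 *	cpu_nr = smpboot_control & ~STARTUP_PARALLEL_MASK;
	 *
	 * i.e. mask off the STARTUP_xx control bits, keep the CPU number.
	 */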
	andl	$(~STARTUP_PARALLEL_MASK), %ecx
	jmp	.Lsetup_cpu

.Lread_apicid:
	/* Check whether X2APIC mode is already enabled */
	mov	$MSR_IA32_APICBASE, %ecx
	rdmsr
	testl	$X2APIC_ENABLE, %eax
	jnz	.Lread_apicid_msr

#ifdef CONFIG_X86_X2APIC
	/*
	 * If the system is in X2APIC mode, the MMIO base might not be
	 * mapped, causing the MMIO read below to fault. Faults can't
	 * be handled at that point.
	 */
	cmpl	$0, x2apic_mode(%rip)
	jz	.Lread_apicid_mmio

	/* Force the AP into X2APIC mode. */
	orl	$X2APIC_ENABLE, %eax
	wrmsr
	jmp	.Lread_apicid_msr
#endif

.Lread_apicid_mmio:
	/* Read the APIC ID from the fix-mapped MMIO space. */
	movq	apic_mmio_base(%rip), %rcx
	addq	$APIC_ID, %rcx
	movl	(%rcx), %eax
	shr	$24, %eax
	jmp	.Llookup_AP

.Lread_apicid_msr:
	mov	$APIC_X2APIC_ID_MSR, %ecx
	rdmsr

.Llookup_AP:
	/* EAX contains the APIC ID of the current CPU */
	xorl	%ecx, %ecx
	leaq	cpuid_to_apicid(%rip), %rbx

.Lfind_cpunr:
	cmpl	(%rbx,%rcx,4), %eax
	jz	.Lsetup_cpu
	inc	%ecx
#ifdef CONFIG_FORCE_NR_CPUS
	cmpl	$NR_CPUS, %ecx
#else
	cmpl	nr_cpu_ids(%rip), %ecx
#endif
	jb	.Lfind_cpunr
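	/*
	 * The lookup loop above in C-like pseudocode, roughly:
	 *
	 *	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
	 *		if (cpuid_to_apicid[cpu] == apic_id)
	 *			goto setup_cpu;
	 *	// fall through: APIC ID not found, handled below
	 */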

	/* APIC ID not found in the table. Drop the trampoline lock and bail. */
	movq	trampoline_lock(%rip), %rax
	movl	$0, (%rax)

1:	cli
	hlt
	jmp	1b

.Lsetup_cpu:
	/* Get the per cpu offset for the given CPU# which is in ECX */
	movq	__per_cpu_offset(,%rcx,8), %rdx
#else
	xorl	%edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */

	/*
	 * Set up a boot-time stack - any secondary CPU will have lost its
	 * stack by now because the CR3 switch above unmaps the real-mode
	 * stack.
	 *
	 * RDX contains the per-cpu offset
	 */
	movq	pcpu_hot + X86_current_task(%rdx), %rax
	movq	TASK_threadsp(%rax), %rsp
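	/*
	 * In C-like pseudocode the two loads above amount to roughly
	 *
	 *	rsp = pcpu_hot.current_task->thread.sp;
	 *
	 * with TASK_threadsp being the asm-offsets constant for
	 * offsetof(struct task_struct, thread.sp).
	 */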

	/*
	 * Now that this CPU is running on its own stack, drop the realmode
	 * protection. For the boot CPU the pointer is NULL!
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax
	jz	.Lsetup_gdt
	movl	$0, (%rax)

.Lsetup_gdt:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running on. We have to do that here
	 * because in 32-bit mode we couldn't load a 64-bit linear address.
	 */
	subq	$16, %rsp
	movw	$(GDT_SIZE-1), (%rsp)
	leaq	gdt_page(%rdx), %rax
	movq	%rax, 2(%rsp)
	lgdt	(%rsp)
	addq	$16, %rsp
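	/*
	 * The stack stores above assemble the 10-byte operand that LGDT
	 * expects, in C-like pseudocode roughly:
	 *
	 *	struct { u16 limit; u64 base; } __packed gdtr = {
	 *		GDT_SIZE - 1,
	 *		(u64)&gdt_page + percpu_offset	// offset is in %rdx
	 *	};
	 */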

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot CPU uses the init data section until
	 * the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
#ifndef CONFIG_SMP
	leaq	INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
#endif
	movl	%edx, %eax
	shrq	$32, %rdx
	wrmsr

	/* Setup and Load IDT */
	call	early_setup_idt

	/* Check if NX is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/*
	 * Preserve the current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	movl    %eax, %edx
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)

	/* Avoid writing EFER if no change was made (for TDX guest) */
1:	cmpl	%edx, %eax
	je	1f
	xor	%edx, %edx
	wrmsr				/* Make changes effective */
1:
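	/*
	 * The EFER handling above is, in C-like pseudocode, roughly:
	 *
	 *	efer = old = rdmsr(MSR_EFER);
	 *	efer |= EFER_SCE;
	 *	if (cpuid(0x80000001).edx & (1 << 20)) {	// NX bit
	 *		efer |= EFER_NX;
	 *		early_pmd_flags |= _PAGE_NX;
	 *	}
	 *	if (efer != old)
	 *		wrmsr(MSR_EFER, efer);
	 */
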
	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/* Pass the boot_params pointer as first argument */
	movq	%r15, %rdi

.Ljump_to_C_code:
	xorl	%ebp, %ebp	# clear frame pointer
	ANNOTATE_RETPOLINE_SAFE
	callq	*initial_code(%rip)
	ud2
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
/*
 * Entry point for soft restart of a CPU. Invoked from xxx_play_dead() for
 * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
 * unplug. Everything is set up already except the stack.
 */
SYM_CODE_START(soft_restart_cpu)
	ANNOTATE_NOENDBR
	UNWIND_HINT_END_OF_STACK

	/* Find the idle task stack */
	movq	PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
	movq	TASK_threadsp(%rcx), %rsp

	jmp	.Ljump_to_C_code
SYM_CODE_END(soft_restart_cpu)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

SYM_DATA(trampoline_lock, .quad 0);
	__FINITDATA

	__INIT
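	/*
	 * The array below consists of NUM_EXCEPTION_VECTORS small stubs,
	 * each padded to EARLY_IDT_HANDLER_SIZE bytes. Vectors whose
	 * exceptions do not push a hardware error code get a dummy zero
	 * pushed instead, so every stub hands early_idt_handler_common a
	 * uniform stack frame: hardware frame, error code (or 0), vector
	 * number.
	 */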
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		ENDBR
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
		ENDBR
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]

SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8
	ENDBR

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq    %rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call    do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq    $8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org    init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
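	/*
	 * KERNEL_IMAGE_SIZE/PMD_SIZE below is the number of 2 MiB PMD
	 * entries needed to cover the kernel image, e.g. 1 GiB / 2 MiB =
	 * 512 entries (a full PMD page) when RANDOMIZE_BASE is enabled.
	 */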
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

	.data
	.align 16

SYM_DATA(smpboot_control,		.long 0)

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)