arch/x86/kernel/head_64.S: diff between commits 39d64ee59ceee0fb61243eab3c4b7b4492f80df2 (old) and ea4654e0885348f0faa47f6d7b44a08d75ad16e9 (new)
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
  *
  * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
  * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
  * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
  * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
  */

-
+#include <linux/export.h>
 #include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <linux/pgtable.h>
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include "../entry/calling.h"
-#include <asm/export.h>
 #include <asm/nospec-branch.h>
 #include <asm/apicdef.h>
 #include <asm/fixmap.h>
 #include <asm/smp.h>

 /*
  * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
  * because we need identity-mapped pages.

--- 76 unchanged lines hidden ---

  * programmed into CR3.
  */
 	leaq	_text(%rip), %rdi
 	movq	%r15, %rsi
 	call	__startup_64

 	/* Form the CR3 value being sure to include the CR3 modifier */
 	addq	$(early_top_pgt - __START_KERNEL_map), %rax
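The "CR3 modifier" in the comment is whatever __startup_64() returned in %rax; on SME-capable systems that is the memory-encryption mask. A minimal C sketch of how the pieces combine, not part of the patch (the helper name and parameters are illustrative):

    #include <stdint.h>

    static uint64_t form_early_cr3(uint64_t top_pgt_link_va,
                                   uint64_t start_kernel_map,
                                   uint64_t phys_base, uint64_t cr3_modifier)
    {
            /* link-time VA minus __START_KERNEL_map = link-time phys addr */
            uint64_t cr3 = cr3_modifier + (top_pgt_link_va - start_kernel_map);

            /* relocated kernels add the runtime physical load offset */
            cr3 += phys_base;
            return cr3;
    }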
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	mov	%rax, %rdi
+	mov	%rax, %r14
+
+	addq	phys_base(%rip), %rdi
+
+	/*
+	 * For SEV guests: Verify that the C-bit is correct. A malicious
+	 * hypervisor could lie about the C-bit position to perform a ROP
+	 * attack on the guest by writing to the unencrypted stack and wait for
+	 * the next RET instruction.
+	 */
+	call	sev_verify_cbit
+
+	/*
+	 * Restore CR3 value without the phys_base which will be added
+	 * below, before writing %cr3.
+	 */
+	mov	%r14, %rax
+#endif
+
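This hunk makes the boot CPU verify the C-bit in startup_64 (with phys_base temporarily folded into the CR3 value); a later hunk drops the corresponding check from the secondary-CPU path. For reference, a small C sketch of where the C-bit position comes from, not part of the patch (the function name is illustrative; the CPUID leaf and bit field are architectural):

    #include <stdint.h>
    #include <cpuid.h>

    static uint64_t sev_cbit_mask_from_cpuid(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* CPUID Fn8000_001F: AMD memory encryption capabilities */
            if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
                    return 0;       /* leaf not implemented */

            /* EBX[5:0]: bit position of the C-bit in a page-table entry */
            return 1ULL << (ebx & 0x3f);
    }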
 	jmp	1f
 SYM_CODE_END(startup_64)

 SYM_CODE_START(secondary_startup_64)
 	UNWIND_HINT_END_OF_STACK
 	ANNOTATE_NOENDBR
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,

--- 49 unchanged lines hidden ---

 	 * here.
 	 */
 	movq	%cr4, %rcx
 	andl	$X86_CR4_MCE, %ecx
 #else
 	movl	$0, %ecx
 #endif

-	/* Enable PAE mode, PGE and LA57 */
-	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
+	/* Enable PAE mode, PSE, PGE and LA57 */
+	orl	$(X86_CR4_PAE | X86_CR4_PSE | X86_CR4_PGE), %ecx
 #ifdef CONFIG_X86_5LEVEL
-	testl	$1, __pgtable_l5_enabled(%rip)
+	testb	$1, __pgtable_l5_enabled(%rip)
 	jz	1f
 	orl	$X86_CR4_LA57, %ecx
 1:
 #endif
 	movq	%rcx, %cr4

 	/* Setup early boot stage 4-/5-level pagetables. */
 	addq	phys_base(%rip), %rax

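For reference, a C sketch of the CR4 value these hunks assemble, not part of the patch; the bit positions are architectural, the helper is illustrative. MCE is carried over from the old CR4 value (the #ifdef block before this hunk) so machine checks stay enabled across the transition:

    #define X86_CR4_PSE     (1UL << 4)      /* page size extensions (2M/4M) */
    #define X86_CR4_PAE     (1UL << 5)      /* physical address extension */
    #define X86_CR4_MCE     (1UL << 6)      /* machine-check enable */
    #define X86_CR4_PGE     (1UL << 7)      /* global-page enable */
    #define X86_CR4_LA57    (1UL << 12)     /* 57-bit / 5-level paging */

    static unsigned long early_cr4_value(unsigned long old_cr4, int l5_enabled)
    {
            unsigned long cr4 = old_cr4 & X86_CR4_MCE;  /* preserve MCE only */

            cr4 |= X86_CR4_PAE | X86_CR4_PSE | X86_CR4_PGE;
            if (l5_enabled)                 /* stand-in for __pgtable_l5_enabled */
                    cr4 |= X86_CR4_LA57;
            return cr4;
    }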
 	/*
-	 * For SEV guests: Verify that the C-bit is correct. A malicious
-	 * hypervisor could lie about the C-bit position to perform a ROP
-	 * attack on the guest by writing to the unencrypted stack and wait for
-	 * the next RET instruction.
-	 */
-	movq	%rax, %rdi
-	call	sev_verify_cbit
-
-	/*
 	 * Switch to new page-table
 	 *
 	 * For the boot CPU this switches to early_top_pgt which still has the
-	 * indentity mappings present. The secondary CPUs will switch to the
+	 * identity mappings present. The secondary CPUs will switch to the
 	 * init_top_pgt here, away from the trampoline_pgd and unmap the
-	 * indentity mapped ranges.
+	 * identity mapped ranges.
 	 */
 	movq	%rax, %cr3

 	/*
 	 * Do a global TLB flush after the CR3 switch to make sure the TLB
 	 * entries from the identity mapping are flushed.
 	 */
 	movq	%cr4, %rcx

--- 31 unchanged lines hidden ---

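The flush itself sits in the hidden lines after "movq %cr4, %rcx". A C sketch of the classic CR4.PGE toggle, which architecturally invalidates all TLB entries including global ones; ring-0 only, not part of the patch:

    static inline void tlb_flush_global_sketch(void)
    {
            unsigned long cr4;

            asm volatile("movq %%cr4, %0" : "=r" (cr4));
            /* writing CR4 with PGE flipped flushes the whole TLB */
            asm volatile("movq %0, %%cr4" : : "r" (cr4 ^ (1UL << 7)) : "memory");
            /* restore the original value, re-enabling global pages */
            asm volatile("movq %0, %%cr4" : : "r" (cr4) : "memory");
    }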
 .Lread_apicid:
 	/* Check whether X2APIC mode is already enabled */
 	mov	$MSR_IA32_APICBASE, %ecx
 	rdmsr
 	testl	$X2APIC_ENABLE, %eax
 	jnz	.Lread_apicid_msr

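For reference, a kernel-context C sketch of this check, not part of the patch; MSR 0x1b and the EXTD bit (bit 10) are architectural, the helper names are illustrative. RDMSR is privileged, so this cannot run in user space:

    #include <stdint.h>

    #define MSR_IA32_APICBASE       0x1b
    #define X2APIC_ENABLE           (1u << 10)      /* EXTD bit */

    static inline uint32_t apicbase_lo(void)
    {
            uint32_t lo, hi;

            asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
                         : "c" (MSR_IA32_APICBASE));
            return lo;
    }

    static int x2apic_already_enabled(void)
    {
            return (apicbase_lo() & X2APIC_ENABLE) != 0;
    }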
+#ifdef CONFIG_X86_X2APIC
+	/*
+	 * If system is in X2APIC mode then MMIO base might not be
+	 * mapped causing the MMIO read below to fault. Faults can't
+	 * be handled at that point.
+	 */
+	cmpl	$0, x2apic_mode(%rip)
+	jz	.Lread_apicid_mmio
+
+	/* Force the AP into X2APIC mode. */
+	orl	$X2APIC_ENABLE, %eax
+	wrmsr
+	jmp	.Lread_apicid_msr
+#endif
+
+.Lread_apicid_mmio:
 	/* Read the APIC ID from the fix-mapped MMIO space. */
 	movq	apic_mmio_base(%rip), %rcx
 	addq	$APIC_ID, %rcx
 	movl	(%rcx), %eax
 	shr	$24, %eax
 	jmp	.Llookup_AP

 .Lread_apicid_msr:

--- 177 unchanged lines hidden ---
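A C sketch of the two APIC ID sources this routine reads, not part of the patch: the xAPIC MMIO ID register (offset 0x20, ID in bits 31:24, matching the "shr $24" above) and the x2APIC ID MSR (0x802, the hidden .Lread_apicid_msr path). Both accesses are privileged; helper names are illustrative:

    #include <stdint.h>

    #define APIC_ID_REG_OFFSET      0x20    /* xAPIC ID register */
    #define MSR_X2APIC_ID           0x802   /* x2APIC ID register as an MSR */

    static uint32_t apic_id_from_mmio(volatile uint8_t *apic_mmio_base)
    {
            uint32_t id_reg = *(volatile uint32_t *)(apic_mmio_base +
                                                     APIC_ID_REG_OFFSET);
            return id_reg >> 24;            /* ID lives in bits 31:24 */
    }

    static uint32_t apic_id_from_msr(void)
    {
            uint32_t lo, hi;

            asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (MSR_X2APIC_ID));
            return lo;                      /* full 32-bit x2APIC ID */
    }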

  * restarting the boot CPU or for restarting SEV guest CPUs after CPU hot
  * unplug. Everything is set up already except the stack.
  */
 SYM_CODE_START(soft_restart_cpu)
 	ANNOTATE_NOENDBR
 	UNWIND_HINT_END_OF_STACK

 	/* Find the idle task stack */
-	movq	PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
+	movq	PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx
 	movq	TASK_threadsp(%rcx), %rsp

 	jmp	.Ljump_to_C_code
 SYM_CODE_END(soft_restart_cpu)
 #endif

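The PER_CPU_VAR() hunk above changes only where the X86_current_task displacement is applied; both spellings address the same pcpu_hot field. A C sketch of the resulting %gs-relative load, not part of the patch; the struct layout and offset are illustrative (the real offset comes from asm-offsets):

    struct pcpu_hot_sketch {
            void *current_task;             /* X86_current_task = its offset */
            /* ... more hot per-CPU data ... */
    };

    static inline void *this_cpu_current_task(void)
    {
            void *task;

            /* in the kernel, %gs points at this CPU's per-CPU area */
            asm volatile("movq %%gs:%c1, %0"
                         : "=r" (task)
                         : "i" (__builtin_offsetof(struct pcpu_hot_sketch,
                                                   current_task)));
            return task;
    }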
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*

--- 128 unchanged lines hidden ---

 	/* Pure iret required here - don't use INTERRUPT_RETURN */
 	iretq
 SYM_CODE_END(vc_no_ghcb)
 #endif

 #define SYM_DATA_START_PAGE_ALIGNED(name)			\
 	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

-#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
 /*
  * Each PGD needs to be 8k long and 8k aligned. We do not
  * ever go out to userspace with these, so we do not
  * strictly *need* the second page, but this allows us to
  * have a single set_pgd() implementation that does not
  * need to worry about whether it has 4k or 8k to work
  * with.
  *

--- 138 unchanged lines hidden ---
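For context on the 8k PGD comment above: with page-table isolation the kernel PGD occupies the 8k-aligned base page and the user copy the page at base + 4k, so the entry code can switch address spaces by flipping one bit in CR3. A C sketch, not part of the patch; the bit value follows from PAGE_SIZE == 4096, the helper name is illustrative:

    #define PTI_USER_PGTABLE_BIT    12      /* 4096 == 1 << 12 */

    static unsigned long pti_user_cr3(unsigned long kernel_cr3)
    {
            /* user PGD sits one page above the kernel PGD */
            return kernel_cr3 | (1UL << PTI_USER_PGTABLE_BIT);
    }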