// SPDX-License-Identifier: GPL-2.0
/*
 * prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/asm.h>
#include <asm/page_64.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/init.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
unsigned int __initdata next_early_pgt;
SYM_PIC_ALIAS(next_early_pgt);
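/*
 * _PAGE_GLOBAL and _PAGE_NX are deliberately masked out: global entries
 * would survive the TLB flushes done while the early page tables are
 * torn down and rebuilt, and EFER.NX cannot be relied upon to be enabled
 * this early in boot.
 */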
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

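/*
 * These are the 4-level paging defaults; they are overridden during
 * early startup when 5-level paging is enabled.
 */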
unsigned int __pgtable_l5_enabled __ro_after_init;
SYM_PIC_ALIAS(__pgtable_l5_enabled);
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
SYM_PIC_ALIAS(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
SYM_PIC_ALIAS(ptrs_per_p4d);

unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);

/*
 * Wipe all early page tables except for the kernel symbol map, i.e. the
 * last PGD entry, which maps the kernel at __START_KERNEL_map. Everything
 * else gets rebuilt on demand by the early page fault handler.
 */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return false;

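	/*
	 * Walk early_top_pgt, allocating any missing intermediate levels
	 * from the small early_dynamic_pgts pool. If the pool runs dry,
	 * reset_early_page_tables() recycles it and the walk restarts;
	 * evicted entries are simply rebuilt by later page faults.
	 */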
again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return true;
}

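/*
 * Demand-map the 2M large page covering @address in the direct mapping,
 * using the early PMD flags.
 */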
static bool __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}

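/*
 * Early exception dispatch: #PF faults in the direct mapping are handled
 * by building the missing page tables on demand, #VC is the SEV-ES
 * communication exception and #VE the TDX virtualization exception.
 * Everything else falls through to the early fixup path.
 */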
void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    early_make_pgtable(native_read_cr2()))
		return;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
	    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
		return;

	if (trapnr == X86_TRAP_VE && tdx_early_handle_ve(regs))
		return;

	early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
	memset(__brk_base, 0,
	       (unsigned long) __brk_limit - (unsigned long) __brk_base);
}

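/*
 * The command line pointer is split across the boot protocol: the legacy
 * 32-bit hdr.cmd_line_ptr field carries the low bits and ext_cmd_line_ptr
 * supplies the upper 32 bits.
 */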
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

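/*
 * Copy the boot parameters and the command line out of the real-mode data
 * area before that memory is released for reuse.
 */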
static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init __noreturn x86_64_start_kernel(char *real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	/* Switch the virtual memory layout over to the 5-level values */
	if (pgtable_l5_enabled()) {
		page_offset_base = __PAGE_OFFSET_BASE_L5;
		vmalloc_base = __VMALLOC_BASE_L5;
		vmemmap_base = __VMEMMAP_BASE_L5;
	}

	clear_bss();

	/*
	 * This needs to happen *before* kasan_early_init() because the latter
	 * maps stuff into that page.
	 */
	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	/*
	 * Flush global TLB entries which could be left over from the trampoline page
	 * table.
	 *
	 * This needs to happen *after* kasan_early_init() as KASAN-enabled .configs
	 * instrument native_write_cr4() so KASAN must be initialized for that
	 * instrumentation to work.
	 */
	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));

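	/*
	 * Install the boot-time IDT so that early faults get routed through
	 * do_early_exception() above.
	 */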
	idt_setup_early_handler();

	/* Needed before cc_platform_has() can be used for TDX */
	tdx_early_init();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/*
	 * Set the init_top_pgt kernel high mapping: copy the last PGD entry,
	 * which maps the kernel image at __START_KERNEL_map.
	 */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

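/*
 * Also reached directly by paravirtualized guests (e.g. Xen PV) that
 * bypass x86_64_start_kernel(), hence the check for whether the boot
 * data has already been copied.
 */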
void __init __noreturn x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}

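/*
 * Load the boot-time IDT. When running as an SEV-ES/SEV-SNP guest the #VC
 * handler is installed so that the GHCB protocol can be used from here on.
 */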
void early_setup_idt(void)
{
	void *handler = NULL;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		setup_ghcb();
		handler = vc_boot_ghcb;
	}

	__pi_startup_64_load_idt(handler);
}