/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

/*
 * Manage page tables very early on.
 */
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt = 2;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
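
/*
 * An illustrative aside, not part of the original file: _PAGE_GLOBAL is
 * masked off so the CR3 reload in reset_early_page_tables() really does
 * flush these TLB entries (global translations survive a CR3 write), and
 * _PAGE_NX is masked off because EFER.NX may not be enabled this early,
 * when a set NX bit would be a reserved-bit violation.  Assuming the
 * usual x86 bit positions (_PAGE_GLOBAL = bit 8, _PAGE_NX = bit 63), the
 * mask is equivalent to:
 *
 *	early_pmd_flags = __PAGE_KERNEL_LARGE & ~((1UL << 8) | (1UL << 63));
 */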

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	unsigned long i;

	for (i = 0; i < PTRS_PER_PGD-1; i++)
		early_level4_pgt[i].pgd = 0;

	next_early_pgt = 0;

	write_cr3(__pa_nodebug(early_level4_pgt));
}
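
/*
 * A note for illustration: none of the early mappings are _PAGE_GLOBAL,
 * so rewriting CR3 above is enough to flush every stale early translation
 * from the TLB; global entries would have survived the reload.  The same
 * non-global flush can be sketched, assuming the read_cr3()/write_cr3()
 * accessors used in this file, as:
 *
 *	write_cr3(read_cr3());
 */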

/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	unsigned long i;
	pgdval_t pgd, *pgd_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done? */
	if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
		return -1;

again:
	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd)
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PUD; i++)
			pud_p[i] = 0;
		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PMD; i++)
			pmd_p[i] = 0;
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
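
/*
 * Worked example (illustrative, assuming 4-level paging with 4 KiB page
 * tables): a 64-bit virtual address decomposes into table indices as
 *
 *	pgd_index(addr) = (addr >> 39) & 511
 *	pud_index(addr) = (addr >> 30) & 511
 *	pmd_index(addr) = (addr >> 21) & 511
 *
 * so a fault at __PAGE_OFFSET + 0x40200000 costs at most one PUD page and
 * one PMD page from early_dynamic_pgts, after which a single 2 MiB entry
 * maps physaddr 0x40200000 (already PMD-aligned: 0x200000 * 0x201).
 */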

/*
 * Don't add a printk in here: printk relies on the PDA, which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}
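
/*
 * For illustration: the boot protocol splits the 64-bit command-line
 * pointer across two 32-bit fields.  With hdr.cmd_line_ptr = 0x00090000
 * and ext_cmd_line_ptr = 0x00000001, for example, the combined value is
 *
 *	0x00090000 | (0x00000001ULL << 32) == 0x100090000
 */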

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
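
	/*
	 * Illustrative aside: BUILD_BUG_ON() turns each check above into a
	 * compile-time failure instead of runtime code, classically by
	 * expanding to something along the lines of
	 *
	 *	((void)sizeof(char[1 - 2 * !!(condition)]))
	 *
	 * so a true condition produces a negative array size and breaks
	 * the build; no object code is emitted either way.
	 */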

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	kasan_map_early_shadow(early_level4_pgt);

	/* Clear the BSS before set_intr_gate() is used with early_idt_handlers */
	clear_bss();

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handlers[i]);
	load_idt((const struct desc_ptr *)&idt_descr);
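
	/*
	 * An illustrative note: early_idt_handlers[] is a table of small
	 * assembly stubs, one per exception vector; each stub records its
	 * vector number and branches to a common early handler, which is
	 * what lets early_make_pgtable() service page faults before the
	 * real IDT is installed.
	 */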

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	clear_page(init_level4_pgt);
	/* set init_level4_pgt kernel high mapping */
	init_level4_pgt[511] = early_level4_pgt[511];
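
	/*
	 * Worked example of why slot 511 is the interesting one: the kernel
	 * text mapping starts at __START_KERNEL_map (0xffffffff80000000),
	 * and (0xffffffff80000000 >> 39) & 511 == 511, so the last PGD
	 * entry covers the top 512 GiB that holds the kernel mapping.
	 */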

	kasan_map_early_shadow(init_level4_pgt);

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always non-zero if the boot data has been copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	reserve_ebda_region();

	start_kernel();
}
199