xref: /linux/arch/x86/realmode/init.c (revision 071bf69a0220253a44acb8b2a27f7a262b9a46bf)
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

struct real_mode_header *real_mode_header;
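/* Set by setup_real_mode() to point at trampoline_header->cr4 (64-bit only) */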
u32 *trampoline_cr4_features;

/* Holds the pgd entry used for booting additional CPUs */
pgd_t trampoline_pgd_entry;

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	unsigned char *base;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);
}

void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	base = (unsigned char *)real_mode_header;

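	/* Copy the prebuilt real-mode blob into the pages reserved below 1 MiB. */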
	memcpy(base, real_mode_blob, size);

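	/*
	 * Real-mode segments address memory in 16-byte paragraphs, so
	 * the segment value is the physical base shifted right by 4.
	 */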
	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

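	/*
	 * The relocation table built alongside the blob is two runs,
	 * each a u32 count followed by that many u32 offsets into the
	 * blob: first the 16-bit segment fixups, then the 32-bit
	 * linear-address fixups.
	 */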
	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
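	/*
	 * 32-bit APs enter at startup_32_smp with the boot GDT; the
	 * limit covers descriptors up to __BOOT_DS (8 bytes each,
	 * hence the +7).
	 */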
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = __read_cr4();

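	/*
	 * Entry 0 supplies the low identity mapping the AP needs right
	 * after enabling paging; entry 511 mirrors the kernel's
	 * top-level entry so the AP can jump to secondary_startup_64
	 * in the high kernel mapping.
	 */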
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
	trampoline_pgd[511] = init_level4_pgt[511].pgd;
#endif
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. The trampoline code is also executed by the APs, so it
 * must be marked executable by do_pre_smp_initcalls() at the latest;
 * hence this runs as an early_initcall().
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

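	/*
	 * Default the whole blob to non-executable, write-protect it up
	 * through ro_end, then re-enable execute on the text range so
	 * the APs can run it.
	 */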
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

	return 0;
}
early_initcall(set_real_mode_permissions);