// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/msr.h>
#include <asm/sev.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
	load_cr3(initial_page_table);
#else
	/*
	 * This function is called before exiting to real-mode and that will
	 * fail with CR4.PCIDE still set.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4_clear_bits(X86_CR4_PCIDE);

	write_cr3(real_mode_header->trampoline_pgd);
#endif

	/*
	 * The CR3 write above will not flush global TLB entries.
	 * Stale, global entries from previous page tables may still be
	 * present. Flush those stale entries.
	 *
	 * This ensures that memory accessed while running with
	 * trampoline_pgd is *actually* mapped into trampoline_pgd.
	 */
	__flush_tlb_all();
}

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;

	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
	if (!mem)
		pr_info("No sub-1M memory is available for the trampoline\n");
	else
		set_real_mode_mem(mem);

	/*
	 * Unconditionally reserve the entire first 1M, see comment in
	 * setup_arch().
	 */
	memblock_reserve(0, SZ_1M);
}

static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		th->flags |= TH_FLAGS_SME_ACTIVE;

	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
		/*
		 * Skip the call to verify_cpu() in secondary_startup_64 as it
		 * will cause #VC exceptions when the AP can't handle them yet.
		 */
		th->start = (u64) secondary_startup_64_no_verify;

		if (sev_es_setup_ap_jump_table(real_mode_header))
			panic("Failed to get/update SEV-ES AP Jump Table");
	}
#endif
}

static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
	int i;
#endif

	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);

	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrq(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;

	trampoline_lock = &trampoline_header->lock;
	*trampoline_lock = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

	/* Map the real mode stub as virtual == physical */
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;

	/*
	 * Include the entirety of the kernel mapping into the trampoline
	 * PGD. This way, all mappings present in the normal kernel page
	 * tables are usable while running on trampoline_pgd.
	 */
	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
		trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

	sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so we
 * need to mark it executable at do_pre_smp_initcalls() at the latest;
 * thus run it as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

void __init init_real_mode(void)
{
	if (!real_mode_header)
		panic("Real mode trampoline was not allocated");

	setup_real_mode();
	set_real_mode_permissions();
}

static int __init do_init_real_mode(void)
{
	x86_platform.realmode_init();
	return 0;
}
early_initcall(do_init_real_mode);