// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"

extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(ident_map_size);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .dma section, and its location is passed
 * over to the decompressed / relocated kernel via the .boot.preserved.data
 * section.
 */
extern char _sdma[], _edma[];
extern char _stext_dma[], _etext_dma[];
extern struct exception_table_entry _start_dma_ex_table[];
extern struct exception_table_entry _stop_dma_ex_table[];
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
struct exception_table_entry *
	__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
struct exception_table_entry *
	__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;

int _diag210_dma(struct diag210 *addr);
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
void _diag308_reset_dma(void);
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
	.diag210 = _diag210_dma,
	.diag26c = _diag26c_dma,
	.diag14 = _diag14_dma,
	.diag0c = _diag0c_dma,
	.diag308_reset = _diag308_reset_dma
};
static struct diag210 _diag210_tmp_dma __section(".dma.data");
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;

void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

/* Set the load-program-parameter if facility 40 (LPP) is installed */
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
/* An uncompressed kernel runs in place; memory past image + bss is safe */
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

/* Move the initrd up to addr so it cannot be overwritten during boot */
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (addr <= INITRD_START)
		return;
	memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
	INITRD_START = addr;
}

/*
 * Copy the .boot.data and .boot.preserved.data sections of the decompressor
 * into their counterparts in the decompressed kernel image.
 */
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

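/*
 * Apply the relocations recorded in the .rela.dyn section of the
 * decompressed kernel image. Entries that reference a defined dynamic
 * symbol get the symbol value plus the KASLR offset added; entries with
 * symbol index STN_UNDEF (i.e. R_390_RELATIVE) get the offset only.
 * The actual patching is delegated to arch_kexec_do_relocs().
 */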
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but also include standby (offline)
 * memory. "ident_map_size" could be lower than actual standby or even online
 * memory present, due to limiting factors. We should never go above this limit.
 * It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE) {
		kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, OLDMEM_SIZE);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
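/*
 * Illustrative example (made-up numbers): with ident_map_size = 64 GiB,
 * size = round_up(64 GiB / 8, _SEGMENT_SIZE) = 8 GiB. The max() below only
 * ever raises vmalloc_size, so a larger built-in default is kept; an
 * explicit "vmalloc=" value makes this function return early instead.
 */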
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	setup_lpp();
	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();

	random_lma = __kaslr_offset = 0;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			/* Shift all addresses describing the image by the KASLR offset */
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			vmlinux.default_lma += __kaslr_offset;
			vmlinux.entry += __kaslr_offset;
			vmlinux.bootdata_off += __kaslr_offset;
			vmlinux.bootdata_preserved_off += __kaslr_offset;
			vmlinux.rela_dyn_start += __kaslr_offset;
			vmlinux.rela_dyn_end += __kaslr_offset;
			vmlinux.dynsym_start += __kaslr_offset;
		}
	}

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

	clear_bss_section();
	copy_bootdata();
	if (IS_ENABLED(CONFIG_RELOCATABLE))
		handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save KASLR offset for early dumps, before vmcore_info is set.
		 * Mark the value odd to distinguish it from a real vmcore_info
		 * pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear non-relocated kernel */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}