// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
int __bootdata_preserved(relocate_lowcore);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}

static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/* Test ESSA_GET_STATE */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "=&d" (tmp),
		  "+Q" (get_lowcore()->program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
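	/* The initrd already fits inside the [min, max] range, nothing to do */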
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset - __START_KERNEL;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset - __START_KERNEL;
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might cover not only online memory, but also standby (offline)
 * memory. "ident_map_size" could be lower than the actual standby or even
 * online memory present, due to limiting factors. We should never go above
 * this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
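 *
 * Example (illustrative numbers only): with 16 GiB of online memory,
 * 16 GiB of standby memory and "mem=8G" on the command line, the
 * resulting ident_map_size is 8 GiB - the smallest limit wins.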
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	return size_add(vsize, vmalloc_size);
}

static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force modules and the vmalloc area below the ultravisor
	 * secure storage limit, so that any vmalloc allocation
	 * we do could be used to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
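	 *
	 * Note: the region sizes used above are architectural constants
	 * (_REGION3_SIZE = 2 GiB, _REGION2_SIZE = 4 TiB, _REGION1_SIZE =
	 * 8 PiB), so asce_limit selects between a 3-level (4 TiB) and a
	 * 4-level (8 PiB) kernel address space.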
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		decompressor_printk("The kernel base address is forced to %lx\n", kernel_start);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
	}
	__kaslr_offset = kernel_start;

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;

	/* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of the vmalloc area for KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
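 *
 * Example (illustrative): an ident_map_size of 16 GiB yields a computed
 * size of 2 GiB. Since the result is taken as max() with the current
 * vmalloc_size, this function only ever grows the vmalloc area.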
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

static void fixup_vmlinux_info(void)
{
	vmlinux.entry -= __START_KERNEL;
	kaslr_adjust_vmlinux_info(-__START_KERNEL);
}

void startup_kernel(void)
{
	unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
	unsigned long amode31_lma = 0;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	fixup_vmlinux_info();
	setup_lpp();

	/*
	 * The non-randomized kernel physical start address must be
	 * _SEGMENT_SIZE aligned (see below).
	 */
	nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);

	/*
	 * Reserve decompressor memory together with decompression heap,
	 * buffer and memory which might be occupied by the uncompressed
	 * kernel (if KASLR is off or failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* got final ident_map_size, physmem allocations can be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
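	 *
	 * Worked example (illustrative values): if __kaslr_offset ends in
	 * 0x84000, a physical base randomized to e.g. 0x52000000 is adjusted
	 * below to 0x52084000, so both addresses share the same offset
	 * within a 1 MB large page.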
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long end = ident_map_size - kaslr_large_page_offset;

		__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
	}
	if (!__kaslr_offset_phys)
		__kaslr_offset_phys = nokaslr_offset_phys;
	__kaslr_offset_phys |= kaslr_large_page_offset;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
	deploy_kernel((void *)__kaslr_offset_phys);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled, the randomized location of the .amode31
	 * section might overlap with the .vmlinux.relocs section. To avoid
	 * that the below randomize_within_range() could have been called with
	 * __vmlinux_relocs_64_end as the lower range address. However, the
	 * .amode31 section is written to by the decompressed kernel - at
	 * that time the contents of .vmlinux.relocs are not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
	 * before the kernel is started. Therefore, in case the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(__kaslr_offset_phys);
	kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as an odd value to distinguish it from a real vmcore_info
	 * pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}