// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
int __bootdata_preserved(relocate_lowcore);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	boot_printk("\n\n%s\n\n -- System halted", x);
	disabled_wait();
}

static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}

static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/*
	 * Test ESSA_GET_STATE: install a temporary program check new PSW
	 * that continues at label 1, then execute ESSA. If the instruction
	 * is not available the program check skips the "la" and rc stays 1,
	 * otherwise rc is set to 0.
	 */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "=&d" (tmp),
		  "+Q" (get_lowcore()->program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
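	/*
	 * The initrd lies (partially) outside of the [min, max] range, e.g.
	 * within memory still reserved for the decompressor: move it to a
	 * freshly allocated top-down location.
	 */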
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset;
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but also include standby (offline)
 * memory. "ident_map_size" could be lower than actual standby or even online
 * memory present, due to limiting factors. We should never go above this limit.
 * It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
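 *
 * For example (illustrative numbers only): with 16G of online plus standby
 * memory and "mem=8g" on the command line, the result is
 * ident_map_size = min(16G, 8G, 1UL << MAX_PHYSMEM_BITS) = 8G.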
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	return size_add(vsize, vmalloc_size);
}

static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
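	/*
	 * vmax is the highest virtual address available to the kernel image;
	 * the image itself, modules, the vmalloc area and the fixmap mappings
	 * are all placed below it by the code that follows.
	 */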
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		boot_printk("The kernel base address is forced to %lx\n", kernel_start);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
	}
	__kaslr_offset = kernel_start;

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;

	/* allow vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of vmalloc area for KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure identity map doesn't overlay with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlay with absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * not the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless the size has been set by kernel command line parameter.
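 *
 * For example, ident_map_size = 64G yields an 8G vmalloc area; the final
 * value is the maximum of that and the current default vmalloc_size.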
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

void startup_kernel(void)
{
	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
	unsigned long kaslr_large_page_offset;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	setup_lpp();

	/*
	 * Non-randomized kernel physical start address must be _SEGMENT_SIZE
	 * aligned (see below).
	 */
	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

	/*
	 * Reserve decompressor memory together with decompression heap,
	 * buffer and memory which might be occupied by uncompressed kernel
	 * (if KASLR is off or failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	/* Collect early boot information and process the kernel command line */
	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* got final ident_map_size, physmem allocations could be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
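	 *
	 * For example (illustrative value): if __kaslr_offset ends in 0xc4000,
	 * then kaslr_large_page_offset below is 0xc4000, and these bits get
	 * ORed into the _SEGMENT_SIZE aligned physical address text_lma.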
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long size = vmlinux_size + kaslr_large_page_offset;

		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
	}
	if (!text_lma)
		text_lma = nokaslr_text_lma;
	text_lma |= kaslr_large_page_offset;

	/*
	 * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is
	 * never accessed via the kernel image mapping as per the linker script:
	 *
	 *	. = TEXT_OFFSET;
	 *
	 * Therefore, this region could be used for something else and does
	 * not need to be reserved. See how it is skipped in setup_vmem().
	 */
	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
	deploy_kernel((void *)text_lma);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled the randomized location of .amode31
	 * section might overlap with .vmlinux.relocs section. To avoid that
	 * the below randomize_within_range() could have been called with
	 * __vmlinux_relocs_64_end as the lower range address. However,
	 * .amode31 section is written to by the decompressed kernel - at
	 * that time the contents of .vmlinux.relocs is not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
	 * before the kernel started. Therefore, in case the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = text_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(text_lma);
	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);

	/*
	 * Save KASLR offset for early dumps, before vmcore_info is set.
	 * Mark as uneven to distinguish from real vmcore_info pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}