// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/diag288.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/sysinfo.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/asm-offsets.h>
#include <asm/arch-stackprotector.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata_preserved(page_noexec_mask);
unsigned long __bootdata_preserved(segment_noexec_mask);
unsigned long __bootdata_preserved(region_noexec_mask);
union tod_clock __bootdata_preserved(tod_clock_base);
u64 __bootdata_preserved(clock_comparator_max) = -1UL;

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		set_machine_feature(MFEATURE_LPAR);
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;
	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))	/* "KVM" in EBCDIC */
		set_machine_feature(MFEATURE_KVM);
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))	/* "z/VM" in EBCDIC */
		set_machine_feature(MFEATURE_VM);
}

static void detect_diag288(void)
{
	/* "BEGIN" in EBCDIC character set */
	static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5";
	unsigned long action, len;

	action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART;
	len = machine_is_vm() ? sizeof(cmd) : 0;
	if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len))
		return;
	__diag288(WDT_FUNC_CANCEL, 0, 0, 0);
	set_machine_feature(MFEATURE_DIAG288);
}

static void detect_diag9c(void)
{
	unsigned int cpu;
	int rc = 1;

	cpu = stap();
	asm_inline volatile(
		" diag %[cpu],%%r0,0x9c\n"
		"0: lhi %[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+d" (rc)
		: [cpu] "d" (cpu)
		: "cc", "memory");
	if (!rc)
		set_machine_feature(MFEATURE_DIAG9C);
}

static void reset_tod_clock(void)
{
	union tod_clock clk;

	if (store_tod_clock_ext_cc(&clk) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
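	/*
	 * TOD_UNIX_EPOCH is the TOD clock value for 1970-01-01, i.e. the
	 * number of TOD clock units between the z/Architecture TOD epoch
	 * (1900-01-01) and the Unix epoch.
	 */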
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
		disabled_wait();
	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
	tod_clock_base.tod = TOD_UNIX_EPOCH;
	get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}

static void detect_facilities(void)
{
	if (cpu_has_edat1())
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	page_noexec_mask = -1UL;
	segment_noexec_mask = -1UL;
	region_noexec_mask = -1UL;
	if (!cpu_has_nx()) {
		page_noexec_mask &= ~_PAGE_NOEXEC;
		segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
		region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
	}
	if (IS_ENABLED(CONFIG_PCI) && test_facility(153))
		set_machine_feature(MFEATURE_PCI_MIO);
	reset_tod_clock();
	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
		/* Enable signed clock comparator comparisons */
		set_machine_feature(MFEATURE_SCC);
		clock_comparator_max = -1UL >> 1;
		local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
	}
	if (test_facility(50) && test_facility(73)) {
		set_machine_feature(MFEATURE_TX);
		local_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
	}
	if (cpu_has_vx())
		local_ctl_set_bit(0, CR0_VECTOR_BIT);
}

static int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = 1;

	/* Test ESSA_GET_STATE */
	asm_inline volatile(
		" .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0: lhi %[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+d" (rc), [tmp] "+d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_or_die(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		boot_panic(".boot.data section size mismatch\n");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		boot_panic(".boot.preserved.data section size mismatch\n");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			boot_panic("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset;
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but can also include standby
 * (offline) memory or memory areas reserved for other purposes (e.g., memory
 * devices such as virtio-mem).
 *
 * "ident_map_size" could be lower than the actual standby/reserved or even
 * online memory present, due to limiting factors. We should never go above
 * this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online, standby or reserved.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
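/*
 * Hypothetical example: with 16 GiB of online plus standby memory and
 * "mem=4G" on the kernel command line, the function below computes
 * ident_map_size = min(16 GiB, 4 GiB, 1UL << MAX_PHYSMEM_BITS) = 4 GiB.
 */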
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
		boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
			ident_map_size = min(ident_map_size, hsa_size);
			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
		}
	}
#endif
	boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
}

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	if (IS_ENABLED(CONFIG_KMSAN))
		vsize += MODULES_LEN * 2;
	return size_add(vsize, vmalloc_size);
}

static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
	vmax = adjust_to_uv_max(asce_limit);
	boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
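	/*
	 * From this point on vmax is the top of the usable virtual address
	 * space; the kernel image, modules area and vmalloc area are carved
	 * out below it (and below the KASAN shadow, if enabled).
	 */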
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
		boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
			   kernel_start + kernel_size);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
		boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	}
	__kaslr_offset = kernel_start;
	boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;
	boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);

	/* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of the vmalloc area for the KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;
	boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
		   __memcpy_real_area + MEMCPY_REAL_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
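	/*
	 * Rationale for the split above: each page of the identity mapping
	 * costs PAGE_SIZE bytes of address space plus sizeof(struct page)
	 * bytes of vmemmap, and both have to fit below __abs_lowcore; hence
	 * pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page)).
	 */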
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
#endif
	boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
		   __identity_base + ident_map_size);

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel,
 * NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to one eighth of the (potential) physical memory
 * size, unless the size has been set via kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_STACKPROTECTOR
	vmlinux.stack_prot_start += offset;
	vmlinux.stack_prot_end += offset;
#endif
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

void startup_kernel(void)
{
	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
	unsigned long kaslr_large_page_offset;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	setup_lpp();
	store_ipl_parmblock();
	uv_query_info();
	setup_boot_command_line();
	parse_boot_command_line();

	/*
	 * The non-randomized kernel physical start address must be
	 * _SEGMENT_SIZE aligned (see below).
	 */
	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

	/*
	 * Reserve decompressor memory together with the decompression heap,
	 * buffer and the memory which might be occupied by the uncompressed
	 * kernel (if KASLR is off or has failed).
	 */
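	/*
	 * Rough sketch of the reservation below (exact boundaries depend on
	 * the image): [0, nokaslr_text_lma) holds the decompressor with its
	 * heap and buffers, while [nokaslr_text_lma, safe_addr) leaves room
	 * for the uncompressed image in case it ends up at the
	 * non-randomized location.
	 */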
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	read_ipl_report();
	sclp_early_read_info();
	sclp_early_detect_machine_features();
	detect_facilities();
	detect_diag9c();
	detect_machine_type();
	/* detect_diag288() needs machine type */
	detect_diag288();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* ident_map_size is final, physmem allocations can be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long size = vmlinux_size + kaslr_large_page_offset;

		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
	}
	if (!text_lma)
		text_lma = nokaslr_text_lma;
	text_lma |= kaslr_large_page_offset;

	/*
	 * The [__kaslr_offset_phys .. __kaslr_offset_phys + TEXT_OFFSET] region
	 * is never accessed via the kernel image mapping as per the linker
	 * script:
	 *
	 *	. = TEXT_OFFSET;
	 *
	 * Therefore, this region could be used for something else and does
	 * not need to be reserved. See how it is skipped in setup_vmem().
	 */
	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
	deploy_kernel((void *)text_lma);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled, the randomized location of the .amode31
	 * section might overlap with the .vmlinux.relocs section. To avoid
	 * that, the randomize_within_range() call below could have been given
	 * __vmlinux_relocs_64_end as the lower range address. However, the
	 * .amode31 section is written to by the decompressed kernel - by that
	 * time the contents of .vmlinux.relocs are not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, before
	 * the kernel has even started. Therefore, in case the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = text_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(text_lma);
	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	dump_physmem_reserved();
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);
	stack_protector_apply_early(text_lma);

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as an odd value to distinguish it from a real vmcore_info
	 * pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
	jump_to_kernel(&psw);
}