// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/diag288.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/sysinfo.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/asm-offsets.h>
#include <asm/arch-stackprotector.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata_preserved(page_noexec_mask);
unsigned long __bootdata_preserved(segment_noexec_mask);
unsigned long __bootdata_preserved(region_noexec_mask);
union tod_clock __bootdata_preserved(tod_clock_base);
u64 __bootdata_preserved(clock_comparator_max) = -1UL;

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

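/*
 * Determine whether we run in LPAR or under a hypervisor. The
 * control-program identifiers compared below are EBCDIC strings:
 * "\xd2\xe5\xd4" decodes to "KVM" and "\xa9\x61\xe5\xd4" to "z/VM".
 */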
static void detect_machine_type(void)
{
        struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

        /* Check current-configuration-level */
        if (stsi(NULL, 0, 0, 0) <= 2) {
                set_machine_feature(MFEATURE_LPAR);
                return;
        }
        /* Get virtual-machine cpu information. */
        if (stsi(vmms, 3, 2, 2) || !vmms->count)
                return;
        /* Detect known hypervisors */
        if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
                set_machine_feature(MFEATURE_KVM);
        else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
                set_machine_feature(MFEATURE_VM);
}

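/*
 * Probe for the diag 0x288 watchdog: try to initialize it with the
 * minimum interval and cancel it again right away. Under z/VM the
 * watchdog action is the CP command "BEGIN", on LPAR it is a restart
 * action (LPARWDT_RESTART).
 */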
static void detect_diag288(void)
{
        /* "BEGIN" in EBCDIC character set */
        static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5";
        unsigned long action, len;

        action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART;
        len = machine_is_vm() ? sizeof(cmd) : 0;
        if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len))
                return;
        __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
        set_machine_feature(MFEATURE_DIAG288);
}

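/*
 * Probe for diag 0x9c (directed yield) by issuing it against this
 * CPU's own address. If the instruction is not provided, the resulting
 * program check is caught by the exception table entry and rc stays 1.
 */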
static void detect_diag9c(void)
{
        unsigned int cpu;
        int rc = 1;

        cpu = stap();
        asm_inline volatile(
                " diag %[cpu],%%r0,0x9c\n"
                "0: lhi %[rc],0\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [rc] "+d" (rc)
                : [cpu] "d" (cpu)
                : "cc", "memory");
        if (!rc)
                set_machine_feature(MFEATURE_DIAG9C);
}

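/*
 * Make sure the TOD clock is running. A nonzero condition code from
 * STORE CLOCK EXTENDED means the clock is stopped or in error state;
 * in that case set it to the Unix Epoch, or enter a disabled wait if
 * even that fails.
 */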
static void reset_tod_clock(void)
{
        union tod_clock clk;

        if (store_tod_clock_ext_cc(&clk) == 0)
                return;
        /* TOD clock not running. Set the clock to Unix Epoch. */
        if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
                disabled_wait();
        memset(&tod_clock_base, 0, sizeof(tod_clock_base));
        tod_clock_base.tod = TOD_UNIX_EPOCH;
        get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}

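/*
 * Probe STFLE facility bits and set up control register 0 accordingly.
 * Facility numbers used below: 153 (PCI MIO instructions), 139
 * (multiple-epoch), 50 and 73 (transactional execution) - meanings
 * inferred from the machine features they set.
 */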
static void detect_facilities(void)
{
        if (cpu_has_edat1())
                local_ctl_set_bit(0, CR0_EDAT_BIT);
        page_noexec_mask = -1UL;
        segment_noexec_mask = -1UL;
        region_noexec_mask = -1UL;
        if (!cpu_has_nx()) {
                page_noexec_mask &= ~_PAGE_NOEXEC;
                segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
                region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
        }
        if (IS_ENABLED(CONFIG_PCI) && test_facility(153))
                set_machine_feature(MFEATURE_PCI_MIO);
        reset_tod_clock();
        if (test_facility(139) && (tod_clock_base.tod >> 63)) {
                /* Enable signed clock comparator comparisons */
                set_machine_feature(MFEATURE_SCC);
                clock_comparator_max = -1UL >> 1;
                local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
        }
        if (test_facility(50) && test_facility(73)) {
                set_machine_feature(MFEATURE_TX);
                local_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
        }
        if (cpu_has_vx())
                local_ctl_set_bit(0, CR0_VECTOR_BIT);
}

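/*
 * Test whether the ESSA instruction (opcode 0xb9ab) is available by
 * executing an ESSA_GET_STATE operation. Returns 0 on success; if the
 * instruction program checks, the exception table entry leaves rc at 1.
 */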
static int cmma_test_essa(void)
{
        unsigned long tmp = 0;
        int rc = 1;

        /* Test ESSA_GET_STATE */
        asm_inline volatile(
                " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
                "0: lhi %[rc],0\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : [rc] "+d" (rc), [tmp] "+d" (tmp)
                : [cmd] "i" (ESSA_GET_STATE)
                : "cc", "memory");
        return rc;
}

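/*
 * cmma_flag: 0 = CMMA off, 1 = basic CMMA, 2 = enhanced CMMA
 * (facility 147 - assumed here to indicate the ESSA no-DAT extension).
 */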
static void cmma_init(void)
{
        if (!cmma_flag)
                return;
        if (cmma_test_essa()) {
                cmma_flag = 0;
                return;
        }
        if (test_facility(147))
                cmma_flag = 2;
}

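/*
 * Initialize the load-program-parameter field (facility 40), which is
 * presumably picked up by the CPU-measurement facilities to tag
 * samples. The pid part is zeroed, as no tasks exist yet at this point.
 */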
static void setup_lpp(void)
{
        get_lowcore()->current_pid = 0;
        get_lowcore()->lpp = LPP_MAGIC;
        if (test_facility(40))
                lpp(&get_lowcore()->lpp);
}

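/*
 * With CONFIG_KERNEL_UNCOMPRESSED there is nothing to decompress:
 * "deploying" the kernel is a plain memmove() from its position in the
 * image to the (possibly randomized) target address. The variants of
 * these helpers for the compressed case are provided elsewhere in the
 * boot code.
 */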
#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
        return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
        void *uncompressed_start = (void *)_compressed_start;

        if (output == uncompressed_start)
                return;
        memmove(output, uncompressed_start, vmlinux.image_size);
        memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

static void rescue_initrd(unsigned long min, unsigned long max)
{
        unsigned long old_addr, addr, size;

        if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
                return;
        if (!get_physmem_reserved(RR_INITRD, &addr, &size))
                return;
        if (addr >= min && addr + size <= max)
                return;
        old_addr = addr;
        physmem_free(RR_INITRD);
        addr = physmem_alloc_or_die(RR_INITRD, size, 0);
        memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
        if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
                boot_panic(".boot.data section size mismatch\n");
        memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
        if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
                boot_panic(".boot.preserved.data section size mismatch\n");
        memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

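/*
 * The .vmlinux.relocs section is an array of 32-bit offsets of
 * R_390_64 relocation targets, relative to the kernel's physical start
 * address. Each target is a 64-bit value to which the KASLR offset is
 * applied.
 */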
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
                                unsigned long offset, unsigned long phys_offset)
{
        int *reloc;
        long loc;

        /* Adjust R_390_64 relocations */
        for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
                loc = (long)*reloc + phys_offset;
                if (loc < min_addr || loc > max_addr)
                        boot_panic("64-bit relocation outside of kernel!\n");
                *(u64 *)loc += offset;
        }
}

static void kaslr_adjust_got(unsigned long offset)
{
        u64 *entry;

        /*
         * Adjust GOT entries, except for those for undefined weak symbols
         * that resolved to zero. This also skips the first three reserved
         * entries on s390x, which are zero.
         */
        for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
                if (*entry)
                        *entry += offset;
        }
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory, but also standby (offline) memory
 * and memory areas reserved for other purposes (e.g., memory devices such as
 * virtio-mem).
 *
 * "ident_map_size" could be lower than the actual standby/reserved or even
 * online memory present, due to limiting factors. We should never go above
 * this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online, standby or reserved.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
        unsigned long hsa_size;

        ident_map_size = max_physmem_end;
        if (memory_limit)
                ident_map_size = min(ident_map_size, memory_limit);
        ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
        if (oldmem_data.start) {
                __kaslr_enabled = 0;
                ident_map_size = min(ident_map_size, oldmem_data.size);
                boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
        } else if (ipl_block_valid && is_ipl_block_dump()) {
                __kaslr_enabled = 0;
                if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
                        ident_map_size = min(ident_map_size, hsa_size);
                        boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
                }
        }
#endif
        boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
}

#define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
                                   unsigned long vmemmap_size,
                                   unsigned long vmalloc_size,
                                   unsigned long rte_size)
{
        unsigned long max_mappable, vsize;

        max_mappable = max(identity_size, MAX_DCSS_ADDR);
        vsize = round_up(SZ_2G + max_mappable, rte_size) +
                round_up(vmemmap_size, rte_size) +
                FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
        if (IS_ENABLED(CONFIG_KMSAN))
                vsize += MODULES_LEN * 2;
        return size_add(vsize, vmalloc_size);
}

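/*
 * Lay out the kernel virtual address space. From the top of the
 * address space (vmax) downwards: kernel image (randomized if KASLR is
 * enabled), modules area, vmalloc area, memcpy real area, absolute
 * lowcore mapping, vmemmap array, and below that the identity mapping
 * of physical memory.
 */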
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
        unsigned long vmemmap_start;
        unsigned long kernel_start;
        unsigned long asce_limit;
        unsigned long rte_size;
        unsigned long pages;
        unsigned long vsize;
        unsigned long vmax;

        pages = ident_map_size / PAGE_SIZE;
        /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
        vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

        /* choose kernel address space layout: 4 or 3 levels. */
        BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
        BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
        BUILD_BUG_ON(CONFIG_ILLEGAL_POINTER_VALUE < _REGION1_SIZE);
        vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
        boot_debug("vmem size estimated: 0x%016lx\n", vsize);
        if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
            (vsize > _REGION2_SIZE && kaslr_enabled())) {
                asce_limit = _REGION1_SIZE;
                if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
                        rte_size = _REGION2_SIZE;
                        vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
                } else {
                        rte_size = _REGION3_SIZE;
                }
        } else {
                asce_limit = _REGION2_SIZE;
                rte_size = _REGION3_SIZE;
        }

        /*
         * Force the modules and vmalloc areas below the ultravisor secure
         * storage limit, so that any vmalloc allocation we do can be used
         * to back secure guest storage.
         *
         * Assume the secure storage limit always exceeds _REGION2_SIZE,
         * otherwise asce_limit and rte_size would have been adjusted.
         */
        vmax = adjust_to_uv_max(asce_limit);
        boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN
        BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
        boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
        /* force vmalloc and modules below kasan shadow */
        vmax = min(vmax, KASAN_SHADOW_START);
#endif
        vsize = min(vsize, vmax);
        if (kaslr_enabled()) {
                unsigned long kernel_end, kaslr_len, slots, pos;

                kaslr_len = max(KASLR_LEN, vmax - vsize);
                slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
                if (get_random(slots, &pos))
                        pos = 0;
                kernel_end = vmax - pos * THREAD_SIZE;
                kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
                boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
                boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
                           kernel_start + kernel_size);
        } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
                kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
                boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
                           kernel_start + kernel_size);
        } else {
                kernel_start = __NO_KASLR_START_KERNEL;
                boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
                           kernel_start + kernel_size);
        }
        __kaslr_offset = kernel_start;
        boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);

        MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
        MODULES_VADDR = MODULES_END - MODULES_LEN;
        VMALLOC_END = MODULES_VADDR;
        if (IS_ENABLED(CONFIG_KMSAN))
                VMALLOC_END -= MODULES_LEN * 2;
        boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);

        /* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
        vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
        vsize = round_down(vsize, _SEGMENT_SIZE);
        vmalloc_size = min(vmalloc_size, vsize);
        if (IS_ENABLED(CONFIG_KMSAN)) {
                /* take 2/3 of vmalloc area for KMSAN shadow and origins */
                vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
                VMALLOC_END -= vmalloc_size * 2;
        }
        VMALLOC_START = VMALLOC_END - vmalloc_size;
        boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);

        __memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
        boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
                   __memcpy_real_area + MEMCPY_REAL_SIZE);
        __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
                                   sizeof(struct lowcore));
        boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
                   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);

        /* split remaining virtual space between 1:1 mapping & vmemmap array */
        pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
        pages = SECTION_ALIGN_UP(pages);
        /* keep vmemmap_start aligned to a top level region table entry */
        vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
        /* make sure the identity map doesn't overlap with vmemmap */
        ident_map_size = min(ident_map_size, vmemmap_start);
        vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
        /* make sure vmemmap doesn't overlap with the absolute lowcore area */
        if (vmemmap_start + vmemmap_size > __abs_lowcore) {
                vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
                ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
        }
        vmemmap = (struct page *)vmemmap_start;
        /* maximum address for which linear mapping could be created (DCSS, memory) */
        BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
        max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
        max_mappable = min(max_mappable, vmemmap_start);
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
        if (kaslr_enabled())
                __identity_base = round_down(vmemmap_start - max_mappable, rte_size);
#endif
        boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
                   __identity_base + ident_map_size);

        return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel
 * and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
        memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to an 8th of the (potential) physical
 * memory size, unless the size has been set via kernel command line
 * parameter.
 */
static void setup_vmalloc_size(void)
{
        unsigned long size;

        if (vmalloc_size_set)
                return;
        size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
        vmalloc_size = max(size, vmalloc_size);
}

static void kaslr_adjust_vmlinux_info(long offset)
{
        vmlinux.bootdata_off += offset;
        vmlinux.bootdata_preserved_off += offset;
        vmlinux.got_start += offset;
        vmlinux.got_end += offset;
        vmlinux.init_mm_off += offset;
        vmlinux.swapper_pg_dir_off += offset;
        vmlinux.invalid_pg_dir_off += offset;
        vmlinux.alt_instructions += offset;
        vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_STACKPROTECTOR
        vmlinux.stack_prot_start += offset;
        vmlinux.stack_prot_end += offset;
#endif
#ifdef CONFIG_KASAN
        vmlinux.kasan_early_shadow_page_off += offset;
        vmlinux.kasan_early_shadow_pte_off += offset;
        vmlinux.kasan_early_shadow_pmd_off += offset;
        vmlinux.kasan_early_shadow_pud_off += offset;
        vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

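/*
 * Entry point of the boot phase C code. Collects machine and firmware
 * information, sizes and randomizes the memory layout, places the
 * kernel image and the .amode31 section, applies relocations and
 * alternatives, and finally jumps to the decompressed kernel with a
 * new PSW.
 */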
void startup_kernel(void)
{
        unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
        unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
        unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
        unsigned long kaslr_large_page_offset;
        unsigned long max_physmem_end;
        unsigned long asce_limit;
        unsigned long safe_addr;
        psw_t psw;

        setup_lpp();
        store_ipl_parmblock();
        uv_query_info();
        setup_boot_command_line();
        parse_boot_command_line();

        /*
         * The non-randomized kernel physical start address must be
         * _SEGMENT_SIZE aligned (see below).
         */
        nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
        safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

        /*
         * Reserve decompressor memory together with decompression heap,
         * buffer and memory which might be occupied by uncompressed kernel
         * (if KASLR is off or failed).
         */
        physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
                physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
        oldmem_data.start = parmarea.oldmem_base;
        oldmem_data.size = parmarea.oldmem_size;

        read_ipl_report();
        sclp_early_read_info();
        sclp_early_detect_machine_features();
        detect_facilities();
        detect_diag9c();
        detect_machine_type();
        /* detect_diag288() needs machine type */
        detect_diag288();
        cmma_init();
        sanitize_prot_virt_host();
        max_physmem_end = detect_max_physmem_end();
        setup_ident_map_size(max_physmem_end);
        setup_vmalloc_size();
        asce_limit = setup_kernel_memory_layout(kernel_size);
        /* got final ident_map_size, physmem allocations could be performed now */
        physmem_set_usable_limit(ident_map_size);
        detect_physmem_online_ranges(max_physmem_end);
        save_ipl_cert_comp_list();
        rescue_initrd(safe_addr, ident_map_size);

        /*
         * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
         * 20 bits (the offset within a large page) are zero. Copy the last
         * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
         * __kaslr_offset_phys.
         *
         * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
         * are identical, which is required to allow for large mappings of the
         * kernel image.
         */
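        /*
         * Worked example, assuming 16K THREAD_SIZE and 1M _SEGMENT_SIZE:
         * if __kaslr_offset ends in 0x7c000, then kaslr_large_page_offset
         * below is 0x7c000 and text_lma becomes a 1M aligned address ORed
         * with 0x7c000, so both share the same offset within a large page.
         */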
        kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
        if (kaslr_enabled()) {
                unsigned long size = vmlinux_size + kaslr_large_page_offset;

                text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
        }
        if (!text_lma)
                text_lma = nokaslr_text_lma;
        text_lma |= kaslr_large_page_offset;

        /*
         * The [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region
         * is never accessed via the kernel image mapping as per the linker
         * script:
         *
         *      . = TEXT_OFFSET;
         *
         * Therefore, this region could be used for something else and does
         * not need to be reserved. See how it is skipped in setup_vmem().
         */
        __kaslr_offset_phys = text_lma - TEXT_OFFSET;
        kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
        physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
        deploy_kernel((void *)text_lma);

        /* vmlinux decompression is done, shrink reserved low memory */
        physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

        /*
         * In case KASLR is enabled the randomized location of the .amode31
         * section might overlap with the .vmlinux.relocs section. To avoid
         * that, the below randomize_within_range() could have been called
         * with __vmlinux_relocs_64_end as the lower range address. However,
         * the .amode31 section is written to by the decompressed kernel - at
         * that time the contents of .vmlinux.relocs is not needed anymore.
         * Conversely, .vmlinux.relocs is read only by the decompressor, even
         * before the kernel is started. Therefore, in case the two sections
         * overlap there is no risk of corrupting any data.
         */
        if (kaslr_enabled()) {
                unsigned long amode31_min;

                amode31_min = (unsigned long)_decompressor_end;
                amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
        }
        if (!amode31_lma)
                amode31_lma = text_lma - vmlinux.amode31_size;
        physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

        /*
         * The order of the following operations is important:
         *
         * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
         *   static memory references to data in .bss to be used by setup_vmem()
         *   (i.e., init_mm.pgd)
         *
         * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
         *   static memory references to data in .bss (i.e., init_mm.pgd)
         *
         * - copy_bootdata() must follow setup_vmem() to propagate changes
         *   to bootdata made by setup_vmem()
         */
        clear_bss_section(text_lma);
        kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
                            __kaslr_offset, __kaslr_offset_phys);
        kaslr_adjust_got(__kaslr_offset);
        setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
        dump_physmem_reserved();
        copy_bootdata();
        __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
                             (struct alt_instr *)_vmlinux_info.alt_instructions_end,
                             ALT_CTX_EARLY);
        stack_protector_apply_early(text_lma);

        /*
         * Save the KASLR offset for early dumps, before vmcore_info is set.
         * Mark it as odd to distinguish it from a real vmcore_info pointer.
         */
        get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

        /*
         * Jump to the decompressed kernel entry point and switch DAT mode on.
         */
        psw.addr = __kaslr_offset + vmlinux.entry;
        psw.mask = PSW_KERNEL_BITS;
        boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
        jump_to_kernel(&psw);
}