xref: /linux/arch/s390/boot/startup.c (revision 9cc220a422113f665e13364be1411c7bba9e3e30)
// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/diag288.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/sysinfo.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata_preserved(page_noexec_mask);
unsigned long __bootdata_preserved(segment_noexec_mask);
unsigned long __bootdata_preserved(region_noexec_mask);
union tod_clock __bootdata_preserved(tod_clock_base);
u64 __bootdata_preserved(clock_comparator_max) = -1UL;

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		set_machine_feature(MFEATURE_LPAR);
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;
	/* Detect known hypervisors */
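	/*
	 * The control-program identifier (cpi) is stored in EBCDIC:
	 * 0xd2 0xe5 0xd4 decodes to "KVM", 0xa9 0x61 0xe5 0xd4 to "z/VM".
	 */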
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		set_machine_feature(MFEATURE_KVM);
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		set_machine_feature(MFEATURE_VM);
}

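/*
 * Probe the diagnose 0x288 watchdog: try to initialize it with the minimum
 * interval and, if that succeeds, cancel it right away and record the
 * feature. Under z/VM the timeout action is a CP command ("BEGIN"), on LPAR
 * it is a restart.
 */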
static void detect_diag288(void)
{
	/* "BEGIN" in EBCDIC character set */
	static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5";
	unsigned long action, len;

	action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART;
	len = machine_is_vm() ? sizeof(cmd) : 0;
	if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len))
		return;
	__diag288(WDT_FUNC_CANCEL, 0, 0, 0);
	set_machine_feature(MFEATURE_DIAG288);
}

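/*
 * Probe diagnose 0x9c (directed yield) by issuing it against this CPU's own
 * address (stap()). If the diagnose is not available, the instruction raises
 * a program check, which the EX_TABLE fixup turns into rc != 0.
 */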
static void detect_diag9c(void)
{
	unsigned int cpu;
	int rc = 1;

	cpu = stap();
	asm_inline volatile(
		"	diag	%[cpu],%%r0,0x9c\n"
		"0:	lhi	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+d" (rc)
		: [cpu] "d" (cpu)
		: "cc", "memory");
	if (!rc)
		set_machine_feature(MFEATURE_DIAG9C);
}

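/*
 * store_tod_clock_ext_cc() returns the STCKE condition code; a nonzero cc
 * means the TOD clock is not usable (not set, in error, or stopped), in
 * which case the clock is started at the Unix epoch.
 */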
static void reset_tod_clock(void)
{
	union tod_clock clk;

	if (store_tod_clock_ext_cc(&clk) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
		disabled_wait();
	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
	tod_clock_base.tod = TOD_UNIX_EPOCH;
	get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}

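/*
 * Facility bits tested below (as used elsewhere in the kernel): 153 is the
 * PCI MIO-instructions facility, 139 the multiple-epoch facility (which
 * allows signed clock-comparator comparisons), and 50 plus 73 together
 * indicate transactional execution (including the constrained variant).
 */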
static void detect_facilities(void)
{
	if (cpu_has_edat1())
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	page_noexec_mask = -1UL;
	segment_noexec_mask = -1UL;
	region_noexec_mask = -1UL;
	if (!cpu_has_nx()) {
		page_noexec_mask &= ~_PAGE_NOEXEC;
		segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
		region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
	}
	if (IS_ENABLED(CONFIG_PCI) && test_facility(153))
		set_machine_feature(MFEATURE_PCI_MIO);
	reset_tod_clock();
	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
		/* Enable signed clock comparator comparisons */
		set_machine_feature(MFEATURE_SCC);
		clock_comparator_max = -1UL >> 1;
		local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
	}
	if (test_facility(50) && test_facility(73)) {
		set_machine_feature(MFEATURE_TX);
		local_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
	}
	if (cpu_has_vx())
		local_ctl_set_bit(0, CR0_VECTOR_BIT);
}

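/*
 * .insn rrf,0xb9ab0000 encodes ESSA (extract and set storage attributes).
 * If CMMA is not available the instruction raises a program check, which
 * the EX_TABLE fixup converts into a nonzero return code.
 */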
static int cmma_test_essa(void)
{
	unsigned long tmp = 0;
	int rc = 1;

	/* Test ESSA_GET_STATE */
	asm_inline volatile(
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"0:	lhi	%[rc],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [rc] "+d" (rc), [tmp] "+d" (tmp)
		: [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

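/*
 * cmma_flag: 0 = CMMA unavailable or disabled, 1 = ESSA works, 2 = facility
 * 147 is also installed (presumably the ESSA no-DAT enhancement, allowing
 * the stable-no-DAT states to be used).
 */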
static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

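/*
 * Facility 40 is the load-program-parameter facility. The LPP value set
 * here tags hardware samples (e.g. CPU measurement facility data) so they
 * can be attributed to this Linux instance.
 */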
static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

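/*
 * With CONFIG_KERNEL_UNCOMPRESSED the kernel image already sits at
 * _compressed_start, so "deploying" it is a plain memmove. For compressed
 * kernels the corresponding helpers are presumably provided by the
 * decompressor code instead (see decompressor.h).
 */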
#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_or_die(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		boot_panic(".boot.data section size mismatch\n");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		boot_panic(".boot.preserved.data section size mismatch\n");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

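/*
 * Each 32-bit entry in the .vmlinux.relocs section is the location of an
 * R_390_64 relocation target; adding phys_offset yields the physical address
 * of the 8-byte slot, whose content is then biased by the virtual KASLR
 * offset.
 */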
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			boot_panic("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset;
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not be all online memory, but may also include standby
 * (offline) memory or memory areas reserved for other purposes (e.g., memory
 * devices such as virtio-mem).
 *
 * "ident_map_size" could be lower than the actual standby/reserved or even
 * online memory present, due to limiting factors. We should never go above
 * this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online, standby or reserved.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
		boot_debug("kdump memory limit:  0x%016lx\n", oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
			ident_map_size = min(ident_map_size, hsa_size);
			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
		}
	}
#endif
	boot_debug("Identity map size:   0x%016lx\n", ident_map_size);
}

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	if (IS_ENABLED(CONFIG_KMSAN))
		vsize += MODULES_LEN * 2;
	return size_add(vsize, vmalloc_size);
}

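/*
 * Resulting virtual address space layout, from top to bottom (a sketch
 * derived from the assignments below): kernel image (KASLR or fixed),
 * modules, KMSAN module shadow/origins (if enabled), vmalloc, memcpy real
 * area, absolute lowcore mapping, vmemmap array, and the identity mapping
 * at the lowest addresses.
 */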
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force the modules and vmalloc areas below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
	vmax = adjust_to_uv_max(asce_limit);
	boot_debug("%d level paging       0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	boot_debug("KASAN shadow area:   0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
		boot_debug("kernel image:        0x%016lx-0x%016lx (kaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		boot_debug("kernel image:        0x%016lx-0x%016lx (constrained)\n", kernel_start,
			   kernel_start + kernel_size);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
		boot_debug("kernel image:        0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	}
	__kaslr_offset = kernel_start;
	boot_debug("__kaslr_offset:      0x%016lx\n", __kaslr_offset);

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;
	boot_debug("modules area:        0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);

	/* allow the vmalloc area to occupy up to about half of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of vmalloc area for KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;
	boot_debug("vmalloc area:        0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	boot_debug("memcpy real area:    0x%016lx-0x%016lx\n", __memcpy_real_area,
		   __memcpy_real_area + MEMCPY_REAL_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	boot_debug("abs lowcore:         0x%016lx-0x%016lx\n", __abs_lowcore,
		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
#endif
	boot_debug("identity map:        0x%016lx-0x%016lx\n", __identity_base,
		   __identity_base + ident_map_size);

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set the vmalloc area size to one eighth of the (potential) physical memory
 * size, unless the size has been set via kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

void startup_kernel(void)
{
	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
	unsigned long kaslr_large_page_offset;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	setup_lpp();
	store_ipl_parmblock();
	uv_query_info();
	setup_boot_command_line();
	parse_boot_command_line();

	/*
	 * The non-randomized kernel physical start address must be
	 * _SEGMENT_SIZE aligned (see below).
	 */
	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

	/*
	 * Reserve decompressor memory together with the decompression heap,
	 * buffer and memory which might be occupied by the uncompressed
	 * kernel (if KASLR is off or has failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	read_ipl_report();
	sclp_early_read_info();
	sclp_early_detect_machine_features();
	detect_facilities();
	detect_diag9c();
	detect_machine_type();
	/* detect_diag288() needs machine type */
	detect_diag288();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* got final ident_map_size, physmem allocations could be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
	 */
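	/*
	 * Illustrative numbers only (assuming 1 MB segments): with
	 * __kaslr_offset = ...fff234c000, kaslr_large_page_offset is
	 * 0x4c000; a segment-aligned physical base such as 0x3800000 then
	 * becomes text_lma = 0x384c000, so both mappings share the low
	 * 20 bits.
	 */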
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long size = vmlinux_size + kaslr_large_page_offset;

		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
	}
	if (!text_lma)
		text_lma = nokaslr_text_lma;
	text_lma |= kaslr_large_page_offset;

	/*
	 * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is
	 * never accessed via the kernel image mapping as per the linker script:
	 *
	 *	. = TEXT_OFFSET;
	 *
	 * Therefore, this region could be used for something else and does
	 * not need to be reserved. See how it is skipped in setup_vmem().
	 */
	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
	deploy_kernel((void *)text_lma);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled the randomized location of the .amode31
	 * section might overlap with the .vmlinux.relocs section. To avoid
	 * that, randomize_within_range() below could have been called with
	 * __vmlinux_relocs_64_end as the lower range address. However, the
	 * .amode31 section is written to by the decompressed kernel - at
	 * that time the contents of .vmlinux.relocs are not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
	 * before the kernel is started. Therefore, in case the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = text_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(text_lma);
	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	dump_physmem_reserved();
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as odd to distinguish it from a real vmcore_info pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	boot_debug("Starting kernel at:  0x%016lx\n", psw.addr);
	jump_to_kernel(&psw);
}