xref: /linux/arch/s390/boot/startup.c (revision cd59f1d80a6d01326e37318218a072a46899d237)
// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata_preserved(page_noexec_mask);
unsigned long __bootdata_preserved(segment_noexec_mask);
unsigned long __bootdata_preserved(region_noexec_mask);
int __bootdata_preserved(relocate_lowcore);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	boot_emerg("%s\n", x);
	boot_emerg(" -- System halted\n");
	disabled_wait();
}

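/*
 * Detect EDAT-1/EDAT-2 support and initialize the no-execute protection
 * masks; without the instruction-execution-protection facility (130) the
 * NOEXEC bits are masked out at all table entry levels.
 */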
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	page_noexec_mask = -1UL;
	segment_noexec_mask = -1UL;
	region_noexec_mask = -1UL;
	if (!test_facility(130)) {
		page_noexec_mask &= ~_PAGE_NOEXEC;
		segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
		region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
	}
}

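/*
 * Probe for the ESSA instruction by issuing an ESSA_GET_STATE operation
 * with a temporary program check handler in place. Returns 0 if the
 * instruction completed, 1 if it triggered a program check.
 */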
static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/* Test ESSA_GET_STATE */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "+&d" (tmp),
		  "+Q" (get_lowcore()->program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

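/*
 * Keep collaborative memory management enabled only if the ESSA
 * instruction is actually available; bump cmma_flag to 2 when facility
 * 147 is installed, which allows additional page states to be used later.
 */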
static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

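/*
 * Set the program parameter to LPP_MAGIC via the LPP instruction if the
 * load-program-parameter facility (40) is installed, so that CPU
 * measurement samples taken this early can be attributed to the kernel.
 */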
static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

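/*
 * When the kernel image is built uncompressed there is no decompression
 * step: deploy_kernel() only moves the image to its final physical
 * location (if that differs from the load address) and clears the old
 * copy.
 */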
#ifdef CONFIG_KERNEL_UNCOMPRESSED
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

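/*
 * Move the initrd if it does not lie completely within the [min, max]
 * range of usable memory, e.g. if it would be overwritten during
 * decompression or sits above the identity mapping limit.
 */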
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_or_die(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

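/*
 * Copy the .boot.data and .boot.preserved.data sections of the
 * decompressor over to their counterparts in the decompressed kernel
 * image.
 */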
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

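/*
 * Walk the 64-bit relocation table generated at build time and add the
 * virtual KASLR offset to every relocation target; a target outside the
 * physically deployed kernel image ([min_addr, max_addr]) indicates
 * corruption and stops the boot.
 */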
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset;
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It covers not only online memory, but may also include standby
 * (offline) memory and memory areas reserved for other purposes (e.g., memory
 * devices such as virtio-mem).
 *
 * "ident_map_size" could be lower than the actual amount of standby/reserved
 * or even online memory present, due to limiting factors. We should never go
 * above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online, standby or reserved.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
		boot_debug("kdump memory limit:  0x%016lx\n", oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
			ident_map_size = min(ident_map_size, hsa_size);
			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
		}
	}
#endif
	boot_debug("Identity map size:   0x%016lx\n", ident_map_size);
}

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

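/*
 * Conservative estimate of the virtual address space needed for the
 * identity mapping, the vmemmap array and the vmalloc, modules, fixmap
 * and KASLR areas.
 */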
static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	if (IS_ENABLED(CONFIG_KMSAN))
		vsize += MODULES_LEN * 2;
	return size_add(vsize, vmalloc_size);
}

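/*
 * Lay out the kernel virtual address space: choose between 3 and 4 page
 * table levels, place the kernel image (randomized if KASLR is enabled)
 * below vmax, and carve out the modules, vmalloc, memcpy real, absolute
 * lowcore, vmemmap and identity mapping areas below it. Returns the
 * selected asce_limit.
 */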
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Forcing modules and vmalloc area under the ultravisor
	 * secure storage limit, so that any vmalloc allocation
	 * we do could be used to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
	vmax = adjust_to_uv_max(asce_limit);
	boot_debug("%d level paging       0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	boot_debug("KASAN shadow area:   0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
		boot_debug("kernel image:        0x%016lx-0x%016lx (kaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		boot_debug("kernel image:        0x%016lx-0x%016lx (constrained)\n", kernel_start,
			   kernel_start + kernel_size);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
		boot_debug("kernel image:        0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
			   kernel_start + kernel_size);
	}
	__kaslr_offset = kernel_start;
	boot_debug("__kaslr_offset:      0x%016lx\n", __kaslr_offset);

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;
	boot_debug("modules area:        0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);

	/* allow vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of vmalloc area for KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;
	boot_debug("vmalloc area:        0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	boot_debug("memcpy real area:    0x%016lx-0x%016lx\n", __memcpy_real_area,
		   __memcpy_real_area + MEMCPY_REAL_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	boot_debug("abs lowcore:         0x%016lx-0x%016lx\n", __abs_lowcore,
		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure identity map does not overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap does not overlap with absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
#endif
	boot_debug("identity map:        0x%016lx-0x%016lx\n", __identity_base,
		   __identity_base + ident_map_size);

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

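/* Apply the physical KASLR offset to the addresses stored in the vmlinux info block. */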
static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

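/*
 * Main C entry point of the decompressor: determines the physical and
 * virtual memory layout, deploys and relocates the decompressed kernel,
 * sets up the early page tables and finally jumps to the kernel entry
 * point with DAT enabled.
 */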
void startup_kernel(void)
{
	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
	unsigned long kaslr_large_page_offset;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	setup_lpp();
	store_ipl_parmblock();
	uv_query_info();
	setup_boot_command_line();
	parse_boot_command_line();

	/*
	 * Non-randomized kernel physical start address must be _SEGMENT_SIZE
	 * aligned (see below).
	 */
	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

	/*
	 * Reserve decompressor memory together with decompression heap,
	 * buffer and memory which might be occupied by uncompressed kernel
	 * (if KASLR is off or failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	read_ipl_report();
	sclp_early_read_info();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* got final ident_map_size, physmem allocations could be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long size = vmlinux_size + kaslr_large_page_offset;

		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
	}
	if (!text_lma)
		text_lma = nokaslr_text_lma;
	text_lma |= kaslr_large_page_offset;

	/*
	 * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is
	 * never accessed via the kernel image mapping as per the linker script:
	 *
	 *	. = TEXT_OFFSET;
	 *
	 * Therefore, this region could be used for something else and does
	 * not need to be reserved. See how it is skipped in setup_vmem().
	 */
	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
	deploy_kernel((void *)text_lma);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled the randomized location of .amode31
	 * section might overlap with .vmlinux.relocs section. To avoid that
	 * the below randomize_within_range() could have been called with
	 * __vmlinux_relocs_64_end as the lower range address. However,
	 * .amode31 section is written to by the decompressed kernel - at
	 * that time the contents of .vmlinux.relocs is not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
	 * before the kernel started. Therefore, in case the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = text_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(text_lma);
	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	dump_physmem_reserved();
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);

	/*
	 * Save KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as odd to distinguish it from a real vmcore_info pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	boot_debug("Starting kernel at:  0x%016lx\n", psw.addr);
	__load_psw(psw);
}