xref: /linux/arch/s390/boot/startup.c (revision c98d2ecae08f02bd2dccd24e7e485e9f0211db65)
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);

u64 __bootdata_preserved(stfle_fac_list[16]);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

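/*
 * Probe the facilities needed early: facility 8 is enhanced DAT 1 (EDAT1),
 * facility 78 is enhanced DAT 2 (EDAT2), facility 130 is instruction
 * execution protection (NX).
 */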
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}

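/*
 * Check whether the ESSA instruction is available: execute ESSA_GET_STATE
 * under a temporary program check new PSW that points behind the
 * instruction. If ESSA traps, rc stays 1; otherwise it is cleared to 0
 * before the original program check new PSW is restored.
 */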
static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/* Test ESSA_GET_STATE */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "=&d" (tmp),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

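/*
 * Validate the cmma= setting: clear cmma_flag if ESSA is not available,
 * bump it to 2 if facility 147 is installed as well.
 */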
static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

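/*
 * Set the load-program-parameter (facility 40) to LPP_MAGIC, presumably so
 * that CPU measurement samples taken before the first context switch can
 * be attributed to the kernel.
 */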
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

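/*
 * Move the initrd if it does not fit completely into the [min, max] range,
 * i.e. if it overlaps the memory reserved for the decompressor or extends
 * beyond the usable limit.
 */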
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

#ifdef CONFIG_PIE_BUILD
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + phys_offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

static void kaslr_adjust_got(unsigned long offset) {}
static void rescue_relocs(void) {}
static void free_relocs(void) {}
#else
static int *vmlinux_relocs_64_start;
static int *vmlinux_relocs_64_end;

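/*
 * Copy the 64-bit relocation table out of the decompressor image into its
 * own reserved block (RR_RELOC), so that it stays intact until
 * kaslr_adjust_relocs() has run, even if its original location is reused.
 */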
static void rescue_relocs(void)
{
	unsigned long size = __vmlinux_relocs_64_end - __vmlinux_relocs_64_start;

	vmlinux_relocs_64_start = (void *)physmem_alloc_top_down(RR_RELOC, size, 0);
	vmlinux_relocs_64_end = (void *)vmlinux_relocs_64_start + size;
	memmove(vmlinux_relocs_64_start, __vmlinux_relocs_64_start, size);
}

static void free_relocs(void)
{
	physmem_free(RR_RELOC);
}

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = vmlinux_relocs_64_start; reloc < vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Even without -fPIE, Clang still uses a global offset table for some
	 * reason. Adjust the GOT entries.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
		*entry += offset;
}
#endif

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not only cover online memory, but also include standby
 * (offline) memory. "ident_map_size" can be lower than the actual standby or
 * even online memory present, due to limiting factors. We should never go
 * above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

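/*
 * Estimate the virtual address space needed below vmax: the identity
 * mapping (at least MAX_DCSS_ADDR, offset by SZ_2G), vmemmap, the fixmap
 * (absolute lowcore plus memcpy real area), modules, the KASLR range and
 * vmalloc, rounded to region table entry granularity where required.
 */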
static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	return size_add(vsize, vmalloc_size);
}

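/*
 * Lay out the kernel virtual address space top-down: select a 3- or
 * 4-level page table limit, place the (possibly randomized) kernel image
 * just below vmax, followed downwards by modules, vmalloc, the memcpy
 * real area, absolute lowcore, vmemmap and finally the identity mapping.
 */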
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long kernel_start, kernel_end;
	unsigned long vmemmap_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
		vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force the modules and vmalloc areas below the ultravisor
	 * secure storage limit, so that any vmalloc allocation
	 * we do can be used to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	kernel_end = vmax;
	if (kaslr_enabled()) {
		unsigned long kaslr_len, slots, pos;

		vsize = min(vsize, vmax);
		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end -= pos * THREAD_SIZE;
	}
	kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
	__kaslr_offset = kernel_start;

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long vmlinux_lma)
{
	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

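/*
 * The vmlinux info structure contains physical addresses based on the
 * default load address; shift them by the physical KASLR offset so they
 * point into the relocated kernel image.
 */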
static void kaslr_adjust_vmlinux_info(unsigned long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
#ifdef CONFIG_PIE_BUILD
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
#else
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
#endif
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

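/*
 * Main C routine of the decompressor, entered from the early assembly
 * startup code with DAT still off: set up the memory layout, decompress
 * and relocate the kernel, then branch to its entry point via a new PSW
 * that enables DAT.
 */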
void startup_kernel(void)
{
	unsigned long max_physmem_end;
	unsigned long vmlinux_lma = 0;
	unsigned long amode31_lma = 0;
	unsigned long kernel_size;
	unsigned long asce_limit;
	unsigned long safe_addr;
	void *img;
	psw_t psw;

	setup_lpp();
	safe_addr = mem_safe_offset();

	/*
	 * Reserve the decompressor memory together with the decompression heap,
	 * buffer and memory which might be occupied by the uncompressed kernel
	 * at the default 1 MB position (if KASLR is off or has failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	kernel_size = vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* ident_map_size is final now, physmem allocations can be performed */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);
	rescue_relocs();

	if (kaslr_enabled()) {
		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
						     THREAD_SIZE, vmlinux.default_lma,
						     ident_map_size);
		if (vmlinux_lma) {
			__kaslr_offset_phys = vmlinux_lma - vmlinux.default_lma;
			kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
		}
	}
	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset_phys) {
		img = (void *)vmlinux.default_lma;
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
		memset(img, 0, vmlinux.image_size);
	}

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
	if (kaslr_enabled())
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(vmlinux_lma);
	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	free_relocs();
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	copy_bootdata();

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark the value as odd to distinguish it from a real vmcore_info pointer.
	 */
	S390_lowcore.vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}