// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

struct vm_layout __bootdata_preserved(vm_layout);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
int __bootdata_preserved(relocate_lowcore);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	boot_printk("\n\n%s\n\n -- System halted", x);
	disabled_wait();
}

static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}

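/*
 * Execute a single ESSA_GET_STATE operation to probe whether the ESSA
 * instruction is available. The program-check new PSW is temporarily
 * replaced so that an operation exception does not kill the boot: if
 * ESSA traps, execution continues at label 1 with rc still set to 1;
 * if it executes, rc is cleared to 0 first.
 */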
static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/* Test ESSA_GET_STATE */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "=&d" (tmp),
		  "+Q" (get_lowcore()->program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&get_lowcore()->program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}

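/*
 * Resolve the final CMMA mode: 0 if disabled or ESSA is unavailable,
 * 1 for the basic ESSA page states, 2 if the extended (stable no-DAT)
 * states indicated by facility 147 may be used as well.
 */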
static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

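/*
 * Initialize the load-program-parameter (facility 40): the LPP value is
 * stored with CPU-measurement samples, so seed it with LPP_MAGIC and
 * PID 0 to make early boot samples identifiable.
 */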
static void setup_lpp(void)
{
	get_lowcore()->current_pid = 0;
	get_lowcore()->lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&get_lowcore()->lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
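/*
 * With CONFIG_KERNEL_UNCOMPRESSED the kernel payload is not actually
 * compressed: it already resides at _compressed_start and only needs to
 * be moved to its final (possibly randomized) location. The compressed
 * variants of these two helpers are provided by the decompressor (see
 * decompressor.h).
 */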
static unsigned long mem_safe_offset(void)
{
	return (unsigned long)_compressed_start;
}

static void deploy_kernel(void *output)
{
	void *uncompressed_start = (void *)_compressed_start;

	if (output == uncompressed_start)
		return;
	memmove(output, uncompressed_start, vmlinux.image_size);
	memset(uncompressed_start, 0, vmlinux.image_size);
}
#endif

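/*
 * Move the initrd to a freshly allocated top-down location unless it
 * already resides completely within the [min, max) range.
 */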
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}

static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

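/*
 * The .vmlinux.relocs section holds a table of 32-bit offsets of all
 * R_390_64 relocation targets within the image. For each entry, add
 * phys_offset to locate the 64-bit value in the deployed image, then
 * bias that value by the virtual KASLR offset.
 */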
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
				unsigned long offset, unsigned long phys_offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = (int *)__vmlinux_relocs_64_start; reloc < (int *)__vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + phys_offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Adjust GOT entries, except for ones for undefined weak symbols
	 * that resolved to zero. This also skips the first three reserved
	 * entries on s390x that are zero.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
		if (*entry)
			*entry += offset;
	}
}

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It need not be restricted to online memory: it may also include
 * standby (offline) memory or memory areas reserved for other purposes
 * (e.g., memory devices such as virtio-mem).
 *
 * "ident_map_size" could be lower than the actual standby/reserved or even
 * online memory present, due to limiting factors. We should never go above
 * this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online, standby or reserved.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}

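/*
 * Combined size of the fixed mappings placed directly below the vmalloc
 * area (memcpy real area and absolute lowcore mapping), rounded up so
 * that __abs_lowcore stays aligned to the lowcore size.
 */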
#define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))

static unsigned long get_vmem_size(unsigned long identity_size,
				   unsigned long vmemmap_size,
				   unsigned long vmalloc_size,
				   unsigned long rte_size)
{
	unsigned long max_mappable, vsize;

	max_mappable = max(identity_size, MAX_DCSS_ADDR);
	vsize = round_up(SZ_2G + max_mappable, rte_size) +
		round_up(vmemmap_size, rte_size) +
		FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
	if (IS_ENABLED(CONFIG_KMSAN))
		vsize += MODULES_LEN * 2;
	return size_add(vsize, vmalloc_size);
}

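/*
 * Carve up the virtual address space roughly top-down: the kernel image
 * sits highest (randomized if KASLR is enabled), below it the modules
 * area, then the vmalloc area, then the fixed mappings (memcpy real
 * area and absolute lowcore), then the vmemmap array, while the
 * identity mapping grows from the bottom.
 */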
static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
{
	unsigned long vmemmap_start;
	unsigned long kernel_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
		asce_limit = _REGION1_SIZE;
		if (__NO_KASLR_END_KERNEL > _REGION2_SIZE) {
			rte_size = _REGION2_SIZE;
			vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION2_SIZE);
		} else {
			rte_size = _REGION3_SIZE;
		}
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force the modules and vmalloc areas under the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can be used
	 * to back secure guest storage.
	 *
	 * Assume the secure storage limit always exceeds _REGION2_SIZE,
	 * otherwise asce_limit and rte_size would have been adjusted.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	vsize = min(vsize, vmax);
	if (kaslr_enabled()) {
		unsigned long kernel_end, kaslr_len, slots, pos;

		kaslr_len = max(KASLR_LEN, vmax - vsize);
		slots = DIV_ROUND_UP(kaslr_len - kernel_size, THREAD_SIZE);
		if (get_random(slots, &pos))
			pos = 0;
		kernel_end = vmax - pos * THREAD_SIZE;
		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
		boot_printk("The kernel base address is forced to %lx\n", kernel_start);
	} else {
		kernel_start = __NO_KASLR_START_KERNEL;
	}
	__kaslr_offset = kernel_start;

	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	if (IS_ENABLED(CONFIG_KMSAN))
		VMALLOC_END -= MODULES_LEN * 2;

	/* allow the vmalloc area to occupy up to about half of the remaining virtual space */
	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
	vsize = round_down(vsize, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	if (IS_ENABLED(CONFIG_KMSAN)) {
		/* take 2/3 of the vmalloc area for KMSAN shadow and origins */
		vmalloc_size = round_down(vmalloc_size / 3, _SEGMENT_SIZE);
		VMALLOC_END -= vmalloc_size * 2;
	}
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(__abs_lowcore - pages * sizeof(struct page), rte_size);
	/* make sure the identity map does not overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap does not overlap with the absolute lowcore area */
	if (vmemmap_start + vmemmap_size > __abs_lowcore) {
		vmemmap_size = SECTION_ALIGN_DOWN(ident_map_size / PAGE_SIZE) * sizeof(struct page);
		ident_map_size = vmemmap_size / sizeof(struct page) * PAGE_SIZE;
	}
	vmemmap = (struct page *)vmemmap_start;
	/* maximum address for which linear mapping could be created (DCSS, memory) */
	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
	max_mappable = min(max_mappable, vmemmap_start);
	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);

	return asce_limit;
}

/*
 * Clear the BSS section of the decompressed Linux kernel image, NOT the BSS
 * of the decompressor itself.
 */
static void clear_bss_section(unsigned long kernel_start)
{
	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}

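/*
 * Shift all physical addresses recorded in the vmlinux info block by the
 * KASLR physical offset, so that later consumers see the deployed
 * locations.
 */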
static void kaslr_adjust_vmlinux_info(long offset)
{
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
	vmlinux.alt_instructions += offset;
	vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}

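/*
 * Main C entry point of the boot phase, called from the early assembly
 * startup code with DAT still disabled. It ends by loading a PSW that
 * jumps into the decompressed kernel with DAT enabled.
 */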
void startup_kernel(void)
{
	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
	unsigned long kaslr_large_page_offset;
	unsigned long max_physmem_end;
	unsigned long asce_limit;
	unsigned long safe_addr;
	psw_t psw;

	setup_lpp();

	/*
	 * The non-randomized kernel physical start address must be
	 * _SEGMENT_SIZE aligned (see below).
	 */
	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);

	/*
	 * Reserve decompressor memory together with decompression heap,
	 * buffer and memory which might be occupied by the uncompressed
	 * kernel (if KASLR is off or failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout(kernel_size);
	/* ident_map_size is final now, physmem allocations can be performed */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);

	/*
	 * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower
	 * 20 bits (the offset within a large page) are zero. Copy the last
	 * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to
	 * __kaslr_offset_phys.
	 *
	 * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset
	 * are identical, which is required to allow for large mappings of the
	 * kernel image.
	 */
	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
	if (kaslr_enabled()) {
		unsigned long size = vmlinux_size + kaslr_large_page_offset;

		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
	}
	if (!text_lma)
		text_lma = nokaslr_text_lma;
	text_lma |= kaslr_large_page_offset;

	/*
	 * The [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region
	 * is never accessed via the kernel image mapping as per the linker
	 * script:
	 *
	 *	. = TEXT_OFFSET;
	 *
	 * Therefore, this region could be used for something else and does
	 * not need to be reserved. See how it is skipped in setup_vmem().
	 */
	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
	deploy_kernel((void *)text_lma);

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);

	/*
	 * In case KASLR is enabled the randomized location of the .amode31
	 * section might overlap with the .vmlinux.relocs section. To avoid
	 * that, the call to randomize_within_range() below could have used
	 * __vmlinux_relocs_64_end as the lower range address. However, the
	 * .amode31 section is written to by the decompressed kernel - at
	 * that point the contents of .vmlinux.relocs are not needed anymore.
	 * Conversely, .vmlinux.relocs is read only by the decompressor, even
	 * before the kernel is started. Therefore, even if the two sections
	 * overlap there is no risk of corrupting any data.
	 */
	if (kaslr_enabled()) {
		unsigned long amode31_min;

		amode31_min = (unsigned long)_decompressor_end;
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
	}
	if (!amode31_lma)
		amode31_lma = text_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(text_lma);
	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
			    __kaslr_offset, __kaslr_offset_phys);
	kaslr_adjust_got(__kaslr_offset);
	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
	copy_bootdata();
	__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
			     (struct alt_instr *)_vmlinux_info.alt_instructions_end,
			     ALT_CTX_EARLY);

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * The value is made odd to distinguish it from a real vmcore_info
	 * pointer.
	 */
	get_lowcore()->vmcore_info = __kaslr_offset_phys ? __kaslr_offset_phys | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = __kaslr_offset + vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}
