/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>

#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

unsigned long int_hwcap = 0;

int __initdata memory_end_set;
unsigned long __initdata memory_end;
unsigned long __initdata max_physmem_end;

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what has to be set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */
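
/*
 * Example usage (illustrative, not from this file):
 *   condev=0x001f   selects console device number 0x001f
 *                    (parsed with base 0, so hex needs the 0x prefix)
 *   conmode=3270    selects the 3270 console mode
 */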

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttyS", 1, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

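/*
 * Pick a console mode when none was given on the command line. Under
 * z/VM the replies of the CP commands "QUERY CONSOLE" and "QUERY TERM"
 * are parsed; the fixed offsets used below (+5, +13, +8) assume the
 * usual CP reply layout (an assumption about CP output, not verified
 * in this file).
 */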
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_CRASH_DUMP
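/*
 * For a zfcpdump IPL the command line is extended below so that
 * cio_ignore blacklists all CCW devices except the IPL and console
 * devices; this keeps device recognition in the dump kernel to the
 * bare minimum.
 */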
static void __init setup_zfcpdump(void)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (OLDMEM_BASE)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	memory_end &= PAGE_MASK;
	memory_end_set = 1;
	return 0;
}
early_param("mem", early_parse_mem);
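
/*
 * Example (illustrative): "mem=8G" on the kernel command line caps the
 * usable memory at 8 GiB; memparse() accepts the usual K/M/G suffixes.
 */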

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
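
/*
 * Example (illustrative): "vmalloc=512M" asks for 512 MiB of vmalloc
 * space; the value is rounded up to a page boundary and consumed by
 * setup_memory_end() below.
 */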

void *restart_stack __section(.data);

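/*
 * Sketch of what setup_lowcore() does: allocate a fresh lowcore for the
 * boot CPU from low memory, fill in the new PSWs for all interruption
 * classes, the kernel/async/panic stack pointers and the restart data,
 * then install it with set_prefix() so the hardware uses it from here on.
 */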
static void __init setup_lowcore(void)
{
	struct lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
	lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	lc->kernel_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->async_stack = (unsigned long)
		memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = (unsigned long)
		memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
	restart_stack += ASYNC_SIZE;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

#ifdef CONFIG_SMP
	lc->spinlock_lockval = arch_spin_lockval(0);
#endif

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

static struct resource code_resource = {
	.name  = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

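/*
 * Register each memblock region as a "System RAM" resource and nest the
 * kernel code/data/bss resources below it. A standard resource that
 * spans two RAM regions is split so that each part fits its parent.
 */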
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	struct memblock_region *reg;
	int j;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
	bss_resource.start = (unsigned long) &__bss_start;
	bss_resource.end = (unsigned long) &__bss_stop - 1;

	for_each_memblock(memory, reg) {
		res = memblock_virt_alloc(sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = reg->base;
		res->end = reg->base + reg->size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

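/*
 * Worked example for the level choice below (assuming a 64-byte
 * struct page and 4K pages): with 1 TB of memory the identity map
 * plus vmemmap need roughly 1 TB * (4096 + 64) / 4096, about 1.02 TB;
 * adding 128 GB of vmalloc/module space stays below 1UL << 42 (4 TB),
 * so a 3-level kernel page table suffices. Larger configurations
 * overflow the 4 TB limit and force the 4-level layout.
 */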
static void __init setup_memory_end(void)
{
	unsigned long vmax, vmalloc_size, tmp;

	/* Choose kernel address space layout: 2, 3, or 4 levels. */
	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
	if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = vmax - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, tmp);
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}

static void __init setup_vmcoreinfo(void)
{
	mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
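/*
 * Background (assumed from the s390 kdump design, not spelled out in
 * this file): before the kdump kernel starts, the lower memory region
 * is exchanged with the crashkernel region, so both ranges must stay
 * online for that swap to work.
 */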
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void reserve_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
	    !OLDMEM_BASE && sclp.hsa_size) {
		memory_end = sclp.hsa_size;
		memory_end &= PAGE_MASK;
		memory_end_set = 1;
	}
#endif
	if (!memory_end_set)
		return;
	memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Remove oldmem, where the dump is stored, from the memblock memory list
 */
static void remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
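/*
 * Example (illustrative): booting with "crashkernel=256M" reserves a
 * 256 MiB region for the kdump kernel; parse_crashkernel() also
 * accepts the "crashkernel=size@offset" form for a fixed base address.
 */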
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!INITRD_START || !INITRD_SIZE)
		return;
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE &&
	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(INITRD_START, INITRD_SIZE);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(&_end));

#ifdef CONFIG_DMA_API_DEBUG
	/*
	 * DMA_API_DEBUG code stumbles over addresses from the
	 * range [_ehead, _stext]. Mark the memory as reserved
	 * so it is not used for CONFIG_DMA_API_DEBUG=y.
	 */
	memblock_reserve(0, PFN_PHYS(start_pfn));
#else
	memblock_reserve(0, (unsigned long)_ehead);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
#endif
}

static void __init setup_memory(void)
{
	struct memblock_region *reg;

	/*
	 * Init storage key for present memory
	 */
	for_each_memblock(memory, reg) {
		storage_key_init_range(reg->base, reg->base + reg->size);
	}
	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

/*
 * Setup hardware capabilities.
 */
static int __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store-facility-list bits as found in the Principles of
	 * Operation are numbered with bit 1UL<<31 as number 0 to
	 * bit 1UL<<0 as number 31.
	 *   Bit 0: instructions named N3, "backported" to esa-mode
	 *   Bit 2: z/Architecture mode is active
	 *   Bit 7: the store-facility-list-extended facility is installed
	 *   Bit 17: the message-security assist is installed
	 *   Bit 19: the long-displacement facility is installed
	 *   Bit 21: the extended-immediate facility is installed
	 *   Bit 22: extended-translation facility 3 is installed
	 *   Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 *   Bit 42: decimal floating point facility is installed
	 *   Bit 44: perform floating point operation facility is installed
	 * translated to:
	 *   HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (test_facility(50) && test_facility(73))
		elf_hwcap |= HWCAP_S390_TE;

	/*
	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
	 * instead of facility bit 129.
	 */
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_S390_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_S390_VXRS_EXT;
		if (test_facility(135))
			elf_hwcap |= HWCAP_S390_VXRS_BCD;
	}

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	}

	/*
	 * Virtualization support HWCAP_INT_SIE is bit 0.
	 */
	if (sclp.has_sief2)
		int_hwcap |= HWCAP_INT_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
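
/*
 * Userspace reads elf_hwcap through the ELF auxiliary vector. A minimal
 * sketch (illustrative, not part of this file):
 *
 *	unsigned long hwcap = getauxval(AT_HWCAP);
 *	if (hwcap & HWCAP_S390_VXRS)
 *		... vector facility may be used ...
 */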

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}
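
/*
 * Numeric example (assuming 32 vector registers of 16 bytes and
 * 16 floating point registers of 8 bytes): without the vector
 * facility, task_struct shrinks by 32*16 - 16*8 = 384 bytes.
 */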

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has already been set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	/* Is init_mm really needed? */
	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_memory_end();
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	detect_memory_memblock();

	remove_oldmem();

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 *
	 * Is this still required?
	 */
	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));

	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(memory_end);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_vmcoreinfo();
	setup_lowcore();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	/* Setup zfcpdump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}
954