xref: /linux/arch/s390/kernel/setup.c (revision f673ed4d5fdc123b1552525de30741cd8dfde53f)
1 /*
2  *  S390 version
3  *    Copyright IBM Corp. 1999, 2012
4  *    Author(s): Hartmut Penner (hp@de.ibm.com),
5  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
6  *
7  *  Derived from "arch/i386/kernel/setup.c"
8  *    Copyright (C) 1995, Linus Torvalds
9  */
10 
11 /*
12  * This file handles the architecture-dependent parts of initialization
13  */
14 
15 #define KMSG_COMPONENT "setup"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 
18 #include <linux/errno.h>
19 #include <linux/export.h>
20 #include <linux/sched.h>
21 #include <linux/kernel.h>
22 #include <linux/memblock.h>
23 #include <linux/mm.h>
24 #include <linux/stddef.h>
25 #include <linux/unistd.h>
26 #include <linux/ptrace.h>
27 #include <linux/random.h>
28 #include <linux/user.h>
29 #include <linux/tty.h>
30 #include <linux/ioport.h>
31 #include <linux/delay.h>
32 #include <linux/init.h>
33 #include <linux/initrd.h>
34 #include <linux/bootmem.h>
35 #include <linux/root_dev.h>
36 #include <linux/console.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/device.h>
39 #include <linux/notifier.h>
40 #include <linux/pfn.h>
41 #include <linux/ctype.h>
42 #include <linux/reboot.h>
43 #include <linux/topology.h>
44 #include <linux/kexec.h>
45 #include <linux/crash_dump.h>
46 #include <linux/memory.h>
47 #include <linux/compat.h>
48 
49 #include <asm/ipl.h>
50 #include <asm/facility.h>
51 #include <asm/smp.h>
52 #include <asm/mmu_context.h>
53 #include <asm/cpcmd.h>
54 #include <asm/lowcore.h>
55 #include <asm/irq.h>
56 #include <asm/page.h>
57 #include <asm/ptrace.h>
58 #include <asm/sections.h>
59 #include <asm/ebcdic.h>
60 #include <asm/kvm_virtio.h>
61 #include <asm/diag.h>
62 #include <asm/os_info.h>
63 #include <asm/sclp.h>
64 #include <asm/sysinfo.h>
65 #include <asm/numa.h>
66 #include "entry.h"
67 
68 /*
69  * Machine setup.
70  */
71 unsigned int console_mode = 0;
72 EXPORT_SYMBOL(console_mode);
73 
74 unsigned int console_devno = -1;
75 EXPORT_SYMBOL(console_devno);
76 
77 unsigned int console_irq = -1;
78 EXPORT_SYMBOL(console_irq);
79 
80 unsigned long elf_hwcap __read_mostly = 0;
81 char elf_platform[ELF_PLATFORM_SIZE];
82 
83 unsigned long int_hwcap = 0;
84 
85 int __initdata memory_end_set;
86 unsigned long __initdata memory_end;
87 unsigned long __initdata max_physmem_end;
88 
89 unsigned long VMALLOC_START;
90 EXPORT_SYMBOL(VMALLOC_START);
91 
92 unsigned long VMALLOC_END;
93 EXPORT_SYMBOL(VMALLOC_END);
94 
95 struct page *vmemmap;
96 EXPORT_SYMBOL(vmemmap);
97 
98 unsigned long MODULES_VADDR;
99 unsigned long MODULES_END;
100 
101 /* An array with a pointer to the lowcore of every CPU. */
102 struct lowcore *lowcore_ptr[NR_CPUS];
103 EXPORT_SYMBOL(lowcore_ptr);
104 
105 /*
106  * This is set up by the setup-routine at boot-time
107  * for S390 we need to find out what we have to set up,
108  * using address 0x10400 ...
109  */
110 
111 #include <asm/setup.h>
112 
113 /*
114  * condev= and conmode= setup parameters.
115  */
116 
117 static int __init condev_setup(char *str)
118 {
119 	int vdev;
120 
121 	vdev = simple_strtoul(str, &str, 0);
122 	if (vdev >= 0 && vdev < 65536) {
123 		console_devno = vdev;
124 		console_irq = -1;
125 	}
126 	return 1;
127 }
128 
129 __setup("condev=", condev_setup);
130 
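/*
 * Register the preferred console with the console layer, depending on the
 * machine type and on the detected console mode.
 */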
131 static void __init set_preferred_console(void)
132 {
133 	if (MACHINE_IS_KVM) {
134 		if (sclp.has_vt220)
135 			add_preferred_console("ttyS", 1, NULL);
136 		else if (sclp.has_linemode)
137 			add_preferred_console("ttyS", 0, NULL);
138 		else
139 			add_preferred_console("hvc", 0, NULL);
140 	} else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
141 		add_preferred_console("ttyS", 0, NULL);
142 	else if (CONSOLE_IS_3270)
143 		add_preferred_console("tty3270", 0, NULL);
144 }
145 
146 static int __init conmode_setup(char *str)
147 {
148 #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
149 	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
150 		SET_CONSOLE_SCLP;
151 #endif
152 #if defined(CONFIG_TN3215_CONSOLE)
153 	if (strncmp(str, "3215", 5) == 0)
154 		SET_CONSOLE_3215;
155 #endif
156 #if defined(CONFIG_TN3270_CONSOLE)
157 	if (strncmp(str, "3270", 5) == 0)
158 		SET_CONSOLE_3270;
159 #endif
160 	set_preferred_console();
161 	return 1;
162 }
163 
164 __setup("conmode=", conmode_setup);
165 
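/*
 * Determine the default console mode. Under z/VM the CP commands
 * QUERY CONSOLE and QUERY TERM are used to find the console device and
 * the terminal mode; on other machines the SCLP console is used, if it
 * is configured.
 */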
166 static void __init conmode_default(void)
167 {
168 	char query_buffer[1024];
169 	char *ptr;
170 
171 	if (MACHINE_IS_VM) {
172 		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
173 		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
174 		ptr = strstr(query_buffer, "SUBCHANNEL =");
175 		console_irq = simple_strtoul(ptr + 13, NULL, 16);
176 		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
177 		ptr = strstr(query_buffer, "CONMODE");
178 		/*
179 		 * Set the conmode to 3215 so that the device recognition
180 		 * will set the cu_type of the console to 3215. If the
181 		 * conmode is 3270 and we don't set it back then both
182 		 * 3215 and the 3270 driver will try to access the console
183 		 * device (3215 as console and 3270 as normal tty).
184 		 */
185 		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
186 		if (ptr == NULL) {
187 #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
188 			SET_CONSOLE_SCLP;
189 #endif
190 			return;
191 		}
192 		if (strncmp(ptr + 8, "3270", 4) == 0) {
193 #if defined(CONFIG_TN3270_CONSOLE)
194 			SET_CONSOLE_3270;
195 #elif defined(CONFIG_TN3215_CONSOLE)
196 			SET_CONSOLE_3215;
197 #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
198 			SET_CONSOLE_SCLP;
199 #endif
200 		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
201 #if defined(CONFIG_TN3215_CONSOLE)
202 			SET_CONSOLE_3215;
203 #elif defined(CONFIG_TN3270_CONSOLE)
204 			SET_CONSOLE_3270;
205 #elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
206 			SET_CONSOLE_SCLP;
207 #endif
208 		}
209 	} else {
210 #if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
211 		SET_CONSOLE_SCLP;
212 #endif
213 	}
214 }
215 
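/*
 * If the kernel was IPLed from an FCP dump device (zfcpdump), restrict
 * channel I/O to the IPL and console devices and quiet the console,
 * unless this already is the kdump kernel.
 */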
216 #ifdef CONFIG_CRASH_DUMP
217 static void __init setup_zfcpdump(void)
218 {
219 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
220 		return;
221 	if (OLDMEM_BASE)
222 		return;
223 	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
224 	console_loglevel = 2;
225 }
226 #else
227 static inline void setup_zfcpdump(void) {}
228 #endif /* CONFIG_CRASH_DUMP */
229 
230 /*
231  * Reboot, halt and power_off stubs. They just call _machine_restart,
232  * _machine_halt or _machine_power_off.
233  */
234 
235 void machine_restart(char *command)
236 {
237 	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
238 		/*
239 		 * Only unblank the console if we are called in enabled
240 		 * context or a bust_spinlocks cleared the way for us.
241 		 */
242 		console_unblank();
243 	_machine_restart(command);
244 }
245 
246 void machine_halt(void)
247 {
248 	if (!in_interrupt() || oops_in_progress)
249 		/*
250 		 * Only unblank the console if we are called in enabled
251 		 * context or a bust_spinlocks cleared the way for us.
252 		 */
253 		console_unblank();
254 	_machine_halt();
255 }
256 
257 void machine_power_off(void)
258 {
259 	if (!in_interrupt() || oops_in_progress)
260 		/*
261 		 * Only unblank the console if we are called in enabled
262 		 * context or a bust_spinlocks cleared the way for us.
263 		 */
264 		console_unblank();
265 	_machine_power_off();
266 }
267 
268 /*
269  * Default power off function.
270  */
271 void (*pm_power_off)(void) = machine_power_off;
272 EXPORT_SYMBOL_GPL(pm_power_off);
273 
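/*
 * "mem=" kernel parameter: limit the amount of memory used by the kernel.
 */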
274 static int __init early_parse_mem(char *p)
275 {
276 	memory_end = memparse(p, &p);
277 	memory_end &= PAGE_MASK;
278 	memory_end_set = 1;
279 	return 0;
280 }
281 early_param("mem", early_parse_mem);
282 
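/*
 * "vmalloc=" kernel parameter: set the size of the vmalloc area.
 */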
283 static int __init parse_vmalloc(char *arg)
284 {
285 	if (!arg)
286 		return -EINVAL;
287 	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
288 	return 0;
289 }
290 early_param("vmalloc", parse_vmalloc);
291 
292 void *restart_stack __attribute__((__section__(".data")));
293 
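/*
 * Allocate and initialize the lowcore for the boot CPU: new PSWs for all
 * interruption classes, the kernel, async and panic stacks, and the PSW
 * restart information, which is also copied to the absolute zero lowcore.
 */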
294 static void __init setup_lowcore(void)
295 {
296 	struct lowcore *lc;
297 
298 	/*
299 	 * Setup lowcore for boot cpu
300 	 */
301 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
302 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
303 	lc->restart_psw.mask = PSW_KERNEL_BITS;
304 	lc->restart_psw.addr =
305 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
306 	lc->external_new_psw.mask = PSW_KERNEL_BITS |
307 		PSW_MASK_DAT | PSW_MASK_MCHECK;
308 	lc->external_new_psw.addr =
309 		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
310 	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
311 		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
312 	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
313 	lc->program_new_psw.mask = PSW_KERNEL_BITS |
314 		PSW_MASK_DAT | PSW_MASK_MCHECK;
315 	lc->program_new_psw.addr =
316 		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
317 	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
318 	lc->mcck_new_psw.addr =
319 		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
320 	lc->io_new_psw.mask = PSW_KERNEL_BITS |
321 		PSW_MASK_DAT | PSW_MASK_MCHECK;
322 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
323 	lc->clock_comparator = -1ULL;
324 	lc->kernel_stack = ((unsigned long) &init_thread_union)
325 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
326 	lc->async_stack = (unsigned long)
327 		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
328 		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
329 	lc->panic_stack = (unsigned long)
330 		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
331 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
332 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
333 	lc->thread_info = (unsigned long) &init_thread_union;
334 	lc->machine_flags = S390_lowcore.machine_flags;
335 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
336 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
337 	       MAX_FACILITY_BIT/8);
338 	if (MACHINE_HAS_VX)
339 		lc->vector_save_area_addr =
340 			(unsigned long) &lc->vector_save_area;
341 	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
342 	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
343 	lc->async_enter_timer = S390_lowcore.async_enter_timer;
344 	lc->exit_timer = S390_lowcore.exit_timer;
345 	lc->user_timer = S390_lowcore.user_timer;
346 	lc->system_timer = S390_lowcore.system_timer;
347 	lc->steal_timer = S390_lowcore.steal_timer;
348 	lc->last_update_timer = S390_lowcore.last_update_timer;
349 	lc->last_update_clock = S390_lowcore.last_update_clock;
350 
351 	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
352 	restart_stack += ASYNC_SIZE;
353 
354 	/*
355 	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
356 	 * restart data to the absolute zero lowcore. This is necessary if
357 	 * PSW restart is done on an offline CPU that has lowcore zero.
358 	 */
359 	lc->restart_stack = (unsigned long) restart_stack;
360 	lc->restart_fn = (unsigned long) do_restart;
361 	lc->restart_data = 0;
362 	lc->restart_source = -1UL;
363 
364 	/* Setup absolute zero lowcore */
365 	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
366 	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
367 	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
368 	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
369 	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
370 
371 #ifdef CONFIG_SMP
372 	lc->spinlock_lockval = arch_spin_lockval(0);
373 #endif
374 
375 	set_prefix((u32)(unsigned long) lc);
376 	lowcore_ptr[0] = lc;
377 }
378 
379 static struct resource code_resource = {
380 	.name  = "Kernel code",
381 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
382 };
383 
384 static struct resource data_resource = {
385 	.name = "Kernel data",
386 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
387 };
388 
389 static struct resource bss_resource = {
390 	.name = "Kernel bss",
391 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
392 };
393 
394 static struct resource __initdata *standard_resources[] = {
395 	&code_resource,
396 	&data_resource,
397 	&bss_resource,
398 };
399 
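/*
 * Register "System RAM" resources for all memory regions and insert the
 * kernel code, data and bss resources below them, splitting a kernel
 * resource where it crosses a memory region boundary.
 */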
400 static void __init setup_resources(void)
401 {
402 	struct resource *res, *std_res, *sub_res;
403 	struct memblock_region *reg;
404 	int j;
405 
406 	code_resource.start = (unsigned long) &_text;
407 	code_resource.end = (unsigned long) &_etext - 1;
408 	data_resource.start = (unsigned long) &_etext;
409 	data_resource.end = (unsigned long) &_edata - 1;
410 	bss_resource.start = (unsigned long) &__bss_start;
411 	bss_resource.end = (unsigned long) &__bss_stop - 1;
412 
413 	for_each_memblock(memory, reg) {
414 		res = alloc_bootmem_low(sizeof(*res));
415 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
416 
417 		res->name = "System RAM";
418 		res->start = reg->base;
419 		res->end = reg->base + reg->size - 1;
420 		request_resource(&iomem_resource, res);
421 
422 		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
423 			std_res = standard_resources[j];
424 			if (std_res->start < res->start ||
425 			    std_res->start > res->end)
426 				continue;
427 			if (std_res->end > res->end) {
428 				sub_res = alloc_bootmem_low(sizeof(*sub_res));
429 				*sub_res = *std_res;
430 				sub_res->end = res->end;
431 				std_res->start = res->end + 1;
432 				request_resource(res, sub_res);
433 			} else {
434 				request_resource(res, std_res);
435 			}
436 		}
437 	}
438 }
439 
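/*
 * Choose the kernel address space layout and clamp memory_end.
 * From low to high addresses: the 1:1 mapping of physical memory, the
 * vmemmap page array, the vmalloc area and, at the very top, the module
 * area.
 */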
440 static void __init setup_memory_end(void)
441 {
442 	unsigned long vmax, vmalloc_size, tmp;
443 
444 	/* Choose kernel address space layout: 2, 3, or 4 levels. */
445 	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
446 	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
447 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
448 	if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
449 		vmax = 1UL << 42;	/* 3-level kernel page table */
450 	else
451 		vmax = 1UL << 53;	/* 4-level kernel page table */
452 	/* module area is at the end of the kernel address space. */
453 	MODULES_END = vmax;
454 	MODULES_VADDR = MODULES_END - MODULES_LEN;
455 	VMALLOC_END = MODULES_VADDR;
456 	VMALLOC_START = vmax - vmalloc_size;
457 
458 	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
459 	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
460 	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
461 	tmp = SECTION_ALIGN_UP(tmp);
462 	tmp = VMALLOC_START - tmp * sizeof(struct page);
463 	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
464 	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
465 	vmemmap = (struct page *) tmp;
466 
467 	/* Take care that memory_end is set and <= vmemmap */
468 	memory_end = min(memory_end ?: max_physmem_end, tmp);
469 	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
470 	memblock_remove(memory_end, ULONG_MAX);
471 
472 	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
473 }
474 
475 static void __init setup_vmcoreinfo(void)
476 {
477 	mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
478 }
479 
480 #ifdef CONFIG_CRASH_DUMP
481 
482 /*
483  * When kdump is enabled, we have to ensure that no memory from
484  * the area [0 - crashkernel memory size] and
485  * [crashk_res.start - crashk_res.end] is set offline.
486  */
487 static int kdump_mem_notifier(struct notifier_block *nb,
488 			      unsigned long action, void *data)
489 {
490 	struct memory_notify *arg = data;
491 
492 	if (action != MEM_GOING_OFFLINE)
493 		return NOTIFY_OK;
494 	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
495 		return NOTIFY_BAD;
496 	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
497 		return NOTIFY_OK;
498 	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
499 		return NOTIFY_OK;
500 	return NOTIFY_BAD;
501 }
502 
503 static struct notifier_block kdump_mem_nb = {
504 	.notifier_call = kdump_mem_notifier,
505 };
506 
507 #endif
508 
509 /*
510  * Make sure that the area behind memory_end is protected
511  */
512 static void reserve_memory_end(void)
513 {
514 #ifdef CONFIG_CRASH_DUMP
515 	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
516 	    !OLDMEM_BASE && sclp.hsa_size) {
517 		memory_end = sclp.hsa_size;
518 		memory_end &= PAGE_MASK;
519 		memory_end_set = 1;
520 	}
521 #endif
522 	if (!memory_end_set)
523 		return;
524 	memblock_reserve(memory_end, ULONG_MAX);
525 }
526 
527 /*
528  * Make sure that oldmem, where the dump is stored, is protected
529  */
530 static void reserve_oldmem(void)
531 {
532 #ifdef CONFIG_CRASH_DUMP
533 	if (OLDMEM_BASE)
534 		/* Forget all memory above the running kdump system */
535 		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
536 #endif
537 }
538 
539 /*
540  * Remove oldmem, where the dump is stored, from the detected memory
541  */
542 static void remove_oldmem(void)
543 {
544 #ifdef CONFIG_CRASH_DUMP
545 	if (OLDMEM_BASE)
546 		/* Forget all memory above the running kdump system */
547 		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
548 #endif
549 }
550 
551 /*
552  * Reserve memory for kdump kernel to be loaded with kexec
553  */
554 static void __init reserve_crashkernel(void)
555 {
556 #ifdef CONFIG_CRASH_DUMP
557 	unsigned long long crash_base, crash_size;
558 	phys_addr_t low, high;
559 	int rc;
560 
561 	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
562 			       &crash_base);
563 
564 	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
565 	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
566 	if (rc || crash_size == 0)
567 		return;
568 
569 	if (memblock.memory.regions[0].size < crash_size) {
570 		pr_info("crashkernel reservation failed: %s\n",
571 			"first memory chunk must be at least crashkernel size");
572 		return;
573 	}
574 
575 	low = crash_base ?: OLDMEM_BASE;
576 	high = low + crash_size;
577 	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
578 		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
579 		crash_base = low;
580 	} else {
581 		/* Find suitable area in free memory */
582 		low = max_t(unsigned long, crash_size, sclp.hsa_size);
583 		high = crash_base ? crash_base + crash_size : ULONG_MAX;
584 
585 		if (crash_base && crash_base < low) {
586 			pr_info("crashkernel reservation failed: %s\n",
587 				"crash_base too low");
588 			return;
589 		}
590 		low = crash_base ?: low;
591 		crash_base = memblock_find_in_range(low, high, crash_size,
592 						    KEXEC_CRASH_MEM_ALIGN);
593 	}
594 
595 	if (!crash_base) {
596 		pr_info("crashkernel reservation failed: %s\n",
597 			"no suitable area found");
598 		return;
599 	}
600 
601 	if (register_memory_notifier(&kdump_mem_nb))
602 		return;
603 
604 	if (!OLDMEM_BASE && MACHINE_IS_VM)
605 		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
606 	crashk_res.start = crash_base;
607 	crashk_res.end = crash_base + crash_size - 1;
608 	insert_resource(&iomem_resource, &crashk_res);
609 	memblock_remove(crash_base, crash_size);
610 	pr_info("Reserving %lluMB of memory at %lluMB "
611 		"for crashkernel (System RAM: %luMB)\n",
612 		crash_size >> 20, crash_base >> 20,
613 		(unsigned long)memblock.memory.total_size >> 20);
614 	os_info_crashkernel_add(crash_base, crash_size);
615 #endif
616 }
617 
618 /*
619  * Reserve the initrd from being used by memblock
620  */
621 static void __init reserve_initrd(void)
622 {
623 #ifdef CONFIG_BLK_DEV_INITRD
624 	initrd_start = INITRD_START;
625 	initrd_end = initrd_start + INITRD_SIZE;
626 	memblock_reserve(INITRD_START, INITRD_SIZE);
627 #endif
628 }
629 
630 /*
631  * Check for initrd being in usable memory
632  */
633 static void __init check_initrd(void)
634 {
635 #ifdef CONFIG_BLK_DEV_INITRD
636 	if (INITRD_START && INITRD_SIZE &&
637 	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
638 		pr_err("initrd does not fit memory.\n");
639 		memblock_free(INITRD_START, INITRD_SIZE);
640 		initrd_start = initrd_end = 0;
641 	}
642 #endif
643 }
644 
645 /*
646  * Reserve memory used for lowcore/command line/kernel image.
647  */
648 static void __init reserve_kernel(void)
649 {
650 	unsigned long start_pfn = PFN_UP(__pa(&_end));
651 
652 #ifdef CONFIG_DMA_API_DEBUG
653 	/*
654 	 * DMA_API_DEBUG code stumbles over addresses from the
655 	 * range [_ehead, _stext]. Mark the memory as reserved
656 	 * so it is not used for CONFIG_DMA_API_DEBUG=y.
657 	 */
658 	memblock_reserve(0, PFN_PHYS(start_pfn));
659 #else
660 	memblock_reserve(0, (unsigned long)_ehead);
661 	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
662 			 - (unsigned long)_stext);
663 #endif
664 }
665 
666 static void __init setup_memory(void)
667 {
668 	struct memblock_region *reg;
669 
670 	/*
671 	 * Init storage key for present memory
672 	 */
673 	for_each_memblock(memory, reg) {
674 		storage_key_init_range(reg->base, reg->base + reg->size);
675 	}
676 	psw_set_key(PAGE_DEFAULT_KEY);
677 
678 	/* Only cosmetics */
679 	memblock_enforce_memory_limit(memblock_end_of_DRAM());
680 }
681 
682 /*
683  * Setup hardware capabilities.
684  */
685 static int __init setup_hwcaps(void)
686 {
687 	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
688 	struct cpuid cpu_id;
689 	int i;
690 
691 	/*
692 	 * The store facility list bits numbers as found in the principles
693 	 * of operation are numbered with bit 1UL<<31 as number 0 to
694 	 * bit 1UL<<0 as number 31.
695 	 *   Bit 0: instructions named N3, "backported" to esa-mode
696 	 *   Bit 2: z/Architecture mode is active
697 	 *   Bit 7: the store-facility-list-extended facility is installed
698 	 *   Bit 17: the message-security assist is installed
699 	 *   Bit 19: the long-displacement facility is installed
700 	 *   Bit 21: the extended-immediate facility is installed
701 	 *   Bit 22: extended-translation facility 3 is installed
702 	 *   Bit 30: extended-translation facility 3 enhancement facility
703 	 * These get translated to:
704 	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
705 	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
706 	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
707 	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
708 	 */
709 	for (i = 0; i < 6; i++)
710 		if (test_facility(stfl_bits[i]))
711 			elf_hwcap |= 1UL << i;
712 
713 	if (test_facility(22) && test_facility(30))
714 		elf_hwcap |= HWCAP_S390_ETF3EH;
715 
716 	/*
717 	 * Check for additional facilities with store-facility-list-extended.
718 	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
719 	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
720 	 * as stored by stfl, bits 32-xxx contain additional facilities.
721 	 * How many facility words are stored depends on the number of
722 	 * doublewords passed to the instruction. The additional facilities
723 	 * are:
724 	 *   Bit 42: decimal floating point facility is installed
725 	 *   Bit 44: perform floating point operation facility is installed
726 	 * translated to:
727 	 *   HWCAP_S390_DFP bit 6 (42 && 44).
728 	 */
729 	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
730 		elf_hwcap |= HWCAP_S390_DFP;
731 
732 	/*
733 	 * Huge page support HWCAP_S390_HPAGE is bit 7.
734 	 */
735 	if (MACHINE_HAS_HPAGE)
736 		elf_hwcap |= HWCAP_S390_HPAGE;
737 
738 	/*
739 	 * 64-bit register support for 31-bit processes
740 	 * HWCAP_S390_HIGH_GPRS is bit 9.
741 	 */
742 	elf_hwcap |= HWCAP_S390_HIGH_GPRS;
743 
744 	/*
745 	 * Transactional execution support HWCAP_S390_TE is bit 10.
746 	 */
747 	if (test_facility(50) && test_facility(73))
748 		elf_hwcap |= HWCAP_S390_TE;
749 
750 	/*
751 	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
752 	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
753 	 * instead of facility bit 129.
754 	 */
755 	if (MACHINE_HAS_VX)
756 		elf_hwcap |= HWCAP_S390_VXRS;
757 	get_cpu_id(&cpu_id);
758 	add_device_randomness(&cpu_id, sizeof(cpu_id));
759 	switch (cpu_id.machine) {
760 	case 0x2064:
761 	case 0x2066:
762 	default:	/* Use "z900" as default for 64 bit kernels. */
763 		strcpy(elf_platform, "z900");
764 		break;
765 	case 0x2084:
766 	case 0x2086:
767 		strcpy(elf_platform, "z990");
768 		break;
769 	case 0x2094:
770 	case 0x2096:
771 		strcpy(elf_platform, "z9-109");
772 		break;
773 	case 0x2097:
774 	case 0x2098:
775 		strcpy(elf_platform, "z10");
776 		break;
777 	case 0x2817:
778 	case 0x2818:
779 		strcpy(elf_platform, "z196");
780 		break;
781 	case 0x2827:
782 	case 0x2828:
783 		strcpy(elf_platform, "zEC12");
784 		break;
785 	case 0x2964:
786 		strcpy(elf_platform, "z13");
787 		break;
788 	}
789 
790 	/*
791 	 * Virtualization support HWCAP_INT_SIE is bit 0.
792 	 */
793 	if (sclp.has_sief2)
794 		int_hwcap |= HWCAP_INT_SIE;
795 
796 	return 0;
797 }
798 arch_initcall(setup_hwcaps);
799 
800 /*
801  * Add system information as device randomness
802  */
803 static void __init setup_randomness(void)
804 {
805 	struct sysinfo_3_2_2 *vmms;
806 
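	/*
	 * setup_arch() runs long before the page allocator is ready, so the
	 * buffer must come from memblock. Feed the stsi data itself, not the
	 * pointer to it, into the random pool.
	 */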
807 	vmms = (struct sysinfo_3_2_2 *) memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
808 	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
809 		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
810 	memblock_free((unsigned long) vmms, PAGE_SIZE);
811 }
812 
813 /*
814  * Setup function called from init/main.c just after the banner
815  * was printed.
816  */
817 
818 void __init setup_arch(char **cmdline_p)
819 {
820 	/*
821 	 * Print what head.S has found out about the machine
822 	 */
823 	if (MACHINE_IS_VM)
824 		pr_info("Linux is running as a z/VM "
825 			"guest operating system in 64-bit mode\n");
826 	else if (MACHINE_IS_KVM)
827 		pr_info("Linux is running under KVM in 64-bit mode\n");
828 	else if (MACHINE_IS_LPAR)
829 		pr_info("Linux is running natively in 64-bit mode\n");
830 
831 	/* Have one command line that is parsed and saved in /proc/cmdline */
832 	/* boot_command_line has been already set up in early.c */
833 	*cmdline_p = boot_command_line;
834 
835 	ROOT_DEV = Root_RAM0;
836 
837 	/* Is init_mm really needed? */
838 	init_mm.start_code = PAGE_OFFSET;
839 	init_mm.end_code = (unsigned long) &_etext;
840 	init_mm.end_data = (unsigned long) &_edata;
841 	init_mm.brk = (unsigned long) &_end;
842 
843 	parse_early_param();
844 #ifdef CONFIG_CRASH_DUMP
845 	/* Deactivate elfcorehdr= kernel parameter */
846 	elfcorehdr_addr = ELFCORE_ADDR_MAX;
847 #endif
848 
849 	os_info_init();
850 	setup_ipl();
851 
852 	/* Do some memory reservations *before* memory is added to memblock */
853 	reserve_memory_end();
854 	reserve_oldmem();
855 	reserve_kernel();
856 	reserve_initrd();
857 	memblock_allow_resize();
858 
859 	/* Get information about *all* installed memory */
860 	detect_memory_memblock();
861 
862 	remove_oldmem();
863 
864 	/*
865 	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
866 	 * extra checks that HOLES_IN_ZONE would require.
867 	 *
868 	 * Is this still required?
869 	 */
870 	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
871 
872 	setup_memory_end();
873 	setup_memory();
874 
875 	check_initrd();
876 	reserve_crashkernel();
877 #ifdef CONFIG_CRASH_DUMP
878 	/*
879 	 * Be aware that smp_save_dump_cpus() triggers a system reset.
880 	 * Therefore CPU and device initialization should be done afterwards.
881 	 */
882 	smp_save_dump_cpus();
883 #endif
884 
885 	setup_resources();
886 	setup_vmcoreinfo();
887 	setup_lowcore();
888 	smp_fill_possible_mask();
889 	cpu_init();
890 	numa_setup();
891 
892 	/*
893 	 * Create kernel page tables and switch to virtual addressing.
894 	 */
895 	paging_init();
896 
897 	/* Setup default console */
898 	conmode_default();
899 	set_preferred_console();
900 
901 	/* Setup zfcpdump support */
902 	setup_zfcpdump();
903 
904 	/* Add system specific data to the random pool */
905 	setup_randomness();
906 }
907