/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>

#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

int __initdata memory_end_set;
unsigned long __initdata memory_end;
unsigned long __initdata max_physmem_end;

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up,
 * using the parameter area at address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
	if (MACHINE_IS_KVM) {
		if (sclp.has_vt220)
			add_preferred_console("ttyS", 1, NULL);
		else if (sclp.has_linemode)
			add_preferred_console("ttyS", 0, NULL);
		else
			add_preferred_console("hvc", 0, NULL);
	} else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);
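
/*
 * Usage sketch (illustrative, not part of the original file): both
 * parameters above come from the kernel command line, e.g.
 *
 *	conmode=3270 condev=0x001f
 *
 * selects the 3270 console mode and console device number 0x001f.
 * condev= accepts any base that simple_strtoul() with base 0
 * understands (0x-prefixed hex, 0-prefixed octal, plain decimal);
 * values outside 0..65535 are silently ignored. conmode= accepts
 * "hwc"/"sclp", "3215" and "3270", each only if the corresponding
 * console driver is configured in.
 */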

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (OLDMEM_BASE)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */
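
/*
 * Note (editorial sketch): with the strcat() above, the FCP dump
 * kernel effectively boots with " cio_ignore=all,!ipldev,!condev"
 * appended to its command line, so device recognition skips every
 * I/O device except the IPL and console devices. console_loglevel
 * set to 2 lets only KERN_EMERG and KERN_ALERT messages through to
 * the console.
 */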

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Default power off function: drivers may override the pm_power_off
 * hook; until then it falls back to machine_power_off.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	memory_end &= PAGE_MASK;
	memory_end_set = 1;
	return 0;
}
early_param("mem", early_parse_mem);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
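
/*
 * Usage sketch (illustrative): both early parameters take the usual
 * memparse() suffixes, e.g.
 *
 *	mem=4G vmalloc=512M
 *
 * caps the usable memory at 4 GiB (rounded down to a page boundary)
 * and requests 512 MiB of vmalloc space (rounded up to a page
 * boundary). Note that VMALLOC_END temporarily holds the requested
 * *size* here; setup_memory_end() below consumes it and computes the
 * final address space layout.
 */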

void *restart_stack __attribute__((__section__(".data")));

static void __init setup_lowcore(void)
{
	struct _lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	lc->kernel_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
	restart_stack += ASYNC_SIZE;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

#ifdef CONFIG_SMP
	lc->spinlock_lockval = arch_spin_lockval(0);
#endif

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}
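
/*
 * Background note (sketch): the prefix register written by
 * set_prefix() above makes real addresses 0..8191 of this CPU
 * resolve to the two pages allocated for lc, and vice versa. That
 * is why the fixed S390_lowcore structure at address 0 keeps working
 * afterwards: a later access to S390_lowcore.machine_flags on the
 * boot CPU is served from lc->machine_flags as copied above.
 */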

static struct resource code_resource = {
	.name  = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	struct memblock_region *reg;
	int j;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
	bss_resource.start = (unsigned long) &__bss_start;
	bss_resource.end = (unsigned long) &__bss_stop - 1;

	for_each_memblock(memory, reg) {
		res = alloc_bootmem_low(sizeof(*res));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;

		res->name = "System RAM";
		res->start = reg->base;
		res->end = reg->base + reg->size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = alloc_bootmem_low(sizeof(*sub_res));
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
}

static void __init setup_memory_end(void)
{
	unsigned long vmax, vmalloc_size, tmp;

	/* Choose kernel address space layout: 2, 3, or 4 levels. */
	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
	if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = vmax - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, tmp);
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
}
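
/*
 * Worked example (illustrative numbers): with the default vmalloc
 * size, vmalloc_size + MODULES_LEN is exactly 128 GB, so the 3- vs
 * 4-level decision above reduces to tmp + 128 GB <= 4 TB. A machine
 * with 4 GB of memory has 2^20 pages; assuming a 64-byte struct
 * page, tmp = 2^20 * (4096 + 64) bytes, roughly 4.1 GB, far below
 * the limit, so vmax = 1UL << 42 and a 3-level kernel page table is
 * chosen. Only with a few TB of memory, or a huge vmalloc= request,
 * does the layout switch to 4 levels.
 */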

static void __init setup_vmcoreinfo(void)
{
	mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};
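
/*
 * Example walk-through (illustrative): assume a 256 MB crash kernel
 * area at [768 MB, 1024 MB). Offlining a block that starts below
 * PFN_DOWN(256 MB) is refused, because booting the kdump kernel
 * exchanges the first crash-size bytes of memory with the crash
 * kernel area, so both ranges have to stay online. A block entirely
 * above 1024 MB, or entirely between 256 MB and 768 MB, is fine;
 * anything overlapping [768 MB, 1024 MB) is refused.
 */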

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void reserve_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
	    !OLDMEM_BASE && sclp.hsa_size) {
		memory_end = sclp.hsa_size;
		memory_end &= PAGE_MASK;
		memory_end_set = 1;
	}
#endif
	if (!memory_end_set)
		return;
	memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Remove all memory above the running kdump system from the
 * detected memory areas
 */
static void remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}
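
/*
 * Usage sketch (illustrative): a boot parameter such as
 *
 *	crashkernel=256M
 *
 * (or crashkernel=256M@1G for a fixed base address) is parsed by
 * parse_crashkernel() above. Base and size are rounded to
 * KEXEC_CRASH_MEM_ALIGN, a fitting area is taken out of memblock
 * and published as the "Crash kernel" iomem resource, ready for a
 * dump kernel loaded later with kexec -p. An automatically chosen
 * base is never placed below max(crash size, sclp.hsa_size).
 */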

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE &&
	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
		pr_err("initrd does not fit memory.\n");
		memblock_free(INITRD_START, INITRD_SIZE);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(&_end));

#ifdef CONFIG_DMA_API_DEBUG
	/*
	 * DMA_API_DEBUG code stumbles over addresses from the
	 * range [_ehead, _stext]. Mark the memory as reserved
	 * so it is not used for CONFIG_DMA_API_DEBUG=y.
	 */
	memblock_reserve(0, PFN_PHYS(start_pfn));
#else
	memblock_reserve(0, (unsigned long)_ehead);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
#endif
}
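
/*
 * Note (editorial): in the non-debug branch above, the range
 * [_ehead, _stext) is deliberately left unreserved, so the memory
 * between the head code and the start of the text section remains
 * available to the page allocator.
 */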

static void __init reserve_elfcorehdr(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (is_kdump_kernel())
		memblock_reserve(elfcorehdr_addr - OLDMEM_BASE,
				 PAGE_ALIGN(elfcorehdr_size));
#endif
}

static void __init setup_memory(void)
{
	struct memblock_region *reg;

	/*
	 * Init storage key for present memory
	 */
	for_each_memblock(memory, reg) {
		storage_key_init_range(reg->base, reg->base + reg->size);
	}
	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}
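
/*
 * Background note (sketch): s390 attaches a storage key to every
 * page frame. storage_key_init_range() initializes the keys of all
 * present memory, and psw_set_key(PAGE_DEFAULT_KEY) sets the
 * matching access key in the PSW, so normal kernel accesses do not
 * raise key-controlled protection exceptions.
 */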

/*
 * Setup hardware capabilities.
 */
static int __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store-facility-list bit numbers, as found in the Principles
	 * of Operation, are numbered with bit 1UL<<31 as number 0 and
	 * bit 1UL<<0 as number 31.
	 *   Bit 0: instructions named N3, "backported" to esa-mode
	 *   Bit 2: z/Architecture mode is active
	 *   Bit 7: the store-facility-list-extended facility is installed
	 *   Bit 17: the message-security assist is installed
	 *   Bit 19: the long-displacement facility is installed
	 *   Bit 21: the extended-immediate facility is installed
	 *   Bit 22: extended-translation facility 3 is installed
	 *   Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 *   Bit 42: decimal floating point facility is installed
	 *   Bit 44: perform floating point operation facility is installed
	 * translated to:
	 *   HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_HPAGE)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (test_facility(50) && test_facility(73))
		elf_hwcap |= HWCAP_S390_TE;

	/*
	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
	 * instead of facility bit 129.
	 */
	if (MACHINE_HAS_VX)
		elf_hwcap |= HWCAP_S390_VXRS;
	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x9672:
		strcpy(elf_platform, "g5");
		break;
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
		strcpy(elf_platform, "z13");
		break;
	}
	return 0;
}
arch_initcall(setup_hwcaps);
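
/*
 * Userspace view (sketch, not part of this file): the bits collected
 * above are exported through the ELF auxiliary vector, so a program
 * can test for a feature before using it. With glibc, for example:
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long hwcap = getauxval(AT_HWCAP);
 *
 *		if (hwcap & (1UL << 10))	// HWCAP_S390_TE
 *			puts("transactional execution available");
 *		printf("platform: %s\n", (char *) getauxval(AT_PLATFORM));
 *		return 0;
 *	}
 *
 * AT_PLATFORM returns the elf_platform string ("z900" ... "z13")
 * filled in by the switch statement above.
 */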

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	/*
	 * setup_arch() runs long before the page allocator is usable,
	 * so take the buffer from memblock. Feed the guest descriptions
	 * returned by stsi 3.2.2 (not the address of the local pointer)
	 * into the entropy pool.
	 */
	vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm,
				      sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has already been set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	/* Is init_mm really needed? */
	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	parse_early_param();
	os_info_init();
	setup_ipl();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_memory_end();
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
	reserve_elfcorehdr();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	detect_memory_memblock();

	remove_oldmem();

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 *
	 * Is this still required?
	 */
	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));

	setup_memory_end();
	setup_memory();

	check_initrd();
	reserve_crashkernel();
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();

	setup_resources();
	setup_vmcoreinfo();
	setup_lowcore();
	smp_fill_possible_mask();
	cpu_init();
	numa_setup();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	/* Setup zfcpdump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}
904