/*
 *  arch/s390/kernel/setup.c
 *
 *  S390 version
 *    Copyright (C) IBM Corp. 1999,2010
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/compat.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>

/* Default PSW mask bits used for kernel and user mode. */
long psw_kernel_bits	= PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
			  PSW_MASK_EA | PSW_MASK_BA;
long psw_user_bits	= PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
			  PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_ASC_HOME;

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];

int __initdata memory_end_set;
unsigned long __initdata memory_end;

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
	if (MACHINE_IS_KVM)
		add_preferred_console("hvc", 0, NULL);
	else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (strncmp(str, "3215", 5) == 0)
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (strncmp(str, "3270", 5) == 0)
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

#ifdef CONFIG_ZFCPDUMP
static void __init setup_zfcpdump(unsigned int console_devno)
{
	static char str[41];

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (OLDMEM_BASE)
		return;
	if (console_devno != -1)
		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno, console_devno);
	else
		sprintf(str, " cio_ignore=all,!0.0.%04x",
			ipl_info.data.fcp.dev_id.devno);
	strcat(boot_command_line, str);
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;

static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	memory_end_set = 1;
	return 0;
}
early_param("mem", early_parse_mem);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned int user_mode = HOME_SPACE_MODE;
EXPORT_SYMBOL_GPL(user_mode);

/*
 * Switch the kernel PSW to the home address space and the user PSW to
 * the primary address space, and select the MVCOS based user copy
 * functions if the machine has MVCOS, the page table walk based
 * functions otherwise. Returns 1 if MVCOS is used, 0 if not.
 */
static int set_amode_primary(void)
{
	psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
	psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
#ifdef CONFIG_COMPAT
	psw32_user_bits =
		(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
#endif

	if (MACHINE_HAS_MVCOS) {
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
		return 1;
	} else {
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
		return 0;
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	user_mode = PRIMARY_SPACE_MODE;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);

static int __init early_parse_user_mode(char *p)
{
	if (p && strcmp(p, "primary") == 0)
		user_mode = PRIMARY_SPACE_MODE;
	else if (!p || strcmp(p, "home") == 0)
		user_mode = HOME_SPACE_MODE;
	else
		return 1;
	return 0;
}
early_param("user_mode", early_parse_user_mode);

static void setup_addressing_mode(void)
{
	if (user_mode == PRIMARY_SPACE_MODE) {
		if (set_amode_primary())
			pr_info("Address spaces switched, "
				"mvcos available\n");
		else
			pr_info("Address spaces switched, "
				"mvcos not available\n");
	}
}

static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
	lc->restart_psw.mask = psw_kernel_bits;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	lc->external_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = psw_kernel_bits;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#else
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	int i, j;

	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
	bss_resource.start = (unsigned long) &__bss_start;
	bss_resource.end = (unsigned long) &__bss_stop - 1;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_OLDMEM ||
		    memory_chunk[i].type == CHUNK_CRASHK)
			continue;
		res = alloc_bootmem_low(sizeof(*res));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
		case CHUNK_CRASHK:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = res->start + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = alloc_bootmem_low(sizeof(*sub_res));
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
}

unsigned long real_memory_size;
EXPORT_SYMBOL_GPL(real_memory_size);

static void __init setup_memory_end(void)
{
	unsigned long vmax, vmalloc_size, tmp;
	int i;

#ifdef CONFIG_ZFCPDUMP
	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
		memory_end = ZFCPDUMP_HSA_SIZE;
		memory_end_set = 1;
	}
#endif
	real_memory_size = 0;
	memory_end &= PAGE_MASK;

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		unsigned long start, end;
		struct mem_chunk *chunk;
		unsigned long align;

		chunk = &memory_chunk[i];
		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
		start = (chunk->addr + align - 1) & ~(align - 1);
		end = (chunk->addr + chunk->size) & ~(align - 1);
		if (start >= end)
			memset(chunk, 0, sizeof(*chunk));
		else {
			chunk->addr = start;
			chunk->size = end - start;
		}
		real_memory_size = max(real_memory_size,
				       chunk->addr + chunk->size);
	}

	/* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
	vmalloc_size = VMALLOC_END ?: 128UL << 30;
	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
	if (tmp <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
#else
	vmalloc_size = VMALLOC_END ?: 96UL << 20;
	vmax = 1UL << 31;		/* 2-level kernel page table */
#endif
	/* vmalloc area is at the end of the kernel address space. */
	VMALLOC_END = vmax;
	VMALLOC_START = vmax - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: real_memory_size, tmp);

	/* Fixup memory chunk array to fit into 0..memory_end */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		if (chunk->addr >= memory_end) {
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		if (chunk->addr + chunk->size > memory_end)
			chunk->size = memory_end - chunk->addr;
	}
}

void *restart_stack __attribute__((__section__(".data")));

/*
 * Setup new PSW and allocate stack for PSW restart interrupt
 */
static void __init setup_restart_psw(void)
{
	psw_t psw;

	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
	restart_stack += ASYNC_SIZE;

	/*
	 * Setup restart PSW for absolute zero lowcore.
	 * This is necessary if PSW restart is done on an offline CPU
	 * that has lowcore zero.
	 */
	psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}

static void __init setup_vmcoreinfo(void)
{
#ifdef CONFIG_KEXEC
	unsigned long ptr = paddr_vmcoreinfo_note();

	copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
#endif
}

#ifdef CONFIG_CRASH_DUMP

/*
 * Find suitable location for crashkernel memory
 */
static unsigned long __init find_crash_base(unsigned long crash_size,
					    char **msg)
{
	unsigned long crash_base;
	struct mem_chunk *chunk;
	int i;

	if (memory_chunk[0].size < crash_size) {
		*msg = "first memory chunk must be at least crashkernel size";
		return 0;
	}
	if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
		return OLDMEM_BASE;

	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
		chunk = &memory_chunk[i];
		if (chunk->size == 0)
			continue;
		if (chunk->type != CHUNK_READ_WRITE)
			continue;
		if (chunk->size < crash_size)
			continue;
		crash_base = (chunk->addr + chunk->size) - crash_size;
		if (crash_base < crash_size)
			continue;
		if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
			continue;
		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
			continue;
		return crash_base;
	}
	*msg = "no suitable area found";
	return 0;
}

/*
 * Check if crash_base and crash_size are valid
 */
static int __init verify_crash_base(unsigned long crash_base,
				    unsigned long crash_size,
				    char **msg)
{
	struct mem_chunk *chunk;
	int i;

	/*
	 * Because we do the swap to zero, we must have at least 'crash_size'
	 * bytes free space before crash_base
	 */
	if (crash_size > crash_base) {
		*msg = "crashkernel offset must be greater than size";
		return -EINVAL;
	}

	/* First memory chunk must be at least crash_size */
	if (memory_chunk[0].size < crash_size) {
		*msg = "first memory chunk must be at least crashkernel size";
		return -EINVAL;
	}
	/* Check if we fit into the respective memory chunk */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		chunk = &memory_chunk[i];
		if (chunk->size == 0)
			continue;
		if (crash_base < chunk->addr)
			continue;
		if (crash_base >= chunk->addr + chunk->size)
			continue;
		/* we have found the memory chunk */
		if (crash_base + crash_size > chunk->addr + chunk->size) {
			*msg = "selected memory chunk is too small for "
				"crashkernel memory";
			return -EINVAL;
		}
		return 0;
	}
	*msg = "invalid memory range specified";
	return -EINVAL;
}

/*
 * Reserve kdump memory by creating a memory hole in the mem_chunk array
 */
static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
					 int type)
{
	create_mem_hole(memory_chunk, addr, size, type);
}

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (!OLDMEM_BASE)
		return;

	reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
	reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
			      CHUNK_OLDMEM);
	if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
		saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
	else
		saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	char *msg;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);
	if (rc || crash_size == 0)
		return;
	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (register_memory_notifier(&kdump_mem_nb))
		return;
	if (!crash_base)
		crash_base = find_crash_base(crash_size, &msg);
	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n", msg);
		unregister_memory_notifier(&kdump_mem_nb);
		return;
	}
	if (verify_crash_base(crash_base, crash_size, &msg)) {
		pr_info("crashkernel reservation failed: %s\n", msg);
		unregister_memory_notifier(&kdump_mem_nb);
		return;
	}
	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20, memory_end >> 20);
#endif
}

static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocator
	 * would overwrite it.
	 */

	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);

		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

#ifdef CONFIG_CRASH_DUMP
			if (OLDMEM_BASE) {
				/* Move initrd behind kdump oldmem */
				if (start + INITRD_SIZE > OLDMEM_BASE &&
				    start < OLDMEM_BASE + OLDMEM_SIZE)
					start = OLDMEM_BASE + OLDMEM_SIZE;
			}
#endif
			if (start + INITRD_SIZE > memory_end) {
				pr_err("initrd extends beyond end of "
				       "memory (0x%08lx > 0x%08lx) "
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				pr_info("Moving initrd (0x%08lx -> "
					"0x%08lx, size: %ld)\n",
					INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
		    memory_chunk[i].type != CHUNK_CRASHK)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		memblock_add_node(PFN_PHYS(start_chunk),
				  PFN_PHYS(end_chunk - start_chunk), 0);
		pfn = max(start_chunk, start_pfn);
		for (; pfn < end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn),
					     PAGE_DEFAULT_KEY, 0);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext,
			BOOTMEM_DEFAULT);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
			BOOTMEM_DEFAULT);

#ifdef CONFIG_CRASH_DUMP
	if (crashk_res.start)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
	if (is_kdump_kernel())
		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE,
					BOOTMEM_DEFAULT);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			pr_err("initrd extends beyond end of "
			       "memory (0x%08lx > 0x%08lx) "
			       "disabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}

/*
 * Setup hardware capabilities.
 */
static void __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store facility list bit numbers as found in the principles
	 * of operation are numbered with bit 1UL<<31 as number 0 to
	 * bit 1UL<<0 as number 31.
	 *   Bit 0: instructions named N3, "backported" to esa-mode
	 *   Bit 2: z/Architecture mode is active
	 *   Bit 7: the store-facility-list-extended facility is installed
	 *   Bit 17: the message-security assist is installed
	 *   Bit 19: the long-displacement facility is installed
	 *   Bit 21: the extended-immediate facility is installed
	 *   Bit 22: extended-translation facility 3 is installed
	 *   Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 *   Bit 42: decimal floating point facility is installed
	 *   Bit 44: perform floating point operation facility is installed
	 * translated to:
	 *   HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_HPAGE)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
#if !defined(CONFIG_64BIT)
	default:	/* Use "g5" as default for 31 bit kernels. */
#endif
		strcpy(elf_platform, "g5");
		break;
	case 0x2064:
	case 0x2066:
#if defined(CONFIG_64BIT)
	default:	/* Use "z900" as default for 64 bit kernels. */
#endif
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 31-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 31-bit mode\n");
	if (MACHINE_HAS_IEEE)
		pr_info("The hardware system has IEEE compatible "
			"floating point units\n");
	else
		pr_info("The hardware system has no IEEE compatible "
			"floating point units\n");
#else /* CONFIG_64BIT */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has already been set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	setup_ipl();
	setup_memory_end();
	setup_addressing_mode();
	reserve_oldmem();
	reserve_crashkernel();
	setup_memory();
	setup_resources();
	setup_vmcoreinfo();
	setup_restart_psw();
	setup_lowcore();

	cpu_init();
	s390_init_cpu_topology();

	/*
	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
	 */
	setup_hwcaps();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	/* Setup zfcpdump support */
	setup_zfcpdump(console_devno);
}