/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/hex.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/dmi.h>
#include <linux/crash_dump.h>
#include <linux/string.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/mips-cps.h>
#include <asm/prom.h>
#include <asm/fw/fw.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

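/*
 * Size a memory region by probing for address aliasing: starting from sz_min,
 * the candidate size is doubled until the contents at detect_magic + size
 * mirror detect_magic itself, i.e. until accesses wrap around onto RAM that
 * has already been seen.  The detected range is then added to memblock.
 */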
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		 ((unsigned long long) size) / SZ_1M,
		 (unsigned long long) start,
		 ((unsigned long long) sz_min) / SZ_1M,
		 ((unsigned long long) sz_max) / SZ_1M);

	memblock_add(start, size);
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

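/*
 * Note that initrd_end accumulates contributions from both parameters above,
 * so once rd_start= and rd_size= have been parsed (in either order) it ends
 * up at initrd_start + size; init_initrd() below sanity checks the result.
 */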
/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware can't guess
	 * whether it needs to pass them as 64-bit values when the kernel
	 * has been built as pure 32-bit. We also need to switch from
	 * KSEG0 to XKPHYS addresses now, so the code can safely use
	 * __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped. Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	phys_addr_t ramstart, ramend;
	unsigned long start, end;
	int i;

	ramstart = memblock_start_of_DRAM();
	ramend = memblock_end_of_DRAM();

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();

	/* Reserve memory occupied by kernel. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));

	/* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
	}
#endif

	min_low_pfn = ARCH_PFN_OFFSET;
	max_pfn = PFN_DOWN(ramend);
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_low_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
#ifdef CONFIG_HIGHMEM
		highstart_pfn = max_low_pfn;
		highend_pfn = max_pfn;
#else
		max_pfn = max_low_pfn;
#endif
	}

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
				memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	if (IS_ENABLED(CONFIG_NUMA))
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
	else
		memblock_add(start, size);

	return 0;
}
early_param("mem", early_parse_mem);

static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
		memblock_reserve(start_at, mem_size);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);

static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	phys_addr_t start, end;
	u64 i;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end
				 * of the memory segment; that should all be
				 * kdump reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}

/* 64M alignment for crash kernel regions */
#define CRASH_ALIGN	SZ_64M
#define CRASH_ADDR_MAX	SZ_512M

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	total_mem = memblock_phys_mem_size();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base,
				NULL, NULL, NULL);
	if (ret != 0 || crash_size <= 0)
		return;

	if (crash_base <= 0) {
		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN,
						       CRASH_ADDR_MAX);
		if (!crash_base) {
			pr_warn("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, 1,
						  crash_base,
						  crash_base + crash_size);
		if (start != crash_base) {
			pr_warn("Invalid memory region reserved for crash kernel\n");
			return;
		}
	}

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}

static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

/*
 * Append s to boot_command_line, separated from any existing contents by a
 * space; the result is truncated to fit in max bytes.
 */
static void __init bootcmdline_append(const char *s, size_t max)
{
	if (!s[0] || !max)
		return;

	if (boot_command_line[0])
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

	strlcat(boot_command_line, s, max);
}

#ifdef CONFIG_OF_EARLY_FLATTREE

static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
					  int depth, void *data)
{
	bool *dt_bootargs = data;
	const char *p;
	int l;

	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0) {
		if (strnlen(p, l) >= l)
			return 1;

		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
		*dt_bootargs = true;
	}

	return 1;
}

#endif /* CONFIG_OF_EARLY_FLATTREE */

/*
 * Assemble boot_command_line from the built-in CONFIG_CMDLINE, the device
 * tree "chosen/bootargs" property and the bootloader-provided arcs_cmdline,
 * according to the CMDLINE_OVERRIDE and MIPS_CMDLINE_* configuration options.
 */
static void __init bootcmdline_init(void)
{
	bool dt_bootargs = false;

	/*
	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
	 * trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
		return;
	}

	/*
	 * If the user specified a built-in command line &
	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
	 * prepended to arguments from the bootloader or DT so we'll copy them
	 * to the start of boot_command_line here. Otherwise, empty
	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	else
		boot_command_line[0] = 0;

#ifdef CONFIG_OF_EARLY_FLATTREE
	/*
	 * If we're configured to take boot arguments from DT, look for those
	 * now.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

	/*
	 * If we didn't get any arguments from DT (regardless of whether that's
	 * because we weren't configured to look for them, or because we looked
	 * & found none) then we'll take arguments from the bootloader.
	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
	 * the bootloader.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);

	/*
	 * If the user specified a built-in command line & we didn't already
	 * prepend it, we append it to boot_command_line here.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using memblock_add.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o pagetable_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator.  To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	/* call board setup routine */
	plat_mem_setup();
	memblock_set_bottom_up(true);

	bootcmdline_init();
	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up to
	 * this point it is only possible to reserve physical memory with
	 * memblock_reserve(); memblock_alloc*() can be used only after
	 * this point.
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

	mips_reserve_vmcore();

	mips_parse_crashkernel();
	device_tree_init();

	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	memblock_reserve(__pa_symbol(&__nosave_begin),
			 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

static void __init resource_init(void)
{
	phys_addr_t start, end;
	u64 i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES);

		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		res->name = "System RAM";

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	set_nr_cpu_ids(possible);
}
#else
static inline void prefill_possible_map(void) {}
#endif

/*
 * Seed the random number generator from an optional hexadecimal "rngseed"
 * firmware environment variable.
 */
static void __init setup_rng_seed(void)
{
	char *rng_seed_hex = fw_getenv("rngseed");
	u8 rng_seed[512];
	size_t len;

	if (!rng_seed_hex)
		return;

	len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
	if (hex2bin(rng_seed, rng_seed_hex, len))
		return;

	add_bootloader_randomness(rng_seed, len);
	memzero_explicit(rng_seed, len);
	memzero_explicit(rng_seed_hex, len * 2);
}

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
		check_bugs64_early();

	arch_mem_init(cmdline_p);
	dmi_setup();

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	pagetable_init();

	memblock_dump_all();

	setup_rng_seed();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

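/* Root of the "mips" directory in debugfs, created at arch_initcall time. */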
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
	return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
	dma_default_coherent = true;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	dma_default_coherent = false;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

void __init arch_cpu_finalize_init(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_data[cpu].udelay_val = loops_per_jiffy;
	check_bugs32();

	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
		check_bugs64();
}