1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1995 Linus Torvalds 7 * Copyright (C) 1995 Waldorf Electronics 8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle 9 * Copyright (C) 1996 Stoned Elipot 10 * Copyright (C) 1999 Silicon Graphics, Inc. 11 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki 12 */ 13 #include <linux/init.h> 14 #include <linux/cpu.h> 15 #include <linux/delay.h> 16 #include <linux/ioport.h> 17 #include <linux/export.h> 18 #include <linux/screen_info.h> 19 #include <linux/memblock.h> 20 #include <linux/initrd.h> 21 #include <linux/root_dev.h> 22 #include <linux/highmem.h> 23 #include <linux/console.h> 24 #include <linux/pfn.h> 25 #include <linux/debugfs.h> 26 #include <linux/kexec.h> 27 #include <linux/sizes.h> 28 #include <linux/device.h> 29 #include <linux/dma-map-ops.h> 30 #include <linux/decompress/generic.h> 31 #include <linux/of_fdt.h> 32 #include <linux/dmi.h> 33 #include <linux/crash_dump.h> 34 35 #include <asm/addrspace.h> 36 #include <asm/bootinfo.h> 37 #include <asm/bugs.h> 38 #include <asm/cache.h> 39 #include <asm/cdmm.h> 40 #include <asm/cpu.h> 41 #include <asm/debug.h> 42 #include <asm/mmzone.h> 43 #include <asm/sections.h> 44 #include <asm/setup.h> 45 #include <asm/smp-ops.h> 46 #include <asm/prom.h> 47 #include <asm/fw/fw.h> 48 49 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB 50 char __section(".appended_dtb") __appended_dtb[0x100000]; 51 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */ 52 53 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; 54 55 EXPORT_SYMBOL(cpu_data); 56 57 #ifdef CONFIG_VT 58 struct screen_info screen_info; 59 #endif 60 61 /* 62 * Setup information 63 * 64 * These are initialized so they are in the .data section 65 */ 66 unsigned long mips_machtype __read_mostly = MACH_UNKNOWN; 67 68 EXPORT_SYMBOL(mips_machtype); 69 70 static char __initdata command_line[COMMAND_LINE_SIZE]; 71 char __initdata arcs_cmdline[COMMAND_LINE_SIZE]; 72 73 #ifdef CONFIG_CMDLINE_BOOL 74 static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE; 75 #else 76 static const char builtin_cmdline[] __initconst = ""; 77 #endif 78 79 /* 80 * mips_io_port_base is the begin of the address space to which x86 style 81 * I/O ports are mapped. 82 */ 83 unsigned long mips_io_port_base = -1; 84 EXPORT_SYMBOL(mips_io_port_base); 85 86 static struct resource code_resource = { .name = "Kernel code", }; 87 static struct resource data_resource = { .name = "Kernel data", }; 88 static struct resource bss_resource = { .name = "Kernel bss", }; 89 90 unsigned long __kaslr_offset __ro_after_init; 91 EXPORT_SYMBOL(__kaslr_offset); 92 93 static void *detect_magic __initdata = detect_memory_region; 94 95 #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET 96 unsigned long ARCH_PFN_OFFSET; 97 EXPORT_SYMBOL(ARCH_PFN_OFFSET); 98 #endif 99 100 void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) 101 { 102 void *dm = &detect_magic; 103 phys_addr_t size; 104 105 for (size = sz_min; size < sz_max; size <<= 1) { 106 if (!memcmp(dm, dm + size, sizeof(detect_magic))) 107 break; 108 } 109 110 pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n", 111 ((unsigned long long) size) / SZ_1M, 112 (unsigned long long) start, 113 ((unsigned long long) sz_min) / SZ_1M, 114 ((unsigned long long) sz_max) / SZ_1M); 115 116 memblock_add(start, size); 117 } 118 119 /* 120 * Manage initrd 121 */ 122 #ifdef CONFIG_BLK_DEV_INITRD 123 124 static int __init rd_start_early(char *p) 125 { 126 unsigned long start = memparse(p, &p); 127 128 #ifdef CONFIG_64BIT 129 /* Guess if the sign extension was forgotten by bootloader */ 130 if (start < XKPHYS) 131 start = (int)start; 132 #endif 133 initrd_start = start; 134 initrd_end += start; 135 return 0; 136 } 137 early_param("rd_start", rd_start_early); 138 139 static int __init rd_size_early(char *p) 140 { 141 initrd_end += memparse(p, &p); 142 return 0; 143 } 144 early_param("rd_size", rd_size_early); 145 146 /* it returns the next free pfn after initrd */ 147 static unsigned long __init init_initrd(void) 148 { 149 unsigned long end; 150 151 /* 152 * Board specific code or command line parser should have 153 * already set up initrd_start and initrd_end. In these cases 154 * perfom sanity checks and use them if all looks good. 155 */ 156 if (!initrd_start || initrd_end <= initrd_start) 157 goto disable; 158 159 if (initrd_start & ~PAGE_MASK) { 160 pr_err("initrd start must be page aligned\n"); 161 goto disable; 162 } 163 164 /* 165 * Sanitize initrd addresses. For example firmware 166 * can't guess if they need to pass them through 167 * 64-bits values if the kernel has been built in pure 168 * 32-bit. We need also to switch from KSEG0 to XKPHYS 169 * addresses now, so the code can now safely use __pa(). 170 */ 171 end = __pa(initrd_end); 172 initrd_end = (unsigned long)__va(end); 173 initrd_start = (unsigned long)__va(__pa(initrd_start)); 174 175 if (initrd_start < PAGE_OFFSET) { 176 pr_err("initrd start < PAGE_OFFSET\n"); 177 goto disable; 178 } 179 180 ROOT_DEV = Root_RAM0; 181 return PFN_UP(end); 182 disable: 183 initrd_start = 0; 184 initrd_end = 0; 185 return 0; 186 } 187 188 /* In some conditions (e.g. big endian bootloader with a little endian 189 kernel), the initrd might appear byte swapped. Try to detect this and 190 byte swap it if needed. */ 191 static void __init maybe_bswap_initrd(void) 192 { 193 #if defined(CONFIG_CPU_CAVIUM_OCTEON) 194 u64 buf; 195 196 /* Check for CPIO signature */ 197 if (!memcmp((void *)initrd_start, "070701", 6)) 198 return; 199 200 /* Check for compressed initrd */ 201 if (decompress_method((unsigned char *)initrd_start, 8, NULL)) 202 return; 203 204 /* Try again with a byte swapped header */ 205 buf = swab64p((u64 *)initrd_start); 206 if (!memcmp(&buf, "070701", 6) || 207 decompress_method((unsigned char *)(&buf), 8, NULL)) { 208 unsigned long i; 209 210 pr_info("Byteswapped initrd detected\n"); 211 for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8) 212 swab64s((u64 *)i); 213 } 214 #endif 215 } 216 217 static void __init finalize_initrd(void) 218 { 219 unsigned long size = initrd_end - initrd_start; 220 221 if (size == 0) { 222 printk(KERN_INFO "Initrd not found or empty"); 223 goto disable; 224 } 225 if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { 226 printk(KERN_ERR "Initrd extends beyond end of memory"); 227 goto disable; 228 } 229 230 maybe_bswap_initrd(); 231 232 memblock_reserve(__pa(initrd_start), size); 233 initrd_below_start_ok = 1; 234 235 pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", 236 initrd_start, size); 237 return; 238 disable: 239 printk(KERN_CONT " - disabling initrd\n"); 240 initrd_start = 0; 241 initrd_end = 0; 242 } 243 244 #else /* !CONFIG_BLK_DEV_INITRD */ 245 246 static unsigned long __init init_initrd(void) 247 { 248 return 0; 249 } 250 251 #define finalize_initrd() do {} while (0) 252 253 #endif 254 255 /* 256 * Initialize the bootmem allocator. It also setup initrd related data 257 * if needed. 258 */ 259 #if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA)) 260 261 static void __init bootmem_init(void) 262 { 263 init_initrd(); 264 finalize_initrd(); 265 } 266 267 #else /* !CONFIG_SGI_IP27 */ 268 269 static void __init bootmem_init(void) 270 { 271 phys_addr_t ramstart, ramend; 272 unsigned long start, end; 273 int i; 274 275 ramstart = memblock_start_of_DRAM(); 276 ramend = memblock_end_of_DRAM(); 277 278 /* 279 * Sanity check any INITRD first. We don't take it into account 280 * for bootmem setup initially, rely on the end-of-kernel-code 281 * as our memory range starting point. Once bootmem is inited we 282 * will reserve the area used for the initrd. 283 */ 284 init_initrd(); 285 286 /* Reserve memory occupied by kernel. */ 287 memblock_reserve(__pa_symbol(&_text), 288 __pa_symbol(&_end) - __pa_symbol(&_text)); 289 290 /* max_low_pfn is not a number of pages but the end pfn of low mem */ 291 292 #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET 293 ARCH_PFN_OFFSET = PFN_UP(ramstart); 294 #else 295 /* 296 * Reserve any memory between the start of RAM and PHYS_OFFSET 297 */ 298 if (ramstart > PHYS_OFFSET) 299 memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET); 300 301 if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) { 302 pr_info("Wasting %lu bytes for tracking %lu unused pages\n", 303 (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)), 304 (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET)); 305 } 306 #endif 307 308 min_low_pfn = ARCH_PFN_OFFSET; 309 max_pfn = PFN_DOWN(ramend); 310 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { 311 /* 312 * Skip highmem here so we get an accurate max_low_pfn if low 313 * memory stops short of high memory. 314 * If the region overlaps HIGHMEM_START, end is clipped so 315 * max_pfn excludes the highmem portion. 316 */ 317 if (start >= PFN_DOWN(HIGHMEM_START)) 318 continue; 319 if (end > PFN_DOWN(HIGHMEM_START)) 320 end = PFN_DOWN(HIGHMEM_START); 321 if (end > max_low_pfn) 322 max_low_pfn = end; 323 } 324 325 if (min_low_pfn >= max_low_pfn) 326 panic("Incorrect memory mapping !!!"); 327 328 if (max_pfn > PFN_DOWN(HIGHMEM_START)) { 329 #ifdef CONFIG_HIGHMEM 330 highstart_pfn = PFN_DOWN(HIGHMEM_START); 331 highend_pfn = max_pfn; 332 #else 333 max_low_pfn = PFN_DOWN(HIGHMEM_START); 334 max_pfn = max_low_pfn; 335 #endif 336 } 337 338 /* 339 * Reserve initrd memory if needed. 340 */ 341 finalize_initrd(); 342 } 343 344 #endif /* CONFIG_SGI_IP27 */ 345 346 static int usermem __initdata; 347 348 static int __init early_parse_mem(char *p) 349 { 350 phys_addr_t start, size; 351 352 if (!p) { 353 pr_err("mem parameter is empty, do nothing\n"); 354 return -EINVAL; 355 } 356 357 /* 358 * If a user specifies memory size, we 359 * blow away any automatically generated 360 * size. 361 */ 362 if (usermem == 0) { 363 usermem = 1; 364 memblock_remove(memblock_start_of_DRAM(), 365 memblock_end_of_DRAM() - memblock_start_of_DRAM()); 366 } 367 start = 0; 368 size = memparse(p, &p); 369 if (*p == '@') 370 start = memparse(p + 1, &p); 371 372 if (IS_ENABLED(CONFIG_NUMA)) 373 memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE); 374 else 375 memblock_add(start, size); 376 377 return 0; 378 } 379 early_param("mem", early_parse_mem); 380 381 static int __init early_parse_memmap(char *p) 382 { 383 char *oldp; 384 u64 start_at, mem_size; 385 386 if (!p) 387 return -EINVAL; 388 389 if (!strncmp(p, "exactmap", 8)) { 390 pr_err("\"memmap=exactmap\" invalid on MIPS\n"); 391 return 0; 392 } 393 394 oldp = p; 395 mem_size = memparse(p, &p); 396 if (p == oldp) 397 return -EINVAL; 398 399 if (*p == '@') { 400 start_at = memparse(p+1, &p); 401 memblock_add(start_at, mem_size); 402 } else if (*p == '#') { 403 pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n"); 404 return -EINVAL; 405 } else if (*p == '$') { 406 start_at = memparse(p+1, &p); 407 memblock_add(start_at, mem_size); 408 memblock_reserve(start_at, mem_size); 409 } else { 410 pr_err("\"memmap\" invalid format!\n"); 411 return -EINVAL; 412 } 413 414 if (*p == '\0') { 415 usermem = 1; 416 return 0; 417 } else 418 return -EINVAL; 419 } 420 early_param("memmap", early_parse_memmap); 421 422 static void __init mips_reserve_vmcore(void) 423 { 424 #ifdef CONFIG_PROC_VMCORE 425 phys_addr_t start, end; 426 u64 i; 427 428 if (!elfcorehdr_size) { 429 for_each_mem_range(i, &start, &end) { 430 if (elfcorehdr_addr >= start && elfcorehdr_addr < end) { 431 /* 432 * Reserve from the elf core header to the end of 433 * the memory segment, that should all be kdump 434 * reserved memory. 435 */ 436 elfcorehdr_size = end - elfcorehdr_addr; 437 break; 438 } 439 } 440 } 441 442 pr_info("Reserving %ldKB of memory at %ldKB for kdump\n", 443 (unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10); 444 445 memblock_reserve(elfcorehdr_addr, elfcorehdr_size); 446 #endif 447 } 448 449 #ifdef CONFIG_KEXEC 450 451 /* 64M alignment for crash kernel regions */ 452 #define CRASH_ALIGN SZ_64M 453 #define CRASH_ADDR_MAX SZ_512M 454 455 static void __init mips_parse_crashkernel(void) 456 { 457 unsigned long long total_mem; 458 unsigned long long crash_size, crash_base; 459 int ret; 460 461 total_mem = memblock_phys_mem_size(); 462 ret = parse_crashkernel(boot_command_line, total_mem, 463 &crash_size, &crash_base, 464 NULL, NULL); 465 if (ret != 0 || crash_size <= 0) 466 return; 467 468 if (crash_base <= 0) { 469 crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, 470 CRASH_ALIGN, 471 CRASH_ADDR_MAX); 472 if (!crash_base) { 473 pr_warn("crashkernel reservation failed - No suitable area found.\n"); 474 return; 475 } 476 } else { 477 unsigned long long start; 478 479 start = memblock_phys_alloc_range(crash_size, 1, 480 crash_base, 481 crash_base + crash_size); 482 if (start != crash_base) { 483 pr_warn("Invalid memory region reserved for crash kernel\n"); 484 return; 485 } 486 } 487 488 crashk_res.start = crash_base; 489 crashk_res.end = crash_base + crash_size - 1; 490 } 491 492 static void __init request_crashkernel(struct resource *res) 493 { 494 int ret; 495 496 if (crashk_res.start == crashk_res.end) 497 return; 498 499 ret = request_resource(res, &crashk_res); 500 if (!ret) 501 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", 502 (unsigned long)(resource_size(&crashk_res) >> 20), 503 (unsigned long)(crashk_res.start >> 20)); 504 } 505 #else /* !defined(CONFIG_KEXEC) */ 506 static void __init mips_parse_crashkernel(void) 507 { 508 } 509 510 static void __init request_crashkernel(struct resource *res) 511 { 512 } 513 #endif /* !defined(CONFIG_KEXEC) */ 514 515 static void __init check_kernel_sections_mem(void) 516 { 517 phys_addr_t start = __pa_symbol(&_text); 518 phys_addr_t size = __pa_symbol(&_end) - start; 519 520 if (!memblock_is_region_memory(start, size)) { 521 pr_info("Kernel sections are not in the memory maps\n"); 522 memblock_add(start, size); 523 } 524 } 525 526 static void __init bootcmdline_append(const char *s, size_t max) 527 { 528 if (!s[0] || !max) 529 return; 530 531 if (boot_command_line[0]) 532 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); 533 534 strlcat(boot_command_line, s, max); 535 } 536 537 #ifdef CONFIG_OF_EARLY_FLATTREE 538 539 static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname, 540 int depth, void *data) 541 { 542 bool *dt_bootargs = data; 543 const char *p; 544 int l; 545 546 if (depth != 1 || !data || 547 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 548 return 0; 549 550 p = of_get_flat_dt_prop(node, "bootargs", &l); 551 if (p != NULL && l > 0) { 552 bootcmdline_append(p, min(l, COMMAND_LINE_SIZE)); 553 *dt_bootargs = true; 554 } 555 556 return 1; 557 } 558 559 #endif /* CONFIG_OF_EARLY_FLATTREE */ 560 561 static void __init bootcmdline_init(void) 562 { 563 bool dt_bootargs = false; 564 565 /* 566 * If CMDLINE_OVERRIDE is enabled then initializing the command line is 567 * trivial - we simply use the built-in command line unconditionally & 568 * unmodified. 569 */ 570 if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) { 571 strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 572 return; 573 } 574 575 /* 576 * If the user specified a built-in command line & 577 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is 578 * prepended to arguments from the bootloader or DT so we'll copy them 579 * to the start of boot_command_line here. Otherwise, empty 580 * boot_command_line to undo anything early_init_dt_scan_chosen() did. 581 */ 582 if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) 583 strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 584 else 585 boot_command_line[0] = 0; 586 587 #ifdef CONFIG_OF_EARLY_FLATTREE 588 /* 589 * If we're configured to take boot arguments from DT, look for those 590 * now. 591 */ 592 if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) || 593 IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)) 594 of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs); 595 #endif 596 597 /* 598 * If we didn't get any arguments from DT (regardless of whether that's 599 * because we weren't configured to look for them, or because we looked 600 * & found none) then we'll take arguments from the bootloader. 601 * plat_mem_setup() should have filled arcs_cmdline with arguments from 602 * the bootloader. 603 */ 604 if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs) 605 bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE); 606 607 /* 608 * If the user specified a built-in command line & we didn't already 609 * prepend it, we append it to boot_command_line here. 610 */ 611 if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && 612 !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) 613 bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE); 614 } 615 616 /* 617 * arch_mem_init - initialize memory management subsystem 618 * 619 * o plat_mem_setup() detects the memory configuration and will record detected 620 * memory areas using memblock_add. 621 * 622 * At this stage the memory configuration of the system is known to the 623 * kernel but generic memory management system is still entirely uninitialized. 624 * 625 * o bootmem_init() 626 * o sparse_init() 627 * o paging_init() 628 * o dma_contiguous_reserve() 629 * 630 * At this stage the bootmem allocator is ready to use. 631 * 632 * NOTE: historically plat_mem_setup did the entire platform initialization. 633 * This was rather impractical because it meant plat_mem_setup had to 634 * get away without any kind of memory allocator. To keep old code from 635 * breaking plat_setup was just renamed to plat_mem_setup and a second platform 636 * initialization hook for anything else was introduced. 637 */ 638 static void __init arch_mem_init(char **cmdline_p) 639 { 640 /* call board setup routine */ 641 plat_mem_setup(); 642 memblock_set_bottom_up(true); 643 644 bootcmdline_init(); 645 strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); 646 *cmdline_p = command_line; 647 648 parse_early_param(); 649 650 if (usermem) 651 pr_info("User-defined physical RAM map overwrite\n"); 652 653 check_kernel_sections_mem(); 654 655 early_init_fdt_reserve_self(); 656 early_init_fdt_scan_reserved_mem(); 657 658 #ifndef CONFIG_NUMA 659 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); 660 #endif 661 bootmem_init(); 662 663 /* 664 * Prevent memblock from allocating high memory. 665 * This cannot be done before max_low_pfn is detected, so up 666 * to this point is possible to only reserve physical memory 667 * with memblock_reserve; memblock_alloc* can be used 668 * only after this point 669 */ 670 memblock_set_current_limit(PFN_PHYS(max_low_pfn)); 671 672 mips_reserve_vmcore(); 673 674 mips_parse_crashkernel(); 675 device_tree_init(); 676 677 /* 678 * In order to reduce the possibility of kernel panic when failed to 679 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate 680 * low memory as small as possible before plat_swiotlb_setup(), so 681 * make sparse_init() using top-down allocation. 682 */ 683 memblock_set_bottom_up(false); 684 sparse_init(); 685 memblock_set_bottom_up(true); 686 687 plat_swiotlb_setup(); 688 689 dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); 690 691 /* Reserve for hibernation. */ 692 memblock_reserve(__pa_symbol(&__nosave_begin), 693 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); 694 695 early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); 696 } 697 698 static void __init resource_init(void) 699 { 700 phys_addr_t start, end; 701 u64 i; 702 703 if (UNCAC_BASE != IO_BASE) 704 return; 705 706 code_resource.start = __pa_symbol(&_text); 707 code_resource.end = __pa_symbol(&_etext) - 1; 708 data_resource.start = __pa_symbol(&_etext); 709 data_resource.end = __pa_symbol(&_edata) - 1; 710 bss_resource.start = __pa_symbol(&__bss_start); 711 bss_resource.end = __pa_symbol(&__bss_stop) - 1; 712 713 for_each_mem_range(i, &start, &end) { 714 struct resource *res; 715 716 res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); 717 if (!res) 718 panic("%s: Failed to allocate %zu bytes\n", __func__, 719 sizeof(struct resource)); 720 721 res->start = start; 722 /* 723 * In memblock, end points to the first byte after the 724 * range while in resourses, end points to the last byte in 725 * the range. 726 */ 727 res->end = end - 1; 728 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 729 res->name = "System RAM"; 730 731 request_resource(&iomem_resource, res); 732 733 /* 734 * We don't know which RAM region contains kernel data, 735 * so we try it repeatedly and let the resource manager 736 * test it. 737 */ 738 request_resource(res, &code_resource); 739 request_resource(res, &data_resource); 740 request_resource(res, &bss_resource); 741 request_crashkernel(res); 742 } 743 } 744 745 #ifdef CONFIG_SMP 746 static void __init prefill_possible_map(void) 747 { 748 int i, possible = num_possible_cpus(); 749 750 if (possible > nr_cpu_ids) 751 possible = nr_cpu_ids; 752 753 for (i = 0; i < possible; i++) 754 set_cpu_possible(i, true); 755 for (; i < NR_CPUS; i++) 756 set_cpu_possible(i, false); 757 758 set_nr_cpu_ids(possible); 759 } 760 #else 761 static inline void prefill_possible_map(void) {} 762 #endif 763 764 static void __init setup_rng_seed(void) 765 { 766 char *rng_seed_hex = fw_getenv("rngseed"); 767 u8 rng_seed[512]; 768 size_t len; 769 770 if (!rng_seed_hex) 771 return; 772 773 len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2); 774 if (hex2bin(rng_seed, rng_seed_hex, len)) 775 return; 776 777 add_bootloader_randomness(rng_seed, len); 778 memzero_explicit(rng_seed, len); 779 memzero_explicit(rng_seed_hex, len * 2); 780 } 781 782 void __init setup_arch(char **cmdline_p) 783 { 784 cpu_probe(); 785 mips_cm_probe(); 786 prom_init(); 787 788 setup_early_fdc_console(); 789 #ifdef CONFIG_EARLY_PRINTK 790 setup_early_printk(); 791 #endif 792 cpu_report(); 793 if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) 794 check_bugs64_early(); 795 796 #if defined(CONFIG_VT) 797 #if defined(CONFIG_VGA_CONSOLE) 798 conswitchp = &vga_con; 799 #endif 800 #endif 801 802 arch_mem_init(cmdline_p); 803 dmi_setup(); 804 805 resource_init(); 806 plat_smp_setup(); 807 prefill_possible_map(); 808 809 cpu_cache_init(); 810 paging_init(); 811 812 memblock_dump_all(); 813 814 setup_rng_seed(); 815 } 816 817 unsigned long kernelsp[NR_CPUS]; 818 unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; 819 820 #ifdef CONFIG_DEBUG_FS 821 struct dentry *mips_debugfs_dir; 822 static int __init debugfs_mips(void) 823 { 824 mips_debugfs_dir = debugfs_create_dir("mips", NULL); 825 return 0; 826 } 827 arch_initcall(debugfs_mips); 828 #endif 829 830 #ifdef CONFIG_DMA_NONCOHERENT 831 static int __init setcoherentio(char *str) 832 { 833 dma_default_coherent = true; 834 pr_info("Hardware DMA cache coherency (command line)\n"); 835 return 0; 836 } 837 early_param("coherentio", setcoherentio); 838 839 static int __init setnocoherentio(char *str) 840 { 841 dma_default_coherent = false; 842 pr_info("Software DMA cache coherency (command line)\n"); 843 return 0; 844 } 845 early_param("nocoherentio", setnocoherentio); 846 #endif 847 848 void __init arch_cpu_finalize_init(void) 849 { 850 unsigned int cpu = smp_processor_id(); 851 852 cpu_data[cpu].udelay_val = loops_per_jiffy; 853 check_bugs32(); 854 855 if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) 856 check_bugs64(); 857 } 858