/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

/* Default memory size assumed when no mem tag / mem= parameter is given. */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Name of the FP emulator requested on the command line ("fpe=").
 * Consumed by the FP emulation code elsewhere, not in this file. */
char fpe_type[8];

/*
 * Parse the "fpe=" command line option: stash at most 8 bytes of the
 * option string in fpe_type.  Note: copies a fixed 8 bytes, so the
 * stored value is not necessarily NUL-terminated.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the boot-loader supplied ATAG list (may be 0). */
unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Copies of the per-CPU-type function tables, filled in by
 * setup_processor() from the matching proc_info_list entry.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Small per-CPU stacks for the re-entrant exception modes; the stack
 * pointers for IRQ, abort and undef modes are pointed at these by
 * cpu_init() below.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Runtime endianness probe: ENDIANNESS reads the first byte of the long,
 * yielding 'l' on little-endian and 'b' on big-endian builds. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel port I/O ranges, reserved on request of the
 * machine descriptor (see request_standard_resources()). */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

/* Architecture names indexed by the CPU_ARCH_* value returned by
 * cpu_architecture(); printed in boot messages and /proc/cpuinfo. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/*
 * Decode the main ID register (read_cpuid_id()) into one of the
 * CPU_ARCH_* constants.  Handles the pre-ARM7, ARM7, old-format and
 * revised (ARMv6+) CPUID layouts.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes ARMv4T from ARMv3. */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

/*
 * Return non-zero if the instruction cache is aliasing, i.e. a single
 * way covers more than one page.  On ARMv7 this is computed from the
 * CCSIDR of the L1 I-cache (selected via CSSELR); on ARMv6 it is taken
 * from bit 11 of the cache type register.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

/*
 * Initialise the global 'cacheid' from the cache type register:
 * VIVT for pre-v6 cores, otherwise VIPT aliasing/non-aliasing with an
 * optional ASID-tagged or aliasing I-cache flag.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

/*
 * printf-style early boot message output.  The formatted string is
 * always sent through printk(); with CONFIG_DEBUG_LL it is also mirrored
 * to the low-level debug channel via printascii().
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

/*
 * Clear HWCAP_TLS on ARM1136 revisions earlier than r1p0, where the TLS
 * register is not usable.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only applies to ARM Ltd. 1136 parts (implementer 'A', arch 7). */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, abort and undef modes: switch to the mode with
	 * interrupts masked, point sp at the corresponding slot in this
	 * CPU's struct stack, then finally return to SVC mode.
	 */
	__asm__ (
	"msr cpsr_c, %1\n\t"
	"add r14, %0, %2\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %3\n\t"
	"add r14, %0, %4\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %5\n\t"
	"add r14, %0, %6\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

/*
 * Identify the CPU from its main ID register, copy the per-CPU-type
 * function tables, record the hwcaps/ELF platform strings, and run
 * cache-ID and per-CPU init.  Hangs if the CPU is not in the table
 * built from arch/arm/mm/proc-*.S.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

/*
 * Print every machine descriptor known to this kernel, then spin
 * forever.  Called when the boot-supplied machine ID is unknown;
 * cpu_relax() is deliberately avoided as it may require MMU setup.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

/*
 * Register a bank of memory with meminfo.  start/size are rounded to
 * page boundaries; returns -EINVAL if the bank table is full or the
 * rounded size is zero.
 */
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

/*
 * Forward the ATAG_RAMDISK parameters to the RAM-disk driver's
 * module-level knobs; a zero size leaves the driver default in place.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

/*
 * Register the standard resources: "System RAM" for each memblock
 * region (with kernel text/data as children where they fit), plus
 * optional video RAM and legacy parallel port ranges from the machine
 * descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

/* ATAG_MEM: one memory bank (start, size). */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: bootloader-described text console geometry/state. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

/* ATAG_RAMDISK: flags bit0 = don't load, bit1 = don't prompt. */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

/* ATAG_SERIAL: 64-bit board serial number, exposed via /proc/cpuinfo. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION: board revision, exposed via /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

/*
 * ATAG_CMDLINE: the bootloader command line.  Depending on config it is
 * appended to, ignored in favour of, or replaces the built-in default
 * command line.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.  Returns false if the tag was not recognised.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults: a minimal valid tag list (core + one MEM_SIZE
 * memory bank) used when the bootloader supplies no usable tag list.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
/* Total low memory in bytes, derived from the pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Neutralise every ATAG_MEM tag in the list by rewriting it to
 * ATAG_NONE; used when the machine fixup has already populated meminfo.
 */
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

/*
 * Locate the machine descriptor for machine number 'nr', find the best
 * available tag list (bootloader atags, machine default boot_params, or
 * the built-in init_tags), run the machine fixup, parse the tags, and
 * seed boot_command_line.  Does not return if 'nr' is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* If the fixup already registered memory banks, drop the
		 * ATAG_MEM tags so they are not applied twice. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}


/*
 * Main architecture-specific boot-time setup, called from start_kernel().
 * Identifies the CPU, selects the machine descriptor (device tree first,
 * ATAGs as fallback), parses early parameters, initialises memory/paging
 * and resources, and runs the machine's early init hook.  Ordering here
 * is significant.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}


/* Register every possible CPU with the sysfs CPU subsystem. */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory (populated elsewhere). */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

/* Feature names indexed by HWCAP_* bit position, for /proc/cpuinfo. */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

/* seq_file show routine for /proc/cpuinfo. */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

/* seq_file iterator: /proc/cpuinfo is a single record, so yield one
 * dummy element at position 0 and stop. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};