/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>

#include "compat.h"
#include "atags.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.
	 * The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0 || bank->node >= MAX_NUMNODES)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int arglen = strlen(p->arg);

				if (memcmp(from, p->arg, arglen) == 0) {
					if (to != command_line)
						to -= 1;
					from += arglen;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end   = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_data);
	kernel_data.end   = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};