/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

/* Fallback memory size used by init_tags when no memory tag is supplied. */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Value of the "fpe=" command line option, consumed by the FP emulator. */
char fpe_type[8];

/*
 * Record the "fpe=" kernel parameter.
 * NOTE(review): copies a fixed 8 bytes from `line` regardless of the
 * argument's actual length — assumes the command line buffer extends at
 * least that far past the option value; confirm against __setup() callers.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
/* Linker-provided section boundary symbols; only their addresses are used. */
extern void _stext, _text, _etext, __data_start, _edata, _end;

/* CPU ID register value; read before setup_processor() runs. */
unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

/* Board revision and serial number, filled in from the boot tag list. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* bits advertised to user space (see hwcap_str below). */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


/*
 * On MULTI_* kernels these hold the per-implementation function tables,
 * copied from the matching proc_info_list entry in setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

/*
 * Per-CPU stacks for the re-entrant exception modes; cpu_init() points
 * the IRQ/abort/undefined mode stack pointers at these.  3 words each —
 * presumably all the exception entry assembler needs before switching
 * to the SVC stack (TODO confirm against entry-armv.S).
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

/* Physical initrd location, set by "initrd=" or ATAG_INITRD/ATAG_INITRD2. */
unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

/* Memory bank layout accumulated from tags / "mem=" before paging_init(). */
static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/*
 * Run-time endianness probe: the first byte of the long reads as 'l' on
 * little-endian and 'b' on big-endian builds.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC parallel-port I/O ranges, reserved on the machine's request. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

/* Decoded cache-type register field -> human readable policy name. */
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

/* Cache-type field -> required cache clean method. */
static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

/* Cache-type field -> cache lockdown format. */
static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

/* Indexed by the CPU_ARCH_* value returned by cpu_architecture(). */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/* Field extractors for the cp15 cache type register value. */
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

/* Field extractors for a 12-bit I/D cache size sub-field. */
#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

/*
 * Print the geometry (total size, associativity, line length, set count)
 * encoded in one cache size sub-field.  The M bit selects a multiplier of
 * 3/2 versus 1, hence the mult/2 scaling below.
 */
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

/*
 * Print this CPU's cache configuration at boot.  A cache type register
 * that reads back as the CPU ID means the CPU has no cache type register,
 * in which case nothing is printed.
 */
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			/* Harvard: separate instruction and data caches */
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

/*
 * Derive a CPU_ARCH_* constant from the ID register layout.  Three ID
 * schemes exist: pre-ARM7 (no architecture field), ARM7 (T bit selects
 * v4T vs v3), and the post-ARM7 / revised CPUID encodings.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

/*
 * Identify the boot CPU from its ID register, install its function
 * tables (MULTI_* builds), publish its name/hwcaps, and run the
 * implementation's proc init hook.  Halts if the CPU is unsupported.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);	/* no usable CPU support compiled in: hang */
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	/* ENDIANNESS appends 'l' or 'b' to the uname machine / ELF platform */
	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
370 */ 371 void cpu_init(void) 372 { 373 unsigned int cpu = smp_processor_id(); 374 struct stack *stk = &stacks[cpu]; 375 376 if (cpu >= NR_CPUS) { 377 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu); 378 BUG(); 379 } 380 381 if (system_state == SYSTEM_BOOTING) 382 dump_cpu_info(cpu); 383 384 /* 385 * setup stacks for re-entrant exception handlers 386 */ 387 __asm__ ( 388 "msr cpsr_c, %1\n\t" 389 "add sp, %0, %2\n\t" 390 "msr cpsr_c, %3\n\t" 391 "add sp, %0, %4\n\t" 392 "msr cpsr_c, %5\n\t" 393 "add sp, %0, %6\n\t" 394 "msr cpsr_c, %7" 395 : 396 : "r" (stk), 397 "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), 398 "I" (offsetof(struct stack, irq[0])), 399 "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), 400 "I" (offsetof(struct stack, abt[0])), 401 "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE), 402 "I" (offsetof(struct stack, und[0])), 403 "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) 404 : "r14"); 405 } 406 407 static struct machine_desc * __init setup_machine(unsigned int nr) 408 { 409 struct machine_desc *list; 410 411 /* 412 * locate machine in the list of supported machines. 413 */ 414 list = lookup_machine_type(nr); 415 if (!list) { 416 printk("Machine configuration botched (nr %d), unable " 417 "to continue.\n", nr); 418 while (1); 419 } 420 421 printk("Machine: %s\n", list->name); 422 423 return list; 424 } 425 426 static void __init early_initrd(char **p) 427 { 428 unsigned long start, size; 429 430 start = memparse(*p, p); 431 if (**p == ',') { 432 size = memparse((*p) + 1, p); 433 434 phys_initrd_start = start; 435 phys_initrd_size = size; 436 } 437 } 438 __early_param("initrd=", early_initrd); 439 440 static void __init arm_add_memory(unsigned long start, unsigned long size) 441 { 442 /* 443 * Ensure that start/size are aligned to a page boundary. 444 * Size is appropriately rounded down, start is rounded up. 
445 */ 446 size -= start & ~PAGE_MASK; 447 448 meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start); 449 meminfo.bank[meminfo.nr_banks].size = size & PAGE_MASK; 450 meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start); 451 meminfo.nr_banks += 1; 452 } 453 454 /* 455 * Pick out the memory size. We look for mem=size@start, 456 * where start and size are "size[KkMm]" 457 */ 458 static void __init early_mem(char **p) 459 { 460 static int usermem __initdata = 0; 461 unsigned long size, start; 462 463 /* 464 * If the user specifies memory size, we 465 * blow away any automatically generated 466 * size. 467 */ 468 if (usermem == 0) { 469 usermem = 1; 470 meminfo.nr_banks = 0; 471 } 472 473 start = PHYS_OFFSET; 474 size = memparse(*p, p); 475 if (**p == '@') 476 start = memparse(*p + 1, p); 477 478 arm_add_memory(start, size); 479 } 480 __early_param("mem=", early_mem); 481 482 /* 483 * Initial parsing of the command line. 484 */ 485 static void __init parse_cmdline(char **cmdline_p, char *from) 486 { 487 char c = ' ', *to = command_line; 488 int len = 0; 489 490 for (;;) { 491 if (c == ' ') { 492 extern struct early_params __early_begin, __early_end; 493 struct early_params *p; 494 495 for (p = &__early_begin; p < &__early_end; p++) { 496 int len = strlen(p->arg); 497 498 if (memcmp(from, p->arg, len) == 0) { 499 if (to != command_line) 500 to -= 1; 501 from += len; 502 p->fn(&from); 503 504 while (*from != ' ' && *from != '\0') 505 from++; 506 break; 507 } 508 } 509 } 510 c = *from++; 511 if (!c) 512 break; 513 if (COMMAND_LINE_SIZE <= ++len) 514 break; 515 *to++ = c; 516 } 517 *to = '\0'; 518 *cmdline_p = command_line; 519 } 520 521 static void __init 522 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) 523 { 524 #ifdef CONFIG_BLK_DEV_RAM 525 extern int rd_size, rd_image_start, rd_prompt, rd_doload; 526 527 rd_image_start = image_start; 528 rd_prompt = prompt; 529 rd_doload = doload; 530 531 if (rd_sz) 532 rd_size = rd_sz; 533 #endif 
534 } 535 536 static void __init 537 request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc) 538 { 539 struct resource *res; 540 int i; 541 542 kernel_code.start = virt_to_phys(&_text); 543 kernel_code.end = virt_to_phys(&_etext - 1); 544 kernel_data.start = virt_to_phys(&__data_start); 545 kernel_data.end = virt_to_phys(&_end - 1); 546 547 for (i = 0; i < mi->nr_banks; i++) { 548 unsigned long virt_start, virt_end; 549 550 if (mi->bank[i].size == 0) 551 continue; 552 553 virt_start = __phys_to_virt(mi->bank[i].start); 554 virt_end = virt_start + mi->bank[i].size - 1; 555 556 res = alloc_bootmem_low(sizeof(*res)); 557 res->name = "System RAM"; 558 res->start = __virt_to_phys(virt_start); 559 res->end = __virt_to_phys(virt_end); 560 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 561 562 request_resource(&iomem_resource, res); 563 564 if (kernel_code.start >= res->start && 565 kernel_code.end <= res->end) 566 request_resource(res, &kernel_code); 567 if (kernel_data.start >= res->start && 568 kernel_data.end <= res->end) 569 request_resource(res, &kernel_data); 570 } 571 572 if (mdesc->video_start) { 573 video_ram.start = mdesc->video_start; 574 video_ram.end = mdesc->video_end; 575 request_resource(&iomem_resource, &video_ram); 576 } 577 578 /* 579 * Some machines don't have the possibility of ever 580 * possessing lp0, lp1 or lp2 581 */ 582 if (mdesc->reserve_lp0) 583 request_resource(&ioport_resource, &lp0); 584 if (mdesc->reserve_lp1) 585 request_resource(&ioport_resource, &lp1); 586 if (mdesc->reserve_lp2) 587 request_resource(&ioport_resource, &lp2); 588 } 589 590 /* 591 * Tag parsing. 592 * 593 * This is the new way of passing data to the kernel at boot time. Rather 594 * than passing a fixed inflexible structure to the kernel, we pass a list 595 * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE 596 * tag for the list to be recognised (to distinguish the tagged list from 597 * a param_struct). 
The list is terminated with a zero-length tag (this tag 598 * is not parsed in any way). 599 */ 600 static int __init parse_tag_core(const struct tag *tag) 601 { 602 if (tag->hdr.size > 2) { 603 if ((tag->u.core.flags & 1) == 0) 604 root_mountflags &= ~MS_RDONLY; 605 ROOT_DEV = old_decode_dev(tag->u.core.rootdev); 606 } 607 return 0; 608 } 609 610 __tagtable(ATAG_CORE, parse_tag_core); 611 612 static int __init parse_tag_mem32(const struct tag *tag) 613 { 614 if (meminfo.nr_banks >= NR_BANKS) { 615 printk(KERN_WARNING 616 "Ignoring memory bank 0x%08x size %dKB\n", 617 tag->u.mem.start, tag->u.mem.size / 1024); 618 return -EINVAL; 619 } 620 arm_add_memory(tag->u.mem.start, tag->u.mem.size); 621 return 0; 622 } 623 624 __tagtable(ATAG_MEM, parse_tag_mem32); 625 626 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) 627 struct screen_info screen_info = { 628 .orig_video_lines = 30, 629 .orig_video_cols = 80, 630 .orig_video_mode = 0, 631 .orig_video_ega_bx = 0, 632 .orig_video_isVGA = 1, 633 .orig_video_points = 8 634 }; 635 636 static int __init parse_tag_videotext(const struct tag *tag) 637 { 638 screen_info.orig_x = tag->u.videotext.x; 639 screen_info.orig_y = tag->u.videotext.y; 640 screen_info.orig_video_page = tag->u.videotext.video_page; 641 screen_info.orig_video_mode = tag->u.videotext.video_mode; 642 screen_info.orig_video_cols = tag->u.videotext.video_cols; 643 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx; 644 screen_info.orig_video_lines = tag->u.videotext.video_lines; 645 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga; 646 screen_info.orig_video_points = tag->u.videotext.video_points; 647 return 0; 648 } 649 650 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); 651 #endif 652 653 static int __init parse_tag_ramdisk(const struct tag *tag) 654 { 655 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0, 656 (tag->u.ramdisk.flags & 2) == 0, 657 tag->u.ramdisk.start, tag->u.ramdisk.size); 658 return 0; 659 } 660 661 
__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

/* ATAG_INITRD: legacy form that passes a *virtual* initrd address. */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
	       "please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

/* ATAG_INITRD2: preferred form; start is already a physical address. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/* ATAG_SERIAL: 64-bit board serial number (shown in /proc/cpuinfo). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION: board revision (shown in /proc/cpuinfo). */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

/* ATAG_CMDLINE: replaces the compiled-in default command line. */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* true iff the loop found a handler (broke before the end) */
	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	/* walk until the zero-length terminator tag */
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
			       "Ignoring unrecognised tag 0x%08x\n",
			       t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core core;
	struct tag_header hdr2;
	struct tag_mem32 mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

/* Machine-specific device registration hook, run as an arch initcall. */
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

/*
 * Main architecture setup entry point, called from start_kernel().
 * Identifies the CPU and machine, parses boot tags and the command
 * line, initialises paging and resources, and records the machine's
 * IRQ/timer/init hooks.  Ordering here matters: the command line must
 * be parsed before paging_init() so "mem=" can adjust meminfo.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	/* conversion failed too: fall back to the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup already supplied memory banks: drop ATAG_MEM tags */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


/* Register a sysfs CPU device for every possible CPU. */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);

/* Names for the elf_hwcap bits, in bit order; NULL terminates the list. */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};

/*
 * Emit one cache's geometry into /proc/cpuinfo; same field decoding
 * as dump_cache() above.
 */
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

/* /proc/cpuinfo show routine. */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* same three ID register layouts as cpu_architecture() above */
	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		/* equal values mean there is no cache type register */
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

/* seq_file iterator: /proc/cpuinfo is produced in a single c_show() call */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};