xref: /linux/arch/arm/kernel/setup.c (revision 606d099cdd1080bbb50ea50dc52d98252f8f10a1)
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

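/*
 * Small per-CPU stacks for the exception modes.  cpu_init() points the
 * banked stack pointer of the IRQ, abort and undefined modes at the
 * corresponding three-word area below, giving the exception entry code
 * a small private save area on each CPU.
 */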
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
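/*
 * Byte-order probe: the union below overlays a four-character array on an
 * unsigned long, so the low-order byte of .l reads back as 'l' on a
 * little-endian CPU and 'b' on a big-endian one.  ENDIANNESS is appended
 * to the utsname machine string and to elf_platform in setup_processor().
 */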
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

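/*
 * The three tables below are all indexed by the "ctype" field of the
 * cache type register (extracted with CACHE_TYPE() further down): the
 * write policy, the cache clean method and the lockdown format that a
 * given ctype value implies.
 */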
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

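/*
 * The per-cache fields decode as:
 *   size  = mult << (8 + CACHE_SIZE) bytes, where mult is 2, or 3 when
 *           the M bit is set
 *   assoc = (mult << CACHE_ASSOC) >> 1
 *   line  = 8 << CACHE_LINE bytes
 *   sets  = 1 << (6 + CACHE_SIZE - CACHE_ASSOC - CACHE_LINE)
 *
 * For example, SIZE=5, ASSOC=2, LINE=2, M=0 decodes to a 16KB, 4-way
 * cache with 32-byte lines and 128 sets (128 * 4 * 32 = 16384 bytes).
 */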
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

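/*
 * Work out the CPU architecture generation from the main ID register.
 * Old pre-ARM7 parts and ARM7 parts are recognised by their fixed ID
 * layouts (ARM7 uses bit 23 to flag Thumb support, i.e. v4T vs v3);
 * later parts carry an architecture code in the ID register's
 * architecture field, and the final branch handles the revised CPUID
 * scheme used from ARMv6 onwards.
 */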
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
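	/*
	 * Each msr/add pair below switches the CPU into one exception mode
	 * (IRQ, then abort, then undefined) and points that mode's banked
	 * stack pointer at the matching area of this CPU's entry in
	 * stacks[]; the final msr drops back into SVC mode.
	 */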
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

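/*
 * Record one bank of physical memory in meminfo, rounding the start up
 * to a page boundary and trimming the size accordingly; the bank's node
 * is taken from PHYS_TO_NID(start).
 */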
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.  Arguments matching a handler
 * registered with __early_param() are passed to that handler and then
 * stripped from the command line handed to the rest of the kernel.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

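/*
 * Pass the ramdisk settings taken from the ATAG_RAMDISK tag through to
 * the RAM disk driver when CONFIG_BLK_DEV_RAM is enabled.
 */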
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

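/*
 * Register the standard memory resources: one "System RAM" region per
 * memory bank, with the kernel text and data claimed inside the bank
 * that contains them, plus optional video RAM and the legacy parallel
 * port ranges reserved by the machine descriptor.
 */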
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines never have lp0, lp1 or lp2.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults: a minimal tag list consisting of an ATAG_CORE,
 * a single ATAG_MEM bank of MEM_SIZE at PHYS_OFFSET, and the ATAG_NONE
 * terminator.  It is used when the boot loader supplies nothing usable.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

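/*
 * Machine-specific initialisation hook taken from the machine descriptor
 * in setup_arch(); customize_machine() runs it at arch_initcall time.
 */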
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

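/*
 * Main architecture-specific boot-time setup: identify the CPU and
 * machine, fetch and parse the boot tags (converting old-style
 * param_structs if necessary), parse the command line, set up paging
 * and the resource tree, and record the machine's IRQ, timer and
 * init_machine hooks for later use.
 */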
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

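/*
 * Register a CPU device for each possible CPU so that they are visible
 * under /sys/devices/system/cpu.
 */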
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);

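/*
 * Feature strings for /proc/cpuinfo; hwcap_str[n] corresponds to bit n
 * of elf_hwcap.
 */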
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	NULL
};

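/*
 * Print the geometry of one cache to /proc/cpuinfo, decoding the cache
 * type register fields exactly as dump_cache() does above.
 */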
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

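	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000.  The expressions below
	 * produce the integer part and a two-digit fractional part:
	 * lpj / (500000 / HZ) and (lpj / (5000 / HZ)) % 100.
	 */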
#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
985