xref: /linux/arch/arm/kernel/setup.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

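/*
 * Small per-CPU stacks for the IRQ, ABT and UND exception modes.  The
 * exception vector stubs only need a few words of mode-private scratch
 * space before they switch back to the SVC stack, hence three words
 * per mode; cpu_init() below points each mode's stack pointer at its
 * slot.
 */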
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
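/*
 * Run-time endianness probe: the union overlays four chars on an
 * unsigned long, so the low-order byte of .l reads back as 'l' on a
 * little-endian kernel and as 'b' on a big-endian one.  ENDIANNESS is
 * appended to the machine and ELF platform names in setup_processor()
 * (the trailing 'l' or 'b' seen in "uname -m" output).
 */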
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

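/*
 * Human-readable strings for the cache type register: cache_types[],
 * cache_clean[] and cache_lockdown[] are indexed by the 4-bit ctype
 * field extracted with CACHE_TYPE() below, while proc_arch[] maps the
 * value returned by cpu_architecture() to an architecture name.
 */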
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
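
/*
 * Layout of the pre-ARMv7 cache type register as decoded above:
 * bits [28:25] hold the cache type, bit 24 (S) selects separate
 * I and D caches, and bits [23:12]/[11:0] describe the D and I
 * caches respectively.  From each size field, dump_cache() and
 * c_show_cache() compute
 *
 *	cache size    = (2 + M) << (8 + size)	bytes
 *	associativity = ((2 + M) << assoc) >> 1
 *	line length   = 8 << len		bytes
 *
 * e.g. size=5, assoc=2, M=0, len=2 describes a 16KB 4-way cache
 * with 32-byte lines (128 sets).
 */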

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

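/*
 * Derive the CPU architecture from the main ID register value cached
 * in processor_id.  Old pre-ARM7 IDs carry no architecture field,
 * ARM7 parts use bit 23 to distinguish ARMv4T from ARMv3, later parts
 * encode the architecture in bits [18:16], and parts using the revised
 * CPUID scheme are classified here by the top nibble of the part
 * number (0xB.. parts map to ARMv6, 0xC.. parts to ARMv7, and so on).
 */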
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
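	 *
	 * Each msr/add pair below switches into IRQ, ABT and UND mode in
	 * turn (with IRQs and FIQs masked) and points that mode's banked
	 * stack pointer at the corresponding slot in this CPU's entry in
	 * stacks[], before returning to SVC mode.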
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
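/*
 * "initrd=" expects a physical start address and a size, for example
 * "initrd=0x00800000,8M" (memparse() accepts the usual K/M/G suffixes).
 * ATAG_INITRD2 below carries the same information when the bootloader
 * passes it as a tag instead.
 */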

static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
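/*
 * "mem=" may be given more than once, e.g. "mem=64M@0x20000000
 * mem=64M@0x28000000" describes two discontiguous banks; the first
 * occurrence discards any banks set up from the boot tags or the
 * machine fixup, and each one adds a bank via arm_add_memory().
 */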

/*
 * Initial parsing of the command line.
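 *
 * Each word of the boot command line is matched against the
 * __early_param handlers collected by the linker between __early_begin
 * and __early_end; matching options are handed to their handler and
 * stripped, and whatever remains is copied into command_line for the
 * later __setup() processing.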
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines may never have lp0, lp1 or lp2, so only
	 * reserve them when the machine descriptor asks us to.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed, inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
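 *
 * A minimal list as set up by a bootloader might look like this
 * (sketch only; sizes are in 32-bit words and include the header):
 *
 *	ATAG_CORE    (size 5: flags, pagesize, rootdev)
 *	ATAG_MEM     (size 4: size, start)	- one per memory bank
 *	ATAG_CMDLINE (variable size)
 *	ATAG_NONE    (size 0)			- terminator
 *
 * init_tags below builds a similar fallback list at compile time.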
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
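 *
 * If the bootloader supplies no usable tag list (or param_struct),
 * this compile-time list is used instead: a core tag, a single
 * MEM_SIZE bank at PHYS_OFFSET and the ATAG_NONE terminator.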
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

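	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000; dividing by
	 * (500000/HZ) and by (5000/HZ) % 100 yields the integer part
	 * and two decimal places without resorting to floating point.
	 */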
#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};