/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
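
/*
 * The endian test works by overlaying the bytes 'l', '?', '?', 'b'
 * with an unsigned long: casting that long to a char keeps only its
 * least significant byte, which sits at the lowest address ('l') on a
 * little-endian CPU and at the highest address ('b') on a big-endian
 * one.  setup_processor() appends the resulting character to the
 * utsname machine and ELF platform strings (giving, for example,
 * "armv5tejl" on a little-endian ARMv5TEJ part).
 */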

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{ "Video RAM",   0,     0,     IORESOURCE_MEM			},
	{ "Kernel text", 0,     0,     IORESOURCE_MEM			},
	{ "Kernel data", 0,     0,     IORESOURCE_MEM			}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{ "reserved",    0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};
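
/*
 * The three tables above are indexed by the cache "ctype" field that
 * CACHE_TYPE() below extracts from the CP15 cache type register
 * (bits 28:25); dump_cpu_info() and the /proc/cpuinfo code use them
 * to turn that field into human-readable strings.
 */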

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
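
/*
 * Worked example of the decoding done below: a per-side cache size
 * field with size=5, assoc=2, M=0 and len=2 describes a cache of
 * 2 << (8 + 5) = 16384 bytes, (2 << 2) >> 1 = 4-way set associative,
 * with 8 << 2 = 32 byte lines and hence 1 << (6 + 5 - 2 - 2) = 128
 * sets (128 sets * 4 ways * 32 bytes = 16KB).
 */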

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}
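
/*
 * For instance, an ARM926EJ-S reports a main ID of 0x41069265: bit 19
 * is clear, so the third branch above applies, (0x41069265 >> 16) & 7
 * is 6, and adding CPU_ARCH_ARMv3 gives CPU_ARCH_ARMv5TEJ, which
 * setup_processor() below prints as "ARMv5TEJ" via proc_arch[].
 */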

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
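	/*
	 * The assembly below switches, with IRQs and FIQs masked, into
	 * IRQ, abort and undefined mode in turn, points each mode's
	 * banked stack pointer at the matching three-word slot of this
	 * CPU's struct stack, and then drops back into SVC mode.  r14
	 * is marked clobbered because every mode switch banks in that
	 * mode's own lr, so nothing may be kept live in r14 across the
	 * sequence.
	 */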
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}
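
/*
 * For example, a bank passed in with start 0x20000100 and size 0x00100000
 * (with 4K pages) is recorded as start 0x20001000, size 0x000ff000: the
 * 0x100 lead-in is subtracted from the size, the start is rounded up to
 * the next page boundary and the size rounded down to whole pages.
 */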

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
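/*
 * For example, "mem=64M@0x20000000 mem=64M@0x28000000" registers two
 * 64MB banks; the first mem= option seen also discards any banks that
 * the boot tags or the machine fixup had already set up.
 */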
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
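/*
 * Options registered with __early_param() (such as "mem=" and "initrd="
 * above) are handled here and stripped from the command line that is
 * passed on, so a line like "console=ttyS0 mem=64M root=/dev/nfs"
 * reaches the rest of the kernel as "console=ttyS0 root=/dev/nfs".
 */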
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2, so only the
	 * parallel port ranges the machine description asks for are
	 * reserved.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags.  The first tag must be an ATAG_CORE tag for the
 * list to be recognised (to distinguish the tagged list from a
 * param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
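/*
 * A minimal list from a boot loader might look like this in memory
 * (each entry is a struct tag_header followed by that tag's data):
 *
 *	ATAG_CORE     (flags, pagesize, rootdev)
 *	ATAG_MEM      (size, start)          - one per memory bank
 *	ATAG_CMDLINE  ("root=/dev/nfs console=ttyS0 ...")
 *	ATAG_NONE     (size 0, terminating the list)
 *
 * The default init_tags further down provides just ATAG_CORE, ATAG_MEM
 * and the terminator.
 */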
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
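
/*
 * In other words, when the boot loader supplies nothing usable we fall
 * back to a read-only root (core flags bit 0 set), root device 0xff and
 * a single bank of MEM_SIZE bytes (16MB unless overridden) starting at
 * PHYS_OFFSET.
 */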

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

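/*
 * setup_arch() is the architecture entry point called from start_kernel():
 * it identifies the CPU and machine, adopts (or converts) the boot
 * loader's tag list, parses the command line including the __early_param
 * options, sets up the memory map and resources, and records the
 * machine's IRQ, timer and init_machine hooks for later in the boot.
 */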
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};
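
/*
 * Each string above corresponds to one HWCAP_* bit of elf_hwcap, in bit
 * order, so the /proc/cpuinfo "Features" line built below might read,
 * for example, "swp half thumb fastmult edsp java" on a CPU whose
 * hwcaps include exactly those bits.
 */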

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
952