/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

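/*
 * "fpe=" command line option: remember which floating point emulator
 * (NWFPE or FastFPE) was requested; fpe_type is consulted later when
 * the emulator initialises.
 */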
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

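/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; cpu_init() points each mode's stack pointer at the
 * corresponding entry below.
 */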
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
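/*
 * Casting the word to char picks up its least significant byte: 'l' on a
 * little-endian CPU, 'b' on a big-endian one.  ENDIANNESS is appended to
 * the machine and ELF platform names in setup_processor().
 */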
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{ "Video RAM",   0,     0,     IORESOURCE_MEM			},
	{ "Kernel text", 0,     0,     IORESOURCE_MEM			},
	{ "Kernel data", 0,     0,     IORESOURCE_MEM			}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{ "reserved",    0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

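/*
 * Field extractors for the CP15 cache type register: the overall cache
 * type and the separate/unified (S) bit, plus the size, associativity,
 * M bit and line length encoded in each of the D and I size fields.
 */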
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}
}

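/*
 * Decode the CPU architecture version (ARMv3 through ARMv6 and later)
 * from the main ID register, handling both the old and the revised
 * CPUID layouts.
 */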
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}

/*
 * These functions reuse the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(void);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type();
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

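/*
 * "initrd=start,size" on the command line gives the physical address
 * and size of the initial ramdisk.
 */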
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

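/* Record a bank of RAM in the boot-time meminfo table. */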
static void __init add_memory(unsigned long start, unsigned long size)
{
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

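/*
 * Pass the ramdisk parameters (load flag, prompt flag, start block and
 * size) from the ATAG_RAMDISK tag through to the RAM disk driver.
 */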
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

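/*
 * Register the standard resources with the resource tree: a "System RAM"
 * region for each memory bank, the kernel text and data within them, and
 * optionally the video RAM and legacy parallel port regions claimed by
 * the machine description.
 */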
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2 (the legacy
	 * parallel port regions), so only reserve them when the
	 * machine description asks for it.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults: a tag list describing a single bank of
 * MEM_SIZE bytes at PHYS_OFFSET, used when the boot loader does not
 * supply a usable tag list of its own.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

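/*
 * setup_arch - architecture-specific boot-time initialisation.
 *
 * Identifies the CPU and machine, locates the boot tag list (falling back
 * to the built-in defaults), parses the tags and the kernel command line,
 * then sets up paging, the standard resources and the per-CPU state, and
 * records the machine's IRQ, timer and init hooks for later use.
 */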
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}


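/*
 * Register a CPU device for each CPU so that it shows up in sysfs.
 */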
static int __init topology_init(void)
{
	int cpu;

	for_each_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

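/*
 * /proc/cpuinfo: print the processor name, per-CPU BogoMIPS, hardware
 * capability flags, ID register fields, cache details and the machine
 * hardware, revision and serial number.
 */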
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
943