/* xref: /linux/arch/arm/kernel/setup.c (revision 5499b45190237ca90dd2ac86395cf464fe1f4cc7) */
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "compat.h"
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

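/*
 * The optional "fpe=" boot argument is captured here for the NWFPE or
 * FastFPE floating point emulator (for example, a command line could
 * carry something like "fpe=nwfpe"; the exact value is emulator-specific
 * and given here only as an illustration).
 */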
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

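/*
 * When the kernel supports more than one CPU, TLB, user-access or cache
 * implementation, these structures hold the function pointers for the
 * implementation that was actually detected; setup_processor() fills
 * them in from the matching proc_info_list entry.
 */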
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

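/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes.  cpu_init() points each mode's banked stack pointer
 * at the corresponding entry for the current CPU.
 */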
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
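/*
 * Run-time endianness probe: reading the union through .l makes its low
 * byte either 'l' (little-endian) or 'b' (big-endian), and that character
 * is appended to the machine and ELF platform strings in setup_processor().
 */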
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

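/*
 * Legacy PC-style parallel port I/O ranges (0x3bc, 0x378 and 0x278).
 * Machines that can never have these devices may request that the
 * regions be reserved via the reserve_lp* flags in their machine
 * descriptor; see request_standard_resources().
 */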
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

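/*
 * Decode the CPU architecture version from the main ID register.  Older
 * CPUs encode the architecture directly in the ID value; CPUs using the
 * revised CPUID scheme are classified by probing the memory model
 * feature register for a v6 or v7 VMSA/PMSA implementation.
 */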
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

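/*
 * Work out which cache model the CPU has (VIVT, VIPT aliasing or VIPT
 * non-aliasing, plus ASID-tagged instruction caches with the ARMv7 cache
 * type register format) and record the result in the global "cacheid".
 */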
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

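/*
 * setup_processor - identify the CPU from its ID register, bind the
 * matching low-level CPU/TLB/user/cache operations and report the
 * processor details on the console.
 */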
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

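/*
 * setup_machine - look up the machine descriptor for the machine number
 * handed over by the boot loader; there is no way to continue without
 * one, so an unknown number stops the boot here.
 */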
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

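/*
 * Record one bank of memory in the meminfo array, trimming the region to
 * whole pages.  Returns -EINVAL when the bank table is already full, the
 * resulting bank is empty, or its node number is out of range.
 */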
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0 || bank->node >= MAX_NUMNODES)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int arglen = strlen(p->arg);

				if (memcmp(from, p->arg, arglen) == 0) {
					if (to != command_line)
						to -= 1;
					from += arglen;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

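/*
 * Hand the ramdisk parameters (load and prompt flags, start block and
 * size) from the boot tags over to the RAM disk driver, if configured.
 */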
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

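/*
 * Claim the standard resources: a "System RAM" region for each memory
 * bank with the kernel text and data nested inside it, plus the video
 * RAM region and any reserved parallel port ranges requested by the
 * machine descriptor.
 */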
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
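/*
 * For example, a boot loader might pass a list along the lines of
 * ATAG_CORE, ATAG_MEM, ATAG_CMDLINE, ATAG_NONE (purely illustrative;
 * the exact contents are up to the boot loader).
 */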
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

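/*
 * setup_arch - architecture-specific boot-time setup.
 *
 * Identifies the CPU and machine, locates the boot tags (converting an
 * old-style param_struct if necessary), parses the command line, sets up
 * paging and the standard resources, and records the machine's IRQ,
 * timer and init_machine hooks for use later in the boot.
 */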
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}


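/*
 * Register a CPU device for every possible CPU so that each one shows up
 * under sysfs; all of them are marked hotpluggable.
 */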
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

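/*
 * Feature names printed on the /proc/cpuinfo "Features" line, indexed by
 * HWCAP bit number; c_show() prints an entry when the corresponding bit
 * is set in elf_hwcap.
 */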
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

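/*
 * Generate the /proc/cpuinfo contents: a processor/BogoMIPS block per
 * online CPU on SMP builds, the decoded feature and CPU ID fields, and
 * the hardware name, revision and serial number taken from the boot tags.
 */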
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

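/*
 * seq_file iterator callbacks: /proc/cpuinfo is produced as a single
 * record, so c_start() yields exactly one position and c_next() ends
 * the walk.
 */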
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
892