/* xref: /linux/arch/arm/kernel/setup.c (revision c053784454550cf750399caa65482b31ffbe3c57) */
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/crash_dump.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 
32 #include <asm/unified.h>
33 #include <asm/cpu.h>
34 #include <asm/cputype.h>
35 #include <asm/elf.h>
36 #include <asm/procinfo.h>
37 #include <asm/sections.h>
38 #include <asm/setup.h>
39 #include <asm/smp_plat.h>
40 #include <asm/mach-types.h>
41 #include <asm/cacheflush.h>
42 #include <asm/cachetype.h>
43 #include <asm/tlbflush.h>
44 
45 #include <asm/mach/arch.h>
46 #include <asm/mach/irq.h>
47 #include <asm/mach/time.h>
48 #include <asm/traps.h>
49 #include <asm/unwind.h>
50 
51 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
52 #include "compat.h"
53 #endif
54 #include "atags.h"
55 #include "tcm.h"
56 
#ifndef MEM_SIZE
/* Fallback RAM size (16MB) used by init_tags when nothing supplies one. */
#define MEM_SIZE	(16*1024*1024)
#endif
60 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Floating point emulator name captured from the "fpe=" boot argument. */
char fpe_type[8];

/*
 * Record the "fpe=" command-line value for the FP emulator.
 * Note: copies a fixed 8 bytes, so it may read past the end of a
 * shorter argument string (bounded by the command-line buffer).
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72 
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* Main CPU ID register value, set up during early boot. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine (board) type number handed over by the boot loader. */
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache indexing flags (CACHEID_*), computed by cacheid_init(). */
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAG list, if the boot loader passed one. */
unsigned int __atags_pointer __initdata;

/* Board revision from ATAG_REVISION; shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* 64-bit board serial number from ATAG_SERIAL, split low/high. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* feature bits advertised to user space. */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


/*
 * On kernels built for several CPU implementations, the function
 * tables below are filled from the matching proc_info_list entry
 * by setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif
114 
/* Tiny per-mode exception stacks: three words each for IRQ, abort, undef. */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* Platform string reported to user space via the ELF aux vector. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Stable copy of the command line handed back through *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Low byte of the overlaid long: 'l' on little-endian, 'b' on big-endian. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
135 
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases; start/end are filled in at boot. */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy lp (printer) port I/O ranges, reserved only when the
 * machine descriptor asks for it (see request_standard_resources). */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
188 
/* Architecture names, indexed by the CPU_ARCH_* value that
 * cpu_architecture() returns; printed in /proc/cpuinfo. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
208 
/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID
 * register.  Several historical ID register layouts exist; they are
 * distinguished by the bit patterns tested below.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* ID format carries no architecture information */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7-style ID: bit 23 distinguishes v4T from v3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* architecture encoded directly in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
241 
/*
 * Determine whether the instruction cache can alias, i.e. whether a
 * single way spans more than one page so a physical line can appear
 * at more than one virtual index.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* select the L1 instruction cache in CSSELR */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		/* CCSIDR: log2(line words)-2 in [2:0], sets-1 in [27:13] */
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* aliasing if one way (line_size * num_sets) exceeds a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register: bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
270 
/*
 * Work out the D- and I-cache indexing scheme (VIVT, VIPT aliasing or
 * non-aliasing, ASID-tagged) from the cache type register and record
 * it in the global 'cacheid', then report it on the console.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* bits [15:14] == 0b01: ASID-tagged I-cache */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			/* v6 format: bit 23 set means an aliasing VIPT cache */
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* pre-v6 cores: virtually indexed, virtually tagged */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
304 
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality: they search the
 * linker-built CPU and machine tables by ID / machine number.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
311 
312 static void __init feat_v6_fixup(void)
313 {
314 	int id = read_cpuid_id();
315 
316 	if ((id & 0xff0f0000) != 0x41070000)
317 		return;
318 
319 	/*
320 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
321 	 * see also kuser_get_tls_init.
322 	 */
323 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
324 		elf_hwcap &= ~HWCAP_TLS;
325 }
326 
/*
 * Identify the boot CPU from its ID register, copy the per-CPU-type
 * function tables, and initialise hwcaps, utsname, elf_platform and
 * cache information.  Hangs forever if the CPU is unknown.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

	/* multi-implementation builds: copy the CPU's function tables */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS suffixes the names with 'l' or 'b' */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
374 
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switches into IRQ, abort and undefined mode in turn (with
	 * interrupts masked), pointing each mode's banked sp at this
	 * CPU's stack area, then returns to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
425 
426 static struct machine_desc * __init setup_machine(unsigned int nr)
427 {
428 	struct machine_desc *list;
429 
430 	/*
431 	 * locate machine in the list of supported machines.
432 	 */
433 	list = lookup_machine_type(nr);
434 	if (!list) {
435 		printk("Machine configuration botched (nr %d), unable "
436 		       "to continue.\n", nr);
437 		while (1);
438 	}
439 
440 	printk("Machine: %s\n", list->name);
441 
442 	return list;
443 }
444 
445 static int __init arm_add_memory(unsigned long start, unsigned long size)
446 {
447 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
448 
449 	if (meminfo.nr_banks >= NR_BANKS) {
450 		printk(KERN_CRIT "NR_BANKS too low, "
451 			"ignoring memory at %#lx\n", start);
452 		return -EINVAL;
453 	}
454 
455 	/*
456 	 * Ensure that start/size are aligned to a page boundary.
457 	 * Size is appropriately rounded down, start is rounded up.
458 	 */
459 	size -= start & ~PAGE_MASK;
460 	bank->start = PAGE_ALIGN(start);
461 	bank->size  = size & PAGE_MASK;
462 
463 	/*
464 	 * Check whether this memory region has non-zero size or
465 	 * invalid node number.
466 	 */
467 	if (bank->size == 0)
468 		return -EINVAL;
469 
470 	meminfo.nr_banks++;
471 	return 0;
472 }
473 
474 /*
475  * Pick out the memory size.  We look for mem=size@start,
476  * where start and size are "size[KkMm]"
477  */
478 static int __init early_mem(char *p)
479 {
480 	static int usermem __initdata = 0;
481 	unsigned long size, start;
482 	char *endp;
483 
484 	/*
485 	 * If the user specifies memory size, we
486 	 * blow away any automatically generated
487 	 * size.
488 	 */
489 	if (usermem == 0) {
490 		usermem = 1;
491 		meminfo.nr_banks = 0;
492 	}
493 
494 	start = PHYS_OFFSET;
495 	size  = memparse(p, &endp);
496 	if (*endp == '@')
497 		start = memparse(endp + 1, NULL);
498 
499 	arm_add_memory(start, size);
500 
501 	return 0;
502 }
503 early_param("mem", early_mem);
504 
505 static void __init
506 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
507 {
508 #ifdef CONFIG_BLK_DEV_RAM
509 	extern int rd_size, rd_image_start, rd_prompt, rd_doload;
510 
511 	rd_image_start = image_start;
512 	rd_prompt = prompt;
513 	rd_doload = doload;
514 
515 	if (rd_sz)
516 		rd_size = rd_sz;
517 #endif
518 }
519 
/*
 * Register "System RAM" for each memory bank, nest the kernel text
 * and data resources inside their bank, and claim video RAM and the
 * legacy lp port ranges when the machine descriptor asks for them.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* nest kernel image resources inside their RAM bank */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
568 
569 /*
570  *  Tag parsing.
571  *
572  * This is the new way of passing data to the kernel at boot time.  Rather
573  * than passing a fixed inflexible structure to the kernel, we pass a list
574  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
575  * tag for the list to be recognised (to distinguish the tagged list from
576  * a param_struct).  The list is terminated with a zero-length tag (this tag
577  * is not parsed in any way).
578  */
579 static int __init parse_tag_core(const struct tag *tag)
580 {
581 	if (tag->hdr.size > 2) {
582 		if ((tag->u.core.flags & 1) == 0)
583 			root_mountflags &= ~MS_RDONLY;
584 		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
585 	}
586 	return 0;
587 }
588 
589 __tagtable(ATAG_CORE, parse_tag_core);
590 
591 static int __init parse_tag_mem32(const struct tag *tag)
592 {
593 	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
594 }
595 
596 __tagtable(ATAG_MEM, parse_tag_mem32);
597 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-console geometry; overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: copy the boot loader's console state into screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
624 
625 static int __init parse_tag_ramdisk(const struct tag *tag)
626 {
627 	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
628 		      (tag->u.ramdisk.flags & 2) == 0,
629 		      tag->u.ramdisk.start, tag->u.ramdisk.size);
630 	return 0;
631 }
632 
633 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
634 
635 static int __init parse_tag_serialnr(const struct tag *tag)
636 {
637 	system_serial_low = tag->u.serialnr.low;
638 	system_serial_high = tag->u.serialnr.high;
639 	return 0;
640 }
641 
642 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
643 
/* ATAG_REVISION: board revision, reported as "Revision" in /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
651 
#ifndef CONFIG_CMDLINE_FORCE
/* ATAG_CMDLINE: boot-loader command line replaces the built-in default
 * (compiled out entirely when CONFIG_CMDLINE_FORCE pins the default). */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */
661 
662 /*
663  * Scan the tag table for this tag, and call its parse function.
664  * The tag table is built by the linker from all the __tagtable
665  * declarations.
666  */
667 static int __init parse_tag(const struct tag *tag)
668 {
669 	extern struct tagtable __tagtable_begin, __tagtable_end;
670 	struct tagtable *t;
671 
672 	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
673 		if (tag->hdr.tag == t->tag) {
674 			t->parse(tag);
675 			break;
676 		}
677 
678 	return t < &__tagtable_end;
679 }
680 
681 /*
682  * Parse all tags in the list, checking both the global and architecture
683  * specific tag tables.
684  */
685 static void __init parse_tags(const struct tag *t)
686 {
687 	for (; t->hdr.size; t = tag_next(t))
688 		if (!parse_tag(t))
689 			printk(KERN_WARNING
690 				"Ignoring unrecognised tag 0x%08x\n",
691 				t->hdr.tag);
692 }
693 
/*
 * This holds our defaults: a fallback tag list used when the boot
 * loader supplies none — one ATAG_CORE plus a single MEM_SIZE memory
 * bank at PHYS_OFFSET, terminated by ATAG_NONE.  The core payload
 * { 1, PAGE_SIZE, 0xff } presumably maps to flags/pagesize/rootdev —
 * confirm against struct tag_core.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
710 
/* Board init hook, saved from mdesc->init_machine by setup_arch(). */
static void (*init_machine)(void) __initdata;

/* Run the board's init hook at arch_initcall time, if it has one. */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
721 
#ifdef CONFIG_KEXEC
/* Total low memory in bytes, from the pfn span known to bootmem. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	/* %lu: the arguments are cast to unsigned long */
	printk(KERN_INFO "Reserving %luMB of memory at %luMB "
	       "for crashkernel (System RAM: %luMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* publish the reservation so it appears in /proc/iomem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
770 
771 /*
772  * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
773  * is_kdump_kernel() to determine if we are booting after a panic. Hence
774  * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
775  */
776 
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of the ELF core header stored by
 * the crashed kernel.  This option is passed by the kexec loader to
 * the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (arg == NULL)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);

	/* memparse must have consumed at least one character */
	return (end > arg) ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
794 
795 static void __init squash_mem_tags(struct tag *tag)
796 {
797 	for (; tag->hdr.size; tag = tag_next(tag))
798 		if (tag->hdr.tag == ATAG_MEM)
799 			tag->hdr.tag = ATAG_NONE;
800 }
801 
/*
 * Top-level ARM boot-time initialisation: identify the CPU and
 * machine, locate and parse the ATAG list, set up the command line,
 * memory layout, paging and resources, and record the machine's
 * irq/timer/init hooks for later initcalls.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* prefer the tag list the boot loader passed; fall back to the
	 * machine's fixed boot_params address */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* still no valid tag list: use the built-in defaults */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* fixup-provided memory banks override ATAG_MEM tags */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}
888 
889 
890 static int __init topology_init(void)
891 {
892 	int cpu;
893 
894 	for_each_possible_cpu(cpu) {
895 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
896 		cpuinfo->cpu.hotpluggable = 1;
897 		register_cpu(&cpuinfo->cpu, cpu);
898 	}
899 
900 	return 0;
901 }
902 subsys_initcall(topology_init);
903 
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory; -ENOMEM if that fails. */
static int __init proc_cpu_init(void)
{
	if (proc_mkdir("cpu", NULL) == NULL)
		return -ENOMEM;

	return 0;
}
fs_initcall(proc_cpu_init);
#endif
916 
/* Feature names for /proc/cpuinfo; entry i corresponds to elf_hwcap
 * bit i (see the Features loop in c_show()), NULL-terminated. */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
935 
/*
 * /proc/cpuinfo show routine: CPU name/revision, per-CPU BogoMIPS,
 * hwcap feature list, decoded CPU ID fields and board information.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* decode variant/part per the ID register format in use */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
998 
999 static void *c_start(struct seq_file *m, loff_t *pos)
1000 {
1001 	return *pos < 1 ? (void *)1 : NULL;
1002 }
1003 
1004 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1005 {
1006 	++*pos;
1007 	return NULL;
1008 }
1009 
1010 static void c_stop(struct seq_file *m, void *v)
1011 {
1012 }
1013 
1014 const struct seq_operations cpuinfo_op = {
1015 	.start	= c_start,
1016 	.next	= c_next,
1017 	.stop	= c_stop,
1018 	.show	= c_show
1019 };
1020