xref: /linux/arch/arm/kernel/setup.c (revision 779b96d20ca97cfa19162b340bff0c27b405b4b2)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/crash_dump.h>
25 #include <linux/root_dev.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/fs.h>
30 #include <linux/proc_fs.h>
31 #include <linux/memblock.h>
32 #include <linux/bug.h>
33 #include <linux/compiler.h>
34 #include <linux/sort.h>
35 
36 #include <asm/unified.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48 #include <asm/system.h>
49 
50 #include <asm/prom.h>
51 #include <asm/mach/arch.h>
52 #include <asm/mach/irq.h>
53 #include <asm/mach/time.h>
54 #include <asm/traps.h>
55 #include <asm/unwind.h>
56 #include <asm/memblock.h>
57 
58 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
59 #include "compat.h"
60 #endif
61 #include "atags.h"
62 #include "tcm.h"
63 
64 #ifndef MEM_SIZE
65 #define MEM_SIZE	(16*1024*1024)
66 #endif
67 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FP emulator name selected with the "fpe=" command line option. */
char fpe_type[8];

/*
 * Parse the "fpe=" kernel parameter.
 *
 * Use strncpy() rather than a fixed-length memcpy(): the previous
 * memcpy(fpe_type, line, 8) always read 8 bytes, over-reading past the
 * end of a shorter argument string.  strncpy() stops at the NUL and
 * zero-fills the remainder.  As before, an argument of 8 or more
 * characters leaves fpe_type without a terminator, which consumers
 * must tolerate.
 */
static int __init fpe_setup(char *line)
{
	strncpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
79 
/* Implemented elsewhere in arch/arm (mm and reboot handling). */
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

/* CPU ID register value; set during early boot (outside this file). */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number handed over by the bootloader (see asm/mach-types.h). */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache topology flags (CACHEID_*), computed by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the bootloader-provided ATAG list, if any. */
unsigned int __atags_pointer __initdata;

/* Board revision, filled in from ATAG_REVISION. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

/* Board serial number halves, filled in from ATAG_SERIAL. */
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF HWCAP bits advertised to user space (names in hwcap_str[]). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/* Per-CPU-type method tables, copied from the matched proc_info_list
 * by setup_processor() on multi-implementation builds. */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
129 
/*
 * Small per-mode exception stacks: cpu_init() points the IRQ, abort
 * and undefined-instruction banked stack pointers at these (three
 * words each, per CPU).
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* Platform string (e.g. "v7l") reported in /proc/cpuinfo. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;		/* from the matched proc_info_list */
static const char *machine_name;	/* from the selected machine_desc */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/* Built-in command line; may be extended/replaced by ATAG_CMDLINE. */
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Run-time endianness probe: the low byte reads 'l' on little-endian
 * and 'b' on big-endian. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
151 
152 /*
153  * Standard memory resources
154  */
155 static struct resource mem_res[] = {
156 	{
157 		.name = "Video RAM",
158 		.start = 0,
159 		.end = 0,
160 		.flags = IORESOURCE_MEM
161 	},
162 	{
163 		.name = "Kernel text",
164 		.start = 0,
165 		.end = 0,
166 		.flags = IORESOURCE_MEM
167 	},
168 	{
169 		.name = "Kernel data",
170 		.start = 0,
171 		.end = 0,
172 		.flags = IORESOURCE_MEM
173 	}
174 };
175 
176 #define video_ram   mem_res[0]
177 #define kernel_code mem_res[1]
178 #define kernel_data mem_res[2]
179 
/*
 * Legacy PC-style I/O port ranges (lp0-lp2), claimed only when the
 * machine description sets the corresponding reserve_lp* flag in
 * request_standard_resources().
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

/* Convenience aliases into io_res[]. */
#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
204 
/*
 * Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned by cpu_architecture(); used by the boot banner and
 * /proc/cpuinfo.  The order must match the CPU_ARCH_* constants.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
224 
225 static int __get_cpu_architecture(void)
226 {
227 	int cpu_arch;
228 
229 	if ((read_cpuid_id() & 0x0008f000) == 0) {
230 		cpu_arch = CPU_ARCH_UNKNOWN;
231 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
232 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
233 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
234 		cpu_arch = (read_cpuid_id() >> 16) & 7;
235 		if (cpu_arch)
236 			cpu_arch += CPU_ARCH_ARMv3;
237 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
238 		unsigned int mmfr0;
239 
240 		/* Revised CPUID format. Read the Memory Model Feature
241 		 * Register 0 and check for VMSAv7 or PMSAv7 */
242 		asm("mrc	p15, 0, %0, c0, c1, 4"
243 		    : "=r" (mmfr0));
244 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
245 		    (mmfr0 & 0x000000f0) >= 0x00000030)
246 			cpu_arch = CPU_ARCH_ARMv7;
247 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
248 			 (mmfr0 & 0x000000f0) == 0x00000020)
249 			cpu_arch = CPU_ARCH_ARMv6;
250 		else
251 			cpu_arch = CPU_ARCH_UNKNOWN;
252 	} else
253 		cpu_arch = CPU_ARCH_UNKNOWN;
254 
255 	return cpu_arch;
256 }
257 
/*
 * Return the cached CPU_ARCH_* value.  Only valid after
 * setup_processor() has stored __get_cpu_architecture()'s result.
 */
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
264 
/*
 * Work out whether the instruction cache can alias, i.e. whether one
 * physical line can appear at more than one virtual index.  @arch
 * selects how the cache ID registers are interpreted.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache, then read its size info. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);	/* bytes per line */
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing becomes possible once one way spans > one page. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* v6 cache type register: bit 11 flags an aliasing I-cache. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
297 
/*
 * Determine the data and instruction cache types from the cache type
 * register, record them in the global 'cacheid', and print a summary.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field: 01 = ASID-tagged VIVT, 11 = PIPT. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* v6 format: bit 23 flags an aliasing VIPT D-cache. */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 CPUs: virtually indexed, virtually tagged. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
339 
340 /*
341  * These functions re-use the assembly code in head.S, which
342  * already provide the required functionality.
343  */
344 extern struct proc_info_list *lookup_processor_type(unsigned int);
345 
/*
 * printf-style output usable before the console is registered: the
 * formatted message goes to the low-level debug UART when
 * CONFIG_DEBUG_LL is enabled, and to printk in any case.  Output is
 * truncated at 255 characters.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
361 
/*
 * Drop HWCAP bits that early ARMv6 silicon advertises but cannot
 * actually support.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	/* Only ARM Ltd. (0x41) parts with 7 in the family field apply. */
	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
376 
377 /*
378  * cpu_init - initialise one CPU.
379  *
380  * cpu_init sets up the per-CPU stacks.
381  */
382 void cpu_init(void)
383 {
384 	unsigned int cpu = smp_processor_id();
385 	struct stack *stk = &stacks[cpu];
386 
387 	if (cpu >= NR_CPUS) {
388 		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
389 		BUG();
390 	}
391 
392 	cpu_proc_init();
393 
394 	/*
395 	 * Define the placement constraint for the inline asm directive below.
396 	 * In Thumb-2, msr with an immediate value is not allowed.
397 	 */
398 #ifdef CONFIG_THUMB2_KERNEL
399 #define PLC	"r"
400 #else
401 #define PLC	"I"
402 #endif
403 
404 	/*
405 	 * setup stacks for re-entrant exception handlers
406 	 */
407 	__asm__ (
408 	"msr	cpsr_c, %1\n\t"
409 	"add	r14, %0, %2\n\t"
410 	"mov	sp, r14\n\t"
411 	"msr	cpsr_c, %3\n\t"
412 	"add	r14, %0, %4\n\t"
413 	"mov	sp, r14\n\t"
414 	"msr	cpsr_c, %5\n\t"
415 	"add	r14, %0, %6\n\t"
416 	"mov	sp, r14\n\t"
417 	"msr	cpsr_c, %7"
418 	    :
419 	    : "r" (stk),
420 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
421 	      "I" (offsetof(struct stack, irq[0])),
422 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
423 	      "I" (offsetof(struct stack, abt[0])),
424 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
425 	      "I" (offsetof(struct stack, und[0])),
426 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
427 	    : "r14");
428 }
429 
/*
 * Identify the boot CPU from its ID register, install the matching
 * per-CPU-type method tables, set the utsname/ELF platform strings
 * and hwcaps, then initialise caches and exception stacks.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the method tables on multi-implementation builds. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* Architecture name plus an endianness suffix, e.g. "...l". */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
480 
/*
 * Print every machine entry compiled into this kernel, then halt.
 * Called when the boot-time machine ID cannot be matched; never
 * returns.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
494 
495 int __init arm_add_memory(phys_addr_t start, unsigned long size)
496 {
497 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
498 
499 	if (meminfo.nr_banks >= NR_BANKS) {
500 		printk(KERN_CRIT "NR_BANKS too low, "
501 			"ignoring memory at 0x%08llx\n", (long long)start);
502 		return -EINVAL;
503 	}
504 
505 	/*
506 	 * Ensure that start/size are aligned to a page boundary.
507 	 * Size is appropriately rounded down, start is rounded up.
508 	 */
509 	size -= start & ~PAGE_MASK;
510 	bank->start = PAGE_ALIGN(start);
511 	bank->size  = size & PAGE_MASK;
512 
513 	/*
514 	 * Check whether this memory region has non-zero size or
515 	 * invalid node number.
516 	 */
517 	if (bank->size == 0)
518 		return -EINVAL;
519 
520 	meminfo.nr_banks++;
521 	return 0;
522 }
523 
524 /*
525  * Pick out the memory size.  We look for mem=size@start,
526  * where start and size are "size[KkMm]"
527  */
528 static int __init early_mem(char *p)
529 {
530 	static int usermem __initdata = 0;
531 	unsigned long size;
532 	phys_addr_t start;
533 	char *endp;
534 
535 	/*
536 	 * If the user specifies memory size, we
537 	 * blow away any automatically generated
538 	 * size.
539 	 */
540 	if (usermem == 0) {
541 		usermem = 1;
542 		meminfo.nr_banks = 0;
543 	}
544 
545 	start = PHYS_OFFSET;
546 	size  = memparse(p, &endp);
547 	if (*endp == '@')
548 		start = memparse(endp + 1, NULL);
549 
550 	arm_add_memory(start, size);
551 
552 	return 0;
553 }
554 early_param("mem", early_mem);
555 
556 static void __init
557 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
558 {
559 #ifdef CONFIG_BLK_DEV_RAM
560 	extern int rd_size, rd_image_start, rd_prompt, rd_doload;
561 
562 	rd_image_start = image_start;
563 	rd_prompt = prompt;
564 	rd_doload = doload;
565 
566 	if (rd_sz)
567 		rd_size = rd_sz;
568 #endif
569 }
570 
/*
 * Populate the resource tree: one "System RAM" entry per memblock
 * region with the kernel text/data nested inside, plus the optional
 * video RAM and reserved legacy I/O port ranges from the machine
 * description.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image inside the region that holds it. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
615 
616 /*
617  *  Tag parsing.
618  *
619  * This is the new way of passing data to the kernel at boot time.  Rather
620  * than passing a fixed inflexible structure to the kernel, we pass a list
621  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
622  * tag for the list to be recognised (to distinguish the tagged list from
623  * a param_struct).  The list is terminated with a zero-length tag (this tag
624  * is not parsed in any way).
625  */
/* ATAG_CORE: root device number and read-only flag for the root fs. */
static int __init parse_tag_core(const struct tag *tag)
{
	/* A minimal (header-only) core tag carries no payload. */
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
637 
/* ATAG_MEM: add one physical memory region to meminfo. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
644 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text console geometry (80x30); overridden by ATAG_VIDEOTEXT. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: import the bootloader's text console parameters. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
671 
/*
 * ATAG_RAMDISK: configure the initial RAM disk.  As written, a clear
 * bit 0 enables loading and a clear bit 1 enables prompting.
 * NOTE(review): this is inverted relative to common ATAG flag
 * descriptions - confirm against the boot protocol in use.
 */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
681 
/* ATAG_SERIAL: 64-bit board serial number, stored as two 32-bit halves. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
690 
/* ATAG_REVISION: board revision, reported via /proc/cpuinfo. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
698 
/*
 * ATAG_CMDLINE: merge the bootloader's command line with the built-in
 * default according to the CONFIG_CMDLINE_* policy: append to it,
 * ignore it entirely, or (default) replace the built-in line.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
715 
716 /*
717  * Scan the tag table for this tag, and call its parse function.
718  * The tag table is built by the linker from all the __tagtable
719  * declarations.
720  */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* t ran off the end of the table iff no handler matched. */
	return t < &__tagtable_end;
}
734 
735 /*
736  * Parse all tags in the list, checking both the global and architecture
737  * specific tag tables.
738  */
739 static void __init parse_tags(const struct tag *t)
740 {
741 	for (; t->hdr.size; t = tag_next(t))
742 		if (!parse_tag(t))
743 			printk(KERN_WARNING
744 				"Ignoring unrecognised tag 0x%08x\n",
745 				t->hdr.tag);
746 }
747 
748 /*
749  * This holds our defaults.
750  */
751 static struct init_tags {
752 	struct tag_header hdr1;
753 	struct tag_core   core;
754 	struct tag_header hdr2;
755 	struct tag_mem32  mem;
756 	struct tag_header hdr3;
757 } init_tags __initdata = {
758 	{ tag_size(tag_core), ATAG_CORE },
759 	{ 1, PAGE_SIZE, 0xff },
760 	{ tag_size(tag_mem32), ATAG_MEM },
761 	{ MEM_SIZE },
762 	{ 0, ATAG_NONE }
763 };
764 
/*
 * Run the board's init_machine() hook at arch_initcall time so it can
 * register or adjust its platform devices.
 */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
773 
#ifdef CONFIG_KEXEC
/* Size in bytes of the low-memory pfn range, for crashkernel sizing. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	/* NOTE(review): the shift happens in unsigned long before
	 * widening - adequate for low-memory sizes. */
	return total << PAGE_SHIFT;
}
782 
783 /**
784  * reserve_crashkernel() - reserves memory are for crash kernel
785  *
786  * This function reserves memory area given in "crashkernel=" kernel command
787  * line parameter. The memory reserved is used by a dump capture kernel when
788  * primary kernel is crashing.
789  */
790 static void __init reserve_crashkernel(void)
791 {
792 	unsigned long long crash_size, crash_base;
793 	unsigned long long total_mem;
794 	int ret;
795 
796 	total_mem = get_total_mem();
797 	ret = parse_crashkernel(boot_command_line, total_mem,
798 				&crash_size, &crash_base);
799 	if (ret)
800 		return;
801 
802 	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
803 	if (ret < 0) {
804 		printk(KERN_WARNING "crashkernel reservation failed - "
805 		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
806 		return;
807 	}
808 
809 	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
810 	       "for crashkernel (System RAM: %ldMB)\n",
811 	       (unsigned long)(crash_size >> 20),
812 	       (unsigned long)(crash_base >> 20),
813 	       (unsigned long)(total_mem >> 20));
814 
815 	crashk_res.start = crash_base;
816 	crashk_res.end = crash_base + crash_size - 1;
817 	insert_resource(&iomem_resource, &crashk_res);
818 }
819 #else
820 static inline void reserve_crashkernel(void) {}
821 #endif /* CONFIG_KEXEC */
822 
823 static void __init squash_mem_tags(struct tag *tag)
824 {
825 	for (; tag->hdr.size; tag = tag_next(tag))
826 		if (tag->hdr.tag == ATAG_MEM)
827 			tag->hdr.tag = ATAG_NONE;
828 }
829 
/*
 * setup_machine_tags() - select the machine_desc and parse the ATAGs.
 * @nr: machine number handed over by the bootloader
 *
 * Falls back to the built-in init_tags when no valid list was passed,
 * lets the machine's fixup hook rewrite tags/cmdline/meminfo, parses
 * the tags and seeds boot_command_line.  Does not return if the
 * machine number is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* Complete the default tag list with the real RAM base. */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	/* Prefer the bootloader's list; else the machine's fixed offset. */
	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->atag_offset)
		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* Memory populated by the fixup hook beats ATAG_MEM tags. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
894 
895 static int __init meminfo_cmp(const void *_a, const void *_b)
896 {
897 	const struct membank *a = _a, *b = _b;
898 	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
899 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
900 }
901 
/*
 * setup_arch() - ARM architecture-specific boot-time initialisation.
 *
 * Identifies the CPU and machine, parses the device tree or ATAGs and
 * the command line, initialises the memory layout (memblock, paging,
 * resources) and per-machine hooks, then runs the machine's
 * init_early() callback.  *cmdline_p is pointed at the saved command
 * line for generic code.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAG parsing. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		extern unsigned long arm_dma_zone_size;
		arm_dma_zone_size = mdesc->dma_zone_size;
	}
#endif
	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	/* Describe the kernel image layout to the init_mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before the memory layout is finalised. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
969 
970 
/*
 * Register every possible CPU with the sysfs CPU subsystem; all CPUs
 * are marked hotpluggable.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
984 
#ifdef CONFIG_HAVE_PROC_CPU
/* Create an empty /proc/cpu directory for other code to populate. */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
997 
/*
 * Feature names for /proc/cpuinfo: entry i corresponds to elf_hwcap
 * bit i (see c_show()), so the order must match the HWCAP_* bit
 * positions and must not change.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
1020 
/* Emit the body of /proc/cpuinfo. */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* The ID register layout differs between CPU generations. */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1083 
1084 static void *c_start(struct seq_file *m, loff_t *pos)
1085 {
1086 	return *pos < 1 ? (void *)1 : NULL;
1087 }
1088 
1089 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1090 {
1091 	++*pos;
1092 	return NULL;
1093 }
1094 
/* Nothing to release: c_start() acquires no resources. */
static void c_stop(struct seq_file *m, void *v)
{
}
1098 
/* seq_file operations backing the /proc/cpuinfo virtual file. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1105