xref: /linux/arch/arm/kernel/setup.c (revision 18d7f152df31e5a326301fdaad385e40874dff80)
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

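/*
 * Note that "fpe=" copies exactly 8 bytes into the buffer above:
 * shorter arguments bring their NUL terminator along, longer ones are
 * truncated without termination, so readers must bound their accesses.
 */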
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

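/*
 * Run-time endianness probe: the union overlays a four-character array
 * on an unsigned long, so the low-order byte of .l reads back as 'l'
 * on a little-endian CPU and 'b' on a big-endian one.
 */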
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

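/*
 * Derive the architecture version from the Main ID register (MIDR):
 * pre-ARM7 parts carry no architecture field, ARM7 parts use bit 23 to
 * distinguish ARMv4T from ARMv3, later parts encode the version in
 * bits [18:16], and an architecture field of 0xf means the CPUID
 * feature registers (ID_MMFR0 below) are authoritative.
 */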
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

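/*
 * On ARMv7, writing 1 to CSSELR selects the L1 instruction cache so
 * that CCSIDR describes its geometry: bits [2:0] encode
 * log2(words per line) - 2, hence bytes per line = 4 << (field + 2),
 * and bits [27:13] hold NumSets - 1.  A VIPT I-cache can alias once
 * one way (line_size * num_sets bytes) spans more than PAGE_SIZE.
 */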
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

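/*
 * Classify the cache model from the Cache Type Register: CTR bits
 * [31:29] read as 0x4 in the ARMv7 format, whose L1Ip field (bits
 * [15:14]) distinguishes ASID-tagged and PIPT instruction caches; the
 * older ARMv6 format is treated as VIPT, aliasing when bit 23 is set.
 */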
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * This function re-uses the assembly code in head.S, which already
 * provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
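		/* fall through: ARM-state SDIV/UDIV implies the Thumb forms */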
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * Set up the stacks for the re-entrant exception handlers: drop
	 * into IRQ, abort and undefined mode in turn, point sp at the
	 * per-mode stack, then return to SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

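/*
 * Seed the logical CPU map so the booting CPU always becomes logical
 * CPU 0: its affinity-level-0 MPIDR value takes slot 0 and the
 * identity mapping it displaces is swapped into its old slot.
 */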
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_ARM_LPAE
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif

	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Reject the bank if its page-rounded size came out as zero.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start, where start
 * and size are "size[KkMm]" (e.g. mem=64M@0x20000000).  If @start is
 * omitted, the bank is assumed to begin at PHYS_OFFSET.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	phys_addr_t size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * This customizes platform devices, or adds new ones.  On DT-based
	 * machines we fall back to populating the machine from the device
	 * tree if no callback is provided; otherwise every machine would
	 * need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserve memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter.  The reserved memory is used by a dump
 * capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

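/*
 * sort() comparator ordering memory banks by ascending start pfn; the
 * long difference is collapsed to -1/0/1 so that truncating it to the
 * comparator's int return type cannot flip the sign.
 */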
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

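/*
 * Main architecture-specific boot-time entry point: identify the CPU,
 * pick the machine descriptor from the flattened device tree or ATAGS,
 * parse early parameters, set up the memory layout and paging, then
 * defer the rest to the descriptor's callbacks and the initcalls.
 */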
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

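/*
 * Human-readable names for the HWCAP_* bits, in bit order: entry j is
 * printed when bit (1 << j) is set in elf_hwcap, so this table must
 * stay in step with the definitions in <asm/hwcap.h>.
 */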
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

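/*
 * /proc/cpuinfo is generated in a single pass: c_start() returns one
 * dummy token at position 0 and c_next() always terminates the
 * iteration, so the seq_file core calls c_show() exactly once per
 * traversal.
 */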
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
962