xref: /linux/arch/arm/kernel/setup.c (revision 4a8e43feeac7996b8de2d5b2823e316917493df4)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/cpu.h>
25 #include <linux/interrupt.h>
26 #include <linux/smp.h>
27 #include <linux/proc_fs.h>
28 #include <linux/memblock.h>
29 #include <linux/bug.h>
30 #include <linux/compiler.h>
31 #include <linux/sort.h>
32 
33 #include <asm/unified.h>
34 #include <asm/cp15.h>
35 #include <asm/cpu.h>
36 #include <asm/cputype.h>
37 #include <asm/elf.h>
38 #include <asm/procinfo.h>
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 #include <asm/smp_plat.h>
42 #include <asm/mach-types.h>
43 #include <asm/cacheflush.h>
44 #include <asm/cachetype.h>
45 #include <asm/tlbflush.h>
46 
47 #include <asm/prom.h>
48 #include <asm/mach/arch.h>
49 #include <asm/mach/irq.h>
50 #include <asm/mach/time.h>
51 #include <asm/system_info.h>
52 #include <asm/system_misc.h>
53 #include <asm/traps.h>
54 #include <asm/unwind.h>
55 #include <asm/memblock.h>
56 
57 #include "atags.h"
58 #include "tcm.h"
59 
60 
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* FPE implementation name selected by the "fpe=" command line option. */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/*
	 * Copy at most sizeof(fpe_type)-1 characters and always
	 * NUL-terminate.  The previous memcpy(fpe_type, line, 8) read a
	 * fixed 8 bytes regardless of the argument's actual length,
	 * over-reading short "fpe=" strings and possibly leaving
	 * fpe_type unterminated.
	 */
	strlcpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
72 
73 extern void paging_init(struct machine_desc *desc);
74 extern void sanity_check_meminfo(void);
75 extern void reboot_setup(char *str);
76 extern void setup_dma_zone(struct machine_desc *desc);
77 
/* Main ID register value — presumably cached by early boot code; exported
 * for modules.  NOTE(review): set outside this file — confirm in head.S. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number identifying the board, as selected at boot. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* bitmask describing cache topology; set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAGs/FDT blob handed over by the bootloader. */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number, surfaced via /proc/cpuinfo (c_show). */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* feature bits taken from the proc_info_list in setup_processor(). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * When the kernel is built to support multiple CPU/TLB/user/cache
 * variants, the per-variant function tables are copied here from the
 * matching proc_info_list entry (see setup_processor()).
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

/* Per-CPU exception stacks: three words each for IRQ, abort and
 * undefined-instruction modes (see cpu_init()). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string, e.g. used for /proc/cpuinfo and exec domain. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Writable copy of boot_command_line, handed to init via *cmdline_p. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

/* Runtime endianness probe: ENDIANNESS yields 'l' or 'b' depending on
 * which array byte the low byte of .l aliases. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
144 
/*
 * Standard memory resources.  start/end are filled in later:
 * kernel code/data by request_standard_resources(), Video RAM from the
 * machine descriptor.
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/*
 * Legacy PC-style parallel-port I/O ranges, claimed only when the
 * machine descriptor sets reserve_lp0/1/2 (see
 * request_standard_resources()).
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
197 
/*
 * Human-readable architecture names, indexed directly by the CPU_ARCH_*
 * value returned from cpu_architecture().
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
217 
/*
 * Decode the architecture version from the main ID register.  Three
 * CPUID layouts are handled: pre-ARM7 parts, ARM7 parts (where bit 23
 * distinguishes v4T from v3), and the post-ARM7 formats including the
 * revised scheme selected by 0x000f0000, which is resolved via MMFR0.
 */
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		/* pre-ARM7 ID layout carries no architecture field */
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7: bit 23 indicates Thumb support, i.e. ARMv4T */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* post-ARM7: architecture is encoded in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
250 
251 int __pure cpu_architecture(void)
252 {
253 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
254 
255 	return __cpu_architecture;
256 }
257 
/*
 * Determine whether the instruction cache can alias, i.e. whether one
 * physical line can live at several virtual indexes.  'arch' selects
 * which cache-ID register layout to interpret.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache in CSSELR, then read its
		 * geometry from CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* A way larger than a page means virtual index bits exceed
		 * the page offset, so aliasing is possible. */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* ARMv6 cache type register flags I-cache aliasing directly. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
290 
/*
 * Work out the cache indexing/tagging policy from the cache type
 * register and record it in the global 'cacheid' bitmask, then report
 * the result on the console.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			/* Bits [15:14] give the L1 I-cache policy. */
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			/* ARMv6 format: bit 23 flags an aliasing VIPT D-cache. */
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		/* Pre-v6 cores have virtually indexed, virtually tagged caches. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
332 
333 /*
334  * These functions re-use the assembly code in head.S, which
335  * already provide the required functionality.
336  */
337 extern struct proc_info_list *lookup_processor_type(unsigned int);
338 
339 void __init early_print(const char *str, ...)
340 {
341 	extern void printascii(const char *);
342 	char buf[256];
343 	va_list ap;
344 
345 	va_start(ap, str);
346 	vsnprintf(buf, sizeof(buf), str, ap);
347 	va_end(ap);
348 
349 #ifdef CONFIG_DEBUG_LL
350 	printascii(buf);
351 #endif
352 	printk("%s", buf);
353 }
354 
355 static void __init feat_v6_fixup(void)
356 {
357 	int id = read_cpuid_id();
358 
359 	if ((id & 0xff0f0000) != 0x41070000)
360 		return;
361 
362 	/*
363 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
364 	 * see also kuser_get_tls_init.
365 	 */
366 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
367 		elf_hwcap &= ~HWCAP_TLS;
368 }
369 
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	/* The stacks[] array only has NR_CPUS entries. */
	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, abort and undefined-instruction modes: switch
	 * into the mode (with IRQ/FIQ masked), point sp at that mode's
	 * slot within this CPU's 'stacks' entry, and finally drop back
	 * to SVC mode.  r14 is used as scratch and listed as clobbered.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
422 
423 int __cpu_logical_map[NR_CPUS];
424 
425 void __init smp_setup_processor_id(void)
426 {
427 	int i;
428 	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
429 
430 	cpu_logical_map(0) = cpu;
431 	for (i = 1; i < NR_CPUS; ++i)
432 		cpu_logical_map(i) = i == cpu ? 0 : i;
433 
434 	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
435 }
436 
/*
 * Identify the CPU, cache its architecture, hook up the per-variant
 * function tables, publish the uname/ELF platform strings and hwcaps,
 * then initialise caches and the per-CPU exception stacks.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	/* Cache the result so cpu_architecture() is valid from here on. */
	__cpu_architecture = __get_cpu_architecture();

	/* Copy the variant function tables for multi-implementation kernels. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' to the machine/platform names. */
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
487 
488 void __init dump_machine_table(void)
489 {
490 	struct machine_desc *p;
491 
492 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
493 	for_each_machine_desc(p)
494 		early_print("%08x\t%s\n", p->nr, p->name);
495 
496 	early_print("\nPlease check your kernel config and/or bootloader.\n");
497 
498 	while (true)
499 		/* can't use cpu_relax() here as it may require MMU setup */;
500 }
501 
502 int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
503 {
504 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
505 
506 	if (meminfo.nr_banks >= NR_BANKS) {
507 		printk(KERN_CRIT "NR_BANKS too low, "
508 			"ignoring memory at 0x%08llx\n", (long long)start);
509 		return -EINVAL;
510 	}
511 
512 	/*
513 	 * Ensure that start/size are aligned to a page boundary.
514 	 * Size is appropriately rounded down, start is rounded up.
515 	 */
516 	size -= start & ~PAGE_MASK;
517 	bank->start = PAGE_ALIGN(start);
518 
519 #ifndef CONFIG_LPAE
520 	if (bank->start + size < bank->start) {
521 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
522 			"32-bit physical address space\n", (long long)start);
523 		/*
524 		 * To ensure bank->start + bank->size is representable in
525 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
526 		 * This means we lose a page after masking.
527 		 */
528 		size = ULONG_MAX - bank->start;
529 	}
530 #endif
531 
532 	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
533 
534 	/*
535 	 * Check whether this memory region has non-zero size or
536 	 * invalid node number.
537 	 */
538 	if (bank->size == 0)
539 		return -EINVAL;
540 
541 	meminfo.nr_banks++;
542 	return 0;
543 }
544 
545 /*
546  * Pick out the memory size.  We look for mem=size@start,
547  * where start and size are "size[KkMm]"
548  */
549 static int __init early_mem(char *p)
550 {
551 	static int usermem __initdata = 0;
552 	phys_addr_t size;
553 	phys_addr_t start;
554 	char *endp;
555 
556 	/*
557 	 * If the user specifies memory size, we
558 	 * blow away any automatically generated
559 	 * size.
560 	 */
561 	if (usermem == 0) {
562 		usermem = 1;
563 		meminfo.nr_banks = 0;
564 	}
565 
566 	start = PHYS_OFFSET;
567 	size  = memparse(p, &endp);
568 	if (*endp == '@')
569 		start = memparse(endp + 1, NULL);
570 
571 	arm_add_memory(start, size);
572 
573 	return 0;
574 }
575 early_param("mem", early_mem);
576 
/*
 * Register the standard resource tree: one "System RAM" resource per
 * memblock region, with kernel code/data nested inside the region that
 * contains them, plus optional Video RAM and legacy parallel-port
 * ranges from the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		/* One resource per memory region; freed never — lives for
		 * the kernel's lifetime. */
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image resources under the region that
		 * fully contains them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
621 
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default 80x30 text-mode parameters for the VGA/dummy console. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif
632 
633 static int __init customize_machine(void)
634 {
635 	/* customizes platform devices, or adds new ones */
636 	if (machine_desc->init_machine)
637 		machine_desc->init_machine();
638 	return 0;
639 }
640 arch_initcall(customize_machine);
641 
642 static int __init init_machine_late(void)
643 {
644 	if (machine_desc->init_late)
645 		machine_desc->init_late();
646 	return 0;
647 }
648 late_initcall(init_machine_late);
649 
#ifdef CONFIG_KEXEC
/* Total managed low memory in bytes, from the bootmem pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Nonzero means no (or malformed) crashkernel= option: nothing to do. */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the reservation so /proc/iomem shows "Crash kernel". */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
698 
699 static int __init meminfo_cmp(const void *_a, const void *_b)
700 {
701 	const struct membank *a = _a, *b = _b;
702 	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
703 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
704 }
705 
/*
 * Architecture boot entry point: identify the CPU, pick the machine
 * descriptor (FDT first, ATAGs as fallback), set up memory, paging and
 * resources, then wire up machine-specific hooks.  The call order here
 * is load-bearing — do not reorder casually.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
	/* Try device tree first; fall back to the ATAG-based machine list. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	setup_dma_zone(mdesc);

	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	/* Banks must be sorted before the memory layout is validated. */
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
		smp_init_cpus();
	}
#endif
	reserve_crashkernel();

	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Machine's early init hook runs last, with core setup complete. */
	if (mdesc->init_early)
		mdesc->init_early();
}
770 
771 
772 static int __init topology_init(void)
773 {
774 	int cpu;
775 
776 	for_each_possible_cpu(cpu) {
777 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
778 		cpuinfo->cpu.hotpluggable = 1;
779 		register_cpu(&cpuinfo->cpu, cpu);
780 	}
781 
782 	return 0;
783 }
784 subsys_initcall(topology_init);
785 
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used by other code to publish entries. */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *dir = proc_mkdir("cpu", NULL);

	return dir ? 0 : -ENOMEM;
}
fs_initcall(proc_cpu_init);
#endif
798 
/*
 * Feature names for /proc/cpuinfo, indexed by HWCAP bit number and
 * terminated by NULL (see the elf_hwcap loop in c_show()).
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	NULL
};
821 
/*
 * seq_file show callback producing the whole of /proc/cpuinfo:
 * processor name, per-CPU BogoMIPS, feature flags and the decoded
 * fields of the main ID register, then the board details.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* Decode variant/part per the CPUID layout (cf. __get_cpu_architecture). */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
884 
885 static void *c_start(struct seq_file *m, loff_t *pos)
886 {
887 	return *pos < 1 ? (void *)1 : NULL;
888 }
889 
890 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
891 {
892 	++*pos;
893 	return NULL;
894 }
895 
/* Nothing to release for the single-record /proc/cpuinfo iterator. */
static void c_stop(struct seq_file *m, void *v)
{
}
899 
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
906