xref: /linux/arch/arm/kernel/setup.c (revision c98be0c96db00e9b6b02d31e0fa7590c54cdaaac)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_platform.h>
22 #include <linux/init.h>
23 #include <linux/kexec.h>
24 #include <linux/of_fdt.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/proc_fs.h>
29 #include <linux/memblock.h>
30 #include <linux/bug.h>
31 #include <linux/compiler.h>
32 #include <linux/sort.h>
33 
34 #include <asm/unified.h>
35 #include <asm/cp15.h>
36 #include <asm/cpu.h>
37 #include <asm/cputype.h>
38 #include <asm/elf.h>
39 #include <asm/procinfo.h>
40 #include <asm/psci.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48 
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58 #include <asm/virt.h>
59 
60 #include "atags.h"
61 
62 
63 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
64 char fpe_type[8];
65 
66 static int __init fpe_setup(char *line)
67 {
68 	memcpy(fpe_type, line, 8);
69 	return 1;
70 }
71 
72 __setup("fpe=", fpe_setup);
73 #endif
74 
75 extern void paging_init(const struct machine_desc *desc);
76 extern void early_paging_init(const struct machine_desc *,
77 			      struct proc_info_list *);
78 extern void sanity_check_meminfo(void);
79 extern enum reboot_mode reboot_mode;
80 extern void setup_dma_zone(const struct machine_desc *desc);
81 
82 unsigned int processor_id;
83 EXPORT_SYMBOL(processor_id);
84 unsigned int __machine_arch_type __read_mostly;
85 EXPORT_SYMBOL(__machine_arch_type);
86 unsigned int cacheid __read_mostly;
87 EXPORT_SYMBOL(cacheid);
88 
89 unsigned int __atags_pointer __initdata;
90 
91 unsigned int system_rev;
92 EXPORT_SYMBOL(system_rev);
93 
94 unsigned int system_serial_low;
95 EXPORT_SYMBOL(system_serial_low);
96 
97 unsigned int system_serial_high;
98 EXPORT_SYMBOL(system_serial_high);
99 
100 unsigned int elf_hwcap __read_mostly;
101 EXPORT_SYMBOL(elf_hwcap);
102 
103 
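/*
 * When the kernel is built to support more than one CPU, TLB, user-copy
 * or cache implementation, setup_processor() copies the selected
 * implementation's function table into these structures at boot.
 */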
104 #ifdef MULTI_CPU
105 struct processor processor __read_mostly;
106 #endif
107 #ifdef MULTI_TLB
108 struct cpu_tlb_fns cpu_tlb __read_mostly;
109 #endif
110 #ifdef MULTI_USER
111 struct cpu_user_fns cpu_user __read_mostly;
112 #endif
113 #ifdef MULTI_CACHE
114 struct cpu_cache_fns cpu_cache __read_mostly;
115 #endif
116 #ifdef CONFIG_OUTER_CACHE
117 struct outer_cache_fns outer_cache __read_mostly;
118 EXPORT_SYMBOL(outer_cache);
119 #endif
120 
121 /*
122  * Cached cpu_architecture() result for use by assembler code.
123  * C code should use the cpu_architecture() function instead of accessing this
124  * variable directly.
125  */
126 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
127 
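/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes; cpu_init() points each mode's stack pointer at the
 * corresponding three-word area.
 */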
128 struct stack {
129 	u32 irq[3];
130 	u32 abt[3];
131 	u32 und[3];
132 } ____cacheline_aligned;
133 
134 #ifndef CONFIG_CPU_V7M
135 static struct stack stacks[NR_CPUS];
136 #endif
137 
138 char elf_platform[ELF_PLATFORM_SIZE];
139 EXPORT_SYMBOL(elf_platform);
140 
141 static const char *cpu_name;
142 static const char *machine_name;
143 static char __initdata cmd_line[COMMAND_LINE_SIZE];
144 const struct machine_desc *machine_desc __initdata;
145 
146 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
147 #define ENDIANNESS ((char)endian_test.l)
148 
149 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
150 
151 /*
152  * Standard memory resources
153  */
154 static struct resource mem_res[] = {
155 	{
156 		.name = "Video RAM",
157 		.start = 0,
158 		.end = 0,
159 		.flags = IORESOURCE_MEM
160 	},
161 	{
162 		.name = "Kernel code",
163 		.start = 0,
164 		.end = 0,
165 		.flags = IORESOURCE_MEM
166 	},
167 	{
168 		.name = "Kernel data",
169 		.start = 0,
170 		.end = 0,
171 		.flags = IORESOURCE_MEM
172 	}
173 };
174 
175 #define video_ram   mem_res[0]
176 #define kernel_code mem_res[1]
177 #define kernel_data mem_res[2]
178 
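/*
 * Legacy PC-style parallel port I/O ranges; these are only claimed when
 * the machine description sets the corresponding reserve_lp* flag.
 */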
179 static struct resource io_res[] = {
180 	{
181 		.name = "reserved",
182 		.start = 0x3bc,
183 		.end = 0x3be,
184 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
185 	},
186 	{
187 		.name = "reserved",
188 		.start = 0x378,
189 		.end = 0x37f,
190 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
191 	},
192 	{
193 		.name = "reserved",
194 		.start = 0x278,
195 		.end = 0x27f,
196 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
197 	}
198 };
199 
200 #define lp0 io_res[0]
201 #define lp1 io_res[1]
202 #define lp2 io_res[2]
203 
204 static const char *proc_arch[] = {
205 	"undefined/unknown",
206 	"3",
207 	"4",
208 	"4T",
209 	"5",
210 	"5T",
211 	"5TE",
212 	"5TEJ",
213 	"6TEJ",
214 	"7",
215 	"7M",
216 	"?(12)",
217 	"?(13)",
218 	"?(14)",
219 	"?(15)",
220 	"?(16)",
221 	"?(17)",
222 };
223 
224 #ifdef CONFIG_CPU_V7M
225 static int __get_cpu_architecture(void)
226 {
227 	return CPU_ARCH_ARMv7M;
228 }
229 #else
230 static int __get_cpu_architecture(void)
231 {
232 	int cpu_arch;
233 
234 	if ((read_cpuid_id() & 0x0008f000) == 0) {
235 		cpu_arch = CPU_ARCH_UNKNOWN;
236 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
237 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
238 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
239 		cpu_arch = (read_cpuid_id() >> 16) & 7;
240 		if (cpu_arch)
241 			cpu_arch += CPU_ARCH_ARMv3;
242 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
243 		unsigned int mmfr0;
244 
245 		/* Revised CPUID format. Read the Memory Model Feature
246 		 * Register 0 and check for VMSAv7 or PMSAv7 */
247 		asm("mrc	p15, 0, %0, c0, c1, 4"
248 		    : "=r" (mmfr0));
249 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
250 		    (mmfr0 & 0x000000f0) >= 0x00000030)
251 			cpu_arch = CPU_ARCH_ARMv7;
252 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
253 			 (mmfr0 & 0x000000f0) == 0x00000020)
254 			cpu_arch = CPU_ARCH_ARMv6;
255 		else
256 			cpu_arch = CPU_ARCH_UNKNOWN;
257 	} else
258 		cpu_arch = CPU_ARCH_UNKNOWN;
259 
260 	return cpu_arch;
261 }
262 #endif
263 
264 int __pure cpu_architecture(void)
265 {
266 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
267 
268 	return __cpu_architecture;
269 }
270 
271 static int cpu_has_aliasing_icache(unsigned int arch)
272 {
273 	int aliasing_icache;
274 	unsigned int id_reg, num_sets, line_size;
275 
276 	/* PIPT caches never alias. */
277 	if (icache_is_pipt())
278 		return 0;
279 
280 	/* arch specifies the register format */
281 	switch (arch) {
282 	case CPU_ARCH_ARMv7:
283 		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
284 		    : /* No output operands */
285 		    : "r" (1));
286 		isb();
287 		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
288 		    : "=r" (id_reg));
289 		line_size = 4 << ((id_reg & 0x7) + 2);
290 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
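		/*
		 * A VIPT I-cache aliases only if one way (line size * number
		 * of sets) is larger than a page, since the set index then
		 * uses bits above the page offset.
		 */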
291 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
292 		break;
293 	case CPU_ARCH_ARMv6:
294 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
295 		break;
296 	default:
297 		/* I-cache aliases will be handled by D-cache aliasing code */
298 		aliasing_icache = 0;
299 	}
300 
301 	return aliasing_icache;
302 }
303 
304 static void __init cacheid_init(void)
305 {
306 	unsigned int arch = cpu_architecture();
307 
308 	if (arch == CPU_ARCH_ARMv7M) {
309 		cacheid = 0;
310 	} else if (arch >= CPU_ARCH_ARMv6) {
311 		unsigned int cachetype = read_cpuid_cachetype();
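		/*
		 * Cache Type Register format field (bits 31:29): 0b100 means
		 * the ARMv7 layout, older values the ARMv6 layout.
		 */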
312 		if ((cachetype & (7 << 29)) == 4 << 29) {
313 			/* ARMv7 register format */
314 			arch = CPU_ARCH_ARMv7;
315 			cacheid = CACHEID_VIPT_NONALIASING;
316 			switch (cachetype & (3 << 14)) {
317 			case (1 << 14):
318 				cacheid |= CACHEID_ASID_TAGGED;
319 				break;
320 			case (3 << 14):
321 				cacheid |= CACHEID_PIPT;
322 				break;
323 			}
324 		} else {
325 			arch = CPU_ARCH_ARMv6;
326 			if (cachetype & (1 << 23))
327 				cacheid = CACHEID_VIPT_ALIASING;
328 			else
329 				cacheid = CACHEID_VIPT_NONALIASING;
330 		}
331 		if (cpu_has_aliasing_icache(arch))
332 			cacheid |= CACHEID_VIPT_I_ALIASING;
333 	} else {
334 		cacheid = CACHEID_VIVT;
335 	}
336 
337 	pr_info("CPU: %s data cache, %s instruction cache\n",
338 		cache_is_vivt() ? "VIVT" :
339 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
340 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
341 		cache_is_vivt() ? "VIVT" :
342 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
343 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
344 		icache_is_pipt() ? "PIPT" :
345 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
346 }
347 
348 /*
349  * This function re-uses the assembly code in head.S, which
350  * already provides the required functionality.
351  */
352 extern struct proc_info_list *lookup_processor_type(unsigned int);
353 
354 void __init early_print(const char *str, ...)
355 {
356 	extern void printascii(const char *);
357 	char buf[256];
358 	va_list ap;
359 
360 	va_start(ap, str);
361 	vsnprintf(buf, sizeof(buf), str, ap);
362 	va_end(ap);
363 
364 #ifdef CONFIG_DEBUG_LL
365 	printascii(buf);
366 #endif
367 	printk("%s", buf);
368 }
369 
370 static void __init cpuid_init_hwcaps(void)
371 {
372 	unsigned int divide_instrs, vmsa;
373 
374 	if (cpu_architecture() < CPU_ARCH_ARMv7)
375 		return;
376 
377 	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
378 
379 	switch (divide_instrs) {
380 	case 2:
381 		elf_hwcap |= HWCAP_IDIVA;
382 	case 1:
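		/* fall through: a value of 2 means divide is implemented in
		 * both ARM and Thumb state, so IDIVT is implied as well. */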
383 		elf_hwcap |= HWCAP_IDIVT;
384 	}
385 
386 	/* LPAE implies atomic ldrd/strd instructions */
387 	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
388 	if (vmsa >= 5)
389 		elf_hwcap |= HWCAP_LPAE;
390 }
391 
392 static void __init feat_v6_fixup(void)
393 {
394 	int id = read_cpuid_id();
395 
396 	if ((id & 0xff0f0000) != 0x41070000)
397 		return;
398 
399 	/*
400 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
401 	 * see also kuser_get_tls_init.
402 	 */
403 	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
404 		elf_hwcap &= ~HWCAP_TLS;
405 }
406 
407 /*
408  * cpu_init - initialise one CPU.
409  *
410  * cpu_init sets up the per-CPU stacks.
411  */
412 void notrace cpu_init(void)
413 {
414 #ifndef CONFIG_CPU_V7M
415 	unsigned int cpu = smp_processor_id();
416 	struct stack *stk = &stacks[cpu];
417 
418 	if (cpu >= NR_CPUS) {
419 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
420 		BUG();
421 	}
422 
423 	/*
424 	 * This only works on resume and secondary cores. For the boot CPU,
425 	 * smp_prepare_boot_cpu() is called after the percpu area is set up.
426 	 */
427 	set_my_cpu_offset(per_cpu_offset(cpu));
428 
429 	cpu_proc_init();
430 
431 	/*
432 	 * Define the placement constraint for the inline asm directive below.
433 	 * In Thumb-2, msr with an immediate value is not allowed.
434 	 */
435 #ifdef CONFIG_THUMB2_KERNEL
436 #define PLC	"r"
437 #else
438 #define PLC	"I"
439 #endif
440 
441 	/*
442 	 * setup stacks for re-entrant exception handlers
443 	 */
444 	__asm__ (
445 	"msr	cpsr_c, %1\n\t"
446 	"add	r14, %0, %2\n\t"
447 	"mov	sp, r14\n\t"
448 	"msr	cpsr_c, %3\n\t"
449 	"add	r14, %0, %4\n\t"
450 	"mov	sp, r14\n\t"
451 	"msr	cpsr_c, %5\n\t"
452 	"add	r14, %0, %6\n\t"
453 	"mov	sp, r14\n\t"
454 	"msr	cpsr_c, %7"
455 	    :
456 	    : "r" (stk),
457 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
458 	      "I" (offsetof(struct stack, irq[0])),
459 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
460 	      "I" (offsetof(struct stack, abt[0])),
461 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
462 	      "I" (offsetof(struct stack, und[0])),
463 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
464 	    : "r14");
465 #endif
466 }
467 
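/* Logical CPU number to hardware (MPIDR) ID map, filled in during boot. */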
468 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
469 
470 void __init smp_setup_processor_id(void)
471 {
472 	int i;
473 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
474 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
475 
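	/*
	 * Swap the boot CPU's hardware index with logical CPU 0 so that the
	 * booting CPU is always logical CPU 0; all other CPUs keep an
	 * identity mapping.
	 */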
476 	cpu_logical_map(0) = cpu;
477 	for (i = 1; i < nr_cpu_ids; ++i)
478 		cpu_logical_map(i) = i == cpu ? 0 : i;
479 
480 	/*
481 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
482 	 * using a percpu variable too early; for example, lockdep will
483 	 * access a percpu variable inside lock_release().
484 	 */
485 	set_my_cpu_offset(0);
486 
487 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
488 }
489 
490 struct mpidr_hash mpidr_hash;
491 #ifdef CONFIG_SMP
492 /**
493  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
494  *			  level in order to build a linear index from an
495  *			  MPIDR value. The result is a collision-free hash
496  *			  carried out through shifting and ORing.
497  */
498 static void __init smp_build_mpidr_hash(void)
499 {
500 	u32 i, affinity;
501 	u32 fs[3], bits[3], ls, mask = 0;
502 	/*
503 	 * Pre-scan the list of MPIDRs and filter out bits that do
504 	 * not contribute to affinity levels, i.e. they never toggle.
505 	 */
506 	for_each_possible_cpu(i)
507 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
508 	pr_debug("mask of set bits 0x%x\n", mask);
509 	/*
510 	 * Find and stash the last and first bit set at all affinity levels to
511 	 * check how many bits are required to represent them.
512 	 */
513 	for (i = 0; i < 3; i++) {
514 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
515 		/*
516 		 * Find the positions of the most and least significant set
517 		 * bits to determine how many bits are required to express
518 		 * this affinity level.
519 		 */
520 		ls = fls(affinity);
521 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
522 		bits[i] = ls - fs[i];
523 	}
524 	/*
525 	 * An index can be created from the MPIDR by isolating the
526 	 * significant bits at each affinity level and by shifting
527 	 * them in order to compress the 24-bit value space into a
528 	 * smaller set of values. This is equivalent to hashing
529 	 * the MPIDR through shifting and ORing. It is a collision-free
530 	 * hash, though not minimal, since some levels might contain a
531 	 * number of CPUs that is not an exact power of 2 and their bit
532 	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
533 	 */
534 	mpidr_hash.shift_aff[0] = fs[0];
535 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
536 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
537 						(bits[1] + bits[0]);
538 	mpidr_hash.mask = mask;
539 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
540 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
541 				mpidr_hash.shift_aff[0],
542 				mpidr_hash.shift_aff[1],
543 				mpidr_hash.shift_aff[2],
544 				mpidr_hash.mask,
545 				mpidr_hash.bits);
546 	/*
547 	 * 4x is an arbitrary value used to warn on a hash table much bigger
548 	 * than expected on most systems.
549 	 */
550 	if (mpidr_hash_size() > 4 * num_possible_cpus())
551 		pr_warn("Large number of MPIDR hash buckets detected\n");
552 	sync_cache_w(&mpidr_hash);
553 }
554 #endif
555 
556 static void __init setup_processor(void)
557 {
558 	struct proc_info_list *list;
559 
560 	/*
561 	 * Locate the processor in the list of supported processor
562 	 * types.  The linker builds this table for us from the
563 	 * entries in arch/arm/mm/proc-*.S
564 	 */
565 	list = lookup_processor_type(read_cpuid_id());
566 	if (!list) {
567 		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
568 		       read_cpuid_id());
569 		while (1);
570 	}
571 
572 	cpu_name = list->cpu_name;
573 	__cpu_architecture = __get_cpu_architecture();
574 
575 #ifdef MULTI_CPU
576 	processor = *list->proc;
577 #endif
578 #ifdef MULTI_TLB
579 	cpu_tlb = *list->tlb;
580 #endif
581 #ifdef MULTI_USER
582 	cpu_user = *list->user;
583 #endif
584 #ifdef MULTI_CACHE
585 	cpu_cache = *list->cache;
586 #endif
587 
588 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
589 		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
590 		proc_arch[cpu_architecture()], cr_alignment);
591 
592 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
593 		 list->arch_name, ENDIANNESS);
594 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
595 		 list->elf_name, ENDIANNESS);
596 	elf_hwcap = list->elf_hwcap;
597 
598 	cpuid_init_hwcaps();
599 
600 #ifndef CONFIG_ARM_THUMB
601 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
602 #endif
603 
604 	erratum_a15_798181_init();
605 
606 	feat_v6_fixup();
607 
608 	cacheid_init();
609 	cpu_init();
610 }
611 
612 void __init dump_machine_table(void)
613 {
614 	const struct machine_desc *p;
615 
616 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
617 	for_each_machine_desc(p)
618 		early_print("%08x\t%s\n", p->nr, p->name);
619 
620 	early_print("\nPlease check your kernel config and/or bootloader.\n");
621 
622 	while (true)
623 		/* can't use cpu_relax() here as it may require MMU setup */;
624 }
625 
626 int __init arm_add_memory(u64 start, u64 size)
627 {
628 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
629 	u64 aligned_start;
630 
631 	if (meminfo.nr_banks >= NR_BANKS) {
632 		pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
633 			(long long)start);
634 		return -EINVAL;
635 	}
636 
637 	/*
638 	 * Ensure that start/size are aligned to a page boundary.
639 	 * Size is appropriately rounded down, start is rounded up.
640 	 */
641 	size -= start & ~PAGE_MASK;
642 	aligned_start = PAGE_ALIGN(start);
643 
644 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
645 	if (aligned_start > ULONG_MAX) {
646 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
647 			(long long)start);
648 		return -EINVAL;
649 	}
650 
651 	if (aligned_start + size > ULONG_MAX) {
652 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
653 			(long long)start);
654 		/*
655 		 * To ensure bank->start + bank->size is representable in
656 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
657 		 * This means we lose a page after masking.
658 		 */
659 		size = ULONG_MAX - aligned_start;
660 	}
661 #endif
662 
663 	if (aligned_start < PHYS_OFFSET) {
664 		if (aligned_start + size <= PHYS_OFFSET) {
665 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
666 				aligned_start, aligned_start + size);
667 			return -EINVAL;
668 		}
669 
670 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
671 			aligned_start, (u64)PHYS_OFFSET);
672 
673 		size -= PHYS_OFFSET - aligned_start;
674 		aligned_start = PHYS_OFFSET;
675 	}
676 
677 	bank->start = aligned_start;
678 	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
679 
680 	/*
681 	 * Check whether this memory region still has a non-zero size
682 	 * after the rounding and truncation above.
683 	 */
684 	if (bank->size == 0)
685 		return -EINVAL;
686 
687 	meminfo.nr_banks++;
688 	return 0;
689 }
690 
691 /*
692  * Pick out the memory size.  We look for mem=size@start,
693  * where start and size are "size[KkMm]"
694  */
695 static int __init early_mem(char *p)
696 {
697 	static int usermem __initdata = 0;
698 	u64 size;
699 	u64 start;
700 	char *endp;
701 
702 	/*
703 	 * If the user specifies memory size, we
704 	 * blow away any automatically generated
705 	 * size.
706 	 */
707 	if (usermem == 0) {
708 		usermem = 1;
709 		meminfo.nr_banks = 0;
710 	}
711 
712 	start = PHYS_OFFSET;
713 	size  = memparse(p, &endp);
714 	if (*endp == '@')
715 		start = memparse(endp + 1, NULL);
716 
717 	arm_add_memory(start, size);
718 
719 	return 0;
720 }
721 early_param("mem", early_mem);
722 
723 static void __init request_standard_resources(const struct machine_desc *mdesc)
724 {
725 	struct memblock_region *region;
726 	struct resource *res;
727 
728 	kernel_code.start   = virt_to_phys(_text);
729 	kernel_code.end     = virt_to_phys(_etext - 1);
730 	kernel_data.start   = virt_to_phys(_sdata);
731 	kernel_data.end     = virt_to_phys(_end - 1);
732 
733 	for_each_memblock(memory, region) {
734 		res = memblock_virt_alloc(sizeof(*res), 0);
735 		res->name  = "System RAM";
736 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
737 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
738 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
739 
740 		request_resource(&iomem_resource, res);
741 
742 		if (kernel_code.start >= res->start &&
743 		    kernel_code.end <= res->end)
744 			request_resource(res, &kernel_code);
745 		if (kernel_data.start >= res->start &&
746 		    kernel_data.end <= res->end)
747 			request_resource(res, &kernel_data);
748 	}
749 
750 	if (mdesc->video_start) {
751 		video_ram.start = mdesc->video_start;
752 		video_ram.end   = mdesc->video_end;
753 		request_resource(&iomem_resource, &video_ram);
754 	}
755 
756 	/*
757 	 * Some machines may never have lp0, lp1 or lp2, so only reserve
758 	 * them when the machine description asks for them.
759 	 */
760 	if (mdesc->reserve_lp0)
761 		request_resource(&ioport_resource, &lp0);
762 	if (mdesc->reserve_lp1)
763 		request_resource(&ioport_resource, &lp1);
764 	if (mdesc->reserve_lp2)
765 		request_resource(&ioport_resource, &lp2);
766 }
767 
768 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
769 struct screen_info screen_info = {
770  .orig_video_lines	= 30,
771  .orig_video_cols	= 80,
772  .orig_video_mode	= 0,
773  .orig_video_ega_bx	= 0,
774  .orig_video_isVGA	= 1,
775  .orig_video_points	= 8
776 };
777 #endif
778 
779 static int __init customize_machine(void)
780 {
781 	/*
782 	 * Customizes platform devices, or adds new ones.
783 	 * On DT-based machines, if no init_machine callback is provided we
784 	 * fall back to populating the machine from the device tree;
785 	 * otherwise every machine would need an init_machine callback.
786 	 */
787 	if (machine_desc->init_machine)
788 		machine_desc->init_machine();
789 #ifdef CONFIG_OF
790 	else
791 		of_platform_populate(NULL, of_default_bus_match_table,
792 					NULL, NULL);
793 #endif
794 	return 0;
795 }
796 arch_initcall(customize_machine);
797 
798 static int __init init_machine_late(void)
799 {
800 	if (machine_desc->init_late)
801 		machine_desc->init_late();
802 	return 0;
803 }
804 late_initcall(init_machine_late);
805 
806 #ifdef CONFIG_KEXEC
807 static inline unsigned long long get_total_mem(void)
808 {
809 	unsigned long total;
810 
811 	total = max_low_pfn - min_low_pfn;
812 	return total << PAGE_SHIFT;
813 }
814 
815 /**
816  * reserve_crashkernel() - reserves memory area for the crash kernel
817  *
818  * This function reserves the memory area given by the "crashkernel=" kernel
819  * command line parameter. The reserved memory is used by a dump capture
820  * kernel when the primary kernel crashes.
821  */
822 static void __init reserve_crashkernel(void)
823 {
824 	unsigned long long crash_size, crash_base;
825 	unsigned long long total_mem;
826 	int ret;
827 
828 	total_mem = get_total_mem();
829 	ret = parse_crashkernel(boot_command_line, total_mem,
830 				&crash_size, &crash_base);
831 	if (ret)
832 		return;
833 
834 	ret = memblock_reserve(crash_base, crash_size);
835 	if (ret < 0) {
836 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
837 			(unsigned long)crash_base);
838 		return;
839 	}
840 
841 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
842 		(unsigned long)(crash_size >> 20),
843 		(unsigned long)(crash_base >> 20),
844 		(unsigned long)(total_mem >> 20));
845 
846 	crashk_res.start = crash_base;
847 	crashk_res.end = crash_base + crash_size - 1;
848 	insert_resource(&iomem_resource, &crashk_res);
849 }
850 #else
851 static inline void reserve_crashkernel(void) {}
852 #endif /* CONFIG_KEXEC */
853 
854 static int __init meminfo_cmp(const void *_a, const void *_b)
855 {
856 	const struct membank *a = _a, *b = _b;
857 	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
858 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
859 }
860 
861 void __init hyp_mode_check(void)
862 {
863 #ifdef CONFIG_ARM_VIRT_EXT
864 	sync_boot_mode();
865 
866 	if (is_hyp_mode_available()) {
867 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
868 		pr_info("CPU: Virtualization extensions available.\n");
869 	} else if (is_hyp_mode_mismatched()) {
870 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
871 			__boot_cpu_mode & MODE_MASK);
872 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
873 	} else
874 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
875 #endif
876 }
877 
878 void __init setup_arch(char **cmdline_p)
879 {
880 	const struct machine_desc *mdesc;
881 
882 	setup_processor();
883 	mdesc = setup_machine_fdt(__atags_pointer);
884 	if (!mdesc)
885 		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
886 	machine_desc = mdesc;
887 	machine_name = mdesc->name;
888 
889 	if (mdesc->reboot_mode != REBOOT_HARD)
890 		reboot_mode = mdesc->reboot_mode;
891 
892 	init_mm.start_code = (unsigned long) _text;
893 	init_mm.end_code   = (unsigned long) _etext;
894 	init_mm.end_data   = (unsigned long) _edata;
895 	init_mm.brk	   = (unsigned long) _end;
896 
897 	/* populate cmd_line too for later use, preserving boot_command_line */
898 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
899 	*cmdline_p = cmd_line;
900 
901 	parse_early_param();
902 
903 	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
904 
905 	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
906 	setup_dma_zone(mdesc);
907 	sanity_check_meminfo();
908 	arm_memblock_init(&meminfo, mdesc);
909 
910 	paging_init(mdesc);
911 	request_standard_resources(mdesc);
912 
913 	if (mdesc->restart)
914 		arm_pm_restart = mdesc->restart;
915 
916 	unflatten_device_tree();
917 
918 	arm_dt_init_cpu_maps();
919 	psci_init();
920 #ifdef CONFIG_SMP
921 	if (is_smp()) {
922 		if (!mdesc->smp_init || !mdesc->smp_init()) {
923 			if (psci_smp_available())
924 				smp_set_ops(&psci_smp_ops);
925 			else if (mdesc->smp)
926 				smp_set_ops(mdesc->smp);
927 		}
928 		smp_init_cpus();
929 		smp_build_mpidr_hash();
930 	}
931 #endif
932 
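	/*
	 * On SMP the HYP/SVC mode check is deferred until all secondary
	 * CPUs have been brought up (see smp_cpus_done()).
	 */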
933 	if (!is_smp())
934 		hyp_mode_check();
935 
936 	reserve_crashkernel();
937 
938 #ifdef CONFIG_MULTI_IRQ_HANDLER
939 	handle_arch_irq = mdesc->handle_irq;
940 #endif
941 
942 #ifdef CONFIG_VT
943 #if defined(CONFIG_VGA_CONSOLE)
944 	conswitchp = &vga_con;
945 #elif defined(CONFIG_DUMMY_CONSOLE)
946 	conswitchp = &dummy_con;
947 #endif
948 #endif
949 
950 	if (mdesc->init_early)
951 		mdesc->init_early();
952 }
953 
954 
955 static int __init topology_init(void)
956 {
957 	int cpu;
958 
959 	for_each_possible_cpu(cpu) {
960 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
961 		cpuinfo->cpu.hotpluggable = 1;
962 		register_cpu(&cpuinfo->cpu, cpu);
963 	}
964 
965 	return 0;
966 }
967 subsys_initcall(topology_init);
968 
969 #ifdef CONFIG_HAVE_PROC_CPU
970 static int __init proc_cpu_init(void)
971 {
972 	struct proc_dir_entry *res;
973 
974 	res = proc_mkdir("cpu", NULL);
975 	if (!res)
976 		return -ENOMEM;
977 	return 0;
978 }
979 fs_initcall(proc_cpu_init);
980 #endif
981 
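/*
 * Order must match the HWCAP_* bit positions: c_show() prints the name
 * for each bit set in elf_hwcap.
 */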
982 static const char *hwcap_str[] = {
983 	"swp",
984 	"half",
985 	"thumb",
986 	"26bit",
987 	"fastmult",
988 	"fpa",
989 	"vfp",
990 	"edsp",
991 	"java",
992 	"iwmmxt",
993 	"crunch",
994 	"thumbee",
995 	"neon",
996 	"vfpv3",
997 	"vfpv3d16",
998 	"tls",
999 	"vfpv4",
1000 	"idiva",
1001 	"idivt",
1002 	"vfpd32",
1003 	"lpae",
1004 	"evtstrm",
1005 	NULL
1006 };
1007 
1008 static int c_show(struct seq_file *m, void *v)
1009 {
1010 	int i, j;
1011 	u32 cpuid;
1012 
1013 	for_each_online_cpu(i) {
1014 		/*
1015 		 * glibc reads /proc/cpuinfo to determine the number of
1016 		 * online processors, looking for lines beginning with
1017 		 * "processor".  Give glibc what it expects.
1018 		 */
1019 		seq_printf(m, "processor\t: %d\n", i);
1020 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1021 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1022 			   cpu_name, cpuid & 15, elf_platform);
1023 
1024 		/* dump out the processor features */
1025 		seq_puts(m, "Features\t: ");
1026 
1027 		for (j = 0; hwcap_str[j]; j++)
1028 			if (elf_hwcap & (1 << j))
1029 				seq_printf(m, "%s ", hwcap_str[j]);
1030 
1031 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1032 		seq_printf(m, "CPU architecture: %s\n",
1033 			   proc_arch[cpu_architecture()]);
1034 
1035 		if ((cpuid & 0x0008f000) == 0x00000000) {
1036 			/* pre-ARM7 */
1037 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1038 		} else {
1039 			if ((cpuid & 0x0008f000) == 0x00007000) {
1040 				/* ARM7 */
1041 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1042 					   (cpuid >> 16) & 127);
1043 			} else {
1044 				/* post-ARM7 */
1045 				seq_printf(m, "CPU variant\t: 0x%x\n",
1046 					   (cpuid >> 20) & 15);
1047 			}
1048 			seq_printf(m, "CPU part\t: 0x%03x\n",
1049 				   (cpuid >> 4) & 0xfff);
1050 		}
1051 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1052 	}
1053 
1054 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1055 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1056 	seq_printf(m, "Serial\t\t: %08x%08x\n",
1057 		   system_serial_high, system_serial_low);
1058 
1059 	return 0;
1060 }
1061 
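/*
 * c_show() prints every online CPU in a single pass, so the seq_file
 * iterator only needs to produce one dummy token.
 */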
1062 static void *c_start(struct seq_file *m, loff_t *pos)
1063 {
1064 	return *pos < 1 ? (void *)1 : NULL;
1065 }
1066 
1067 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1068 {
1069 	++*pos;
1070 	return NULL;
1071 }
1072 
1073 static void c_stop(struct seq_file *m, void *v)
1074 {
1075 }
1076 
1077 const struct seq_operations cpuinfo_op = {
1078 	.start	= c_start,
1079 	.next	= c_next,
1080 	.stop	= c_stop,
1081 	.show	= c_show
1082 };
1083