xref: /linux/arch/arm/kernel/setup.c (revision 071bf69a0220253a44acb8b2a27f7a262b9a46bf)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/efi.h>
11 #include <linux/export.h>
12 #include <linux/kernel.h>
13 #include <linux/stddef.h>
14 #include <linux/ioport.h>
15 #include <linux/delay.h>
16 #include <linux/utsname.h>
17 #include <linux/initrd.h>
18 #include <linux/console.h>
19 #include <linux/bootmem.h>
20 #include <linux/seq_file.h>
21 #include <linux/screen_info.h>
22 #include <linux/of_platform.h>
23 #include <linux/init.h>
24 #include <linux/kexec.h>
25 #include <linux/of_fdt.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34 #include <linux/psci.h>
35 
36 #include <asm/unified.h>
37 #include <asm/cp15.h>
38 #include <asm/cpu.h>
39 #include <asm/cputype.h>
40 #include <asm/efi.h>
41 #include <asm/elf.h>
42 #include <asm/early_ioremap.h>
43 #include <asm/fixmap.h>
44 #include <asm/procinfo.h>
45 #include <asm/psci.h>
46 #include <asm/sections.h>
47 #include <asm/setup.h>
48 #include <asm/smp_plat.h>
49 #include <asm/mach-types.h>
50 #include <asm/cacheflush.h>
51 #include <asm/cachetype.h>
52 #include <asm/tlbflush.h>
53 #include <asm/xen/hypervisor.h>
54 
55 #include <asm/prom.h>
56 #include <asm/mach/arch.h>
57 #include <asm/mach/irq.h>
58 #include <asm/mach/time.h>
59 #include <asm/system_info.h>
60 #include <asm/system_misc.h>
61 #include <asm/traps.h>
62 #include <asm/unwind.h>
63 #include <asm/memblock.h>
64 #include <asm/virt.h>
65 
66 #include "atags.h"
67 
68 
69 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
70 char fpe_type[8];
71 
72 static int __init fpe_setup(char *line)
73 {
74 	memcpy(fpe_type, line, 8);
75 	return 1;
76 }
77 
78 __setup("fpe=", fpe_setup);
79 #endif
80 
81 extern void init_default_cache_policy(unsigned long);
82 extern void paging_init(const struct machine_desc *desc);
83 extern void early_paging_init(const struct machine_desc *);
84 extern void sanity_check_meminfo(void);
85 extern enum reboot_mode reboot_mode;
86 extern void setup_dma_zone(const struct machine_desc *desc);
87 
88 unsigned int processor_id;
89 EXPORT_SYMBOL(processor_id);
90 unsigned int __machine_arch_type __read_mostly;
91 EXPORT_SYMBOL(__machine_arch_type);
92 unsigned int cacheid __read_mostly;
93 EXPORT_SYMBOL(cacheid);
94 
95 unsigned int __atags_pointer __initdata;
96 
97 unsigned int system_rev;
98 EXPORT_SYMBOL(system_rev);
99 
100 const char *system_serial;
101 EXPORT_SYMBOL(system_serial);
102 
103 unsigned int system_serial_low;
104 EXPORT_SYMBOL(system_serial_low);
105 
106 unsigned int system_serial_high;
107 EXPORT_SYMBOL(system_serial_high);
108 
109 unsigned int elf_hwcap __read_mostly;
110 EXPORT_SYMBOL(elf_hwcap);
111 
112 unsigned int elf_hwcap2 __read_mostly;
113 EXPORT_SYMBOL(elf_hwcap2);
114 
115 
116 #ifdef MULTI_CPU
117 struct processor processor __read_mostly;
118 #endif
119 #ifdef MULTI_TLB
120 struct cpu_tlb_fns cpu_tlb __read_mostly;
121 #endif
122 #ifdef MULTI_USER
123 struct cpu_user_fns cpu_user __read_mostly;
124 #endif
125 #ifdef MULTI_CACHE
126 struct cpu_cache_fns cpu_cache __read_mostly;
127 #endif
128 #ifdef CONFIG_OUTER_CACHE
129 struct outer_cache_fns outer_cache __read_mostly;
130 EXPORT_SYMBOL(outer_cache);
131 #endif
132 
133 /*
134  * Cached cpu_architecture() result for use by assembler code.
135  * C code should use the cpu_architecture() function instead of accessing this
136  * variable directly.
137  */
138 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
139 
140 struct stack {
141 	u32 irq[3];
142 	u32 abt[3];
143 	u32 und[3];
144 	u32 fiq[3];
145 } ____cacheline_aligned;
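/*
 * Three words per mode are enough here: the exception vector stubs in
 * entry-armv.S only stash r0, the exception lr and the spsr on these
 * stacks before switching to the SVC-mode stack.
 */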
146 
147 #ifndef CONFIG_CPU_V7M
148 static struct stack stacks[NR_CPUS];
149 #endif
150 
151 char elf_platform[ELF_PLATFORM_SIZE];
152 EXPORT_SYMBOL(elf_platform);
153 
154 static const char *cpu_name;
155 static const char *machine_name;
156 static char __initdata cmd_line[COMMAND_LINE_SIZE];
157 const struct machine_desc *machine_desc __initdata;
158 
159 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
160 #define ENDIANNESS ((char)endian_test.l)
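/*
 * ENDIANNESS evaluates to the least significant byte of .l: c[0] ('l') on a
 * little-endian kernel, c[3] ('b') on a big-endian one.  setup_processor()
 * appends it to the utsname machine string and elf_platform, e.g. "armv7l".
 */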
161 
162 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
163 
164 /*
165  * Standard memory resources
166  */
167 static struct resource mem_res[] = {
168 	{
169 		.name = "Video RAM",
170 		.start = 0,
171 		.end = 0,
172 		.flags = IORESOURCE_MEM
173 	},
174 	{
175 		.name = "Kernel code",
176 		.start = 0,
177 		.end = 0,
178 		.flags = IORESOURCE_SYSTEM_RAM
179 	},
180 	{
181 		.name = "Kernel data",
182 		.start = 0,
183 		.end = 0,
184 		.flags = IORESOURCE_SYSTEM_RAM
185 	}
186 };
187 
188 #define video_ram   mem_res[0]
189 #define kernel_code mem_res[1]
190 #define kernel_data mem_res[2]
191 
192 static struct resource io_res[] = {
193 	{
194 		.name = "reserved",
195 		.start = 0x3bc,
196 		.end = 0x3be,
197 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
198 	},
199 	{
200 		.name = "reserved",
201 		.start = 0x378,
202 		.end = 0x37f,
203 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
204 	},
205 	{
206 		.name = "reserved",
207 		.start = 0x278,
208 		.end = 0x27f,
209 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
210 	}
211 };
212 
213 #define lp0 io_res[0]
214 #define lp1 io_res[1]
215 #define lp2 io_res[2]
216 
217 static const char *proc_arch[] = {
218 	"undefined/unknown",
219 	"3",
220 	"4",
221 	"4T",
222 	"5",
223 	"5T",
224 	"5TE",
225 	"5TEJ",
226 	"6TEJ",
227 	"7",
228 	"7M",
229 	"?(12)",
230 	"?(13)",
231 	"?(14)",
232 	"?(15)",
233 	"?(16)",
234 	"?(17)",
235 };
236 
237 #ifdef CONFIG_CPU_V7M
238 static int __get_cpu_architecture(void)
239 {
240 	return CPU_ARCH_ARMv7M;
241 }
242 #else
243 static int __get_cpu_architecture(void)
244 {
245 	int cpu_arch;
246 
247 	if ((read_cpuid_id() & 0x0008f000) == 0) {
248 		cpu_arch = CPU_ARCH_UNKNOWN;
249 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
250 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
251 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
252 		cpu_arch = (read_cpuid_id() >> 16) & 7;
253 		if (cpu_arch)
254 			cpu_arch += CPU_ARCH_ARMv3;
255 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
256 		/* Revised CPUID format. Read the Memory Model Feature
257 		 * Register 0 and check for VMSAv7 or PMSAv7 */
258 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
259 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
260 		    (mmfr0 & 0x000000f0) >= 0x00000030)
261 			cpu_arch = CPU_ARCH_ARMv7;
262 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
263 			 (mmfr0 & 0x000000f0) == 0x00000020)
264 			cpu_arch = CPU_ARCH_ARMv6;
265 		else
266 			cpu_arch = CPU_ARCH_UNKNOWN;
267 	} else
268 		cpu_arch = CPU_ARCH_UNKNOWN;
269 
270 	return cpu_arch;
271 }
272 #endif
273 
274 int __pure cpu_architecture(void)
275 {
276 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
277 
278 	return __cpu_architecture;
279 }
280 
281 static int cpu_has_aliasing_icache(unsigned int arch)
282 {
283 	int aliasing_icache;
284 	unsigned int id_reg, num_sets, line_size;
285 
286 	/* PIPT caches never alias. */
287 	if (icache_is_pipt())
288 		return 0;
289 
290 	/* arch specifies the register format */
291 	switch (arch) {
292 	case CPU_ARCH_ARMv7:
293 		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
294 		    : /* No output operands */
295 		    : "r" (1));
296 		isb();
297 		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
298 		    : "=r" (id_reg));
299 		line_size = 4 << ((id_reg & 0x7) + 2);
300 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
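		/*
		 * line_size * num_sets is the size of one way of the L1
		 * I-cache (CSSELR was set to 1 above); if a way exceeds a
		 * page, virtually indexed lookups can alias.  E.g. a 16KB
		 * 4-way cache with 32-byte lines has 4KB ways, which just
		 * fits a 4KB page and so does not alias.
		 */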
301 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
302 		break;
303 	case CPU_ARCH_ARMv6:
304 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
305 		break;
306 	default:
307 		/* I-cache aliases will be handled by D-cache aliasing code */
308 		aliasing_icache = 0;
309 	}
310 
311 	return aliasing_icache;
312 }
313 
314 static void __init cacheid_init(void)
315 {
316 	unsigned int arch = cpu_architecture();
317 
318 	if (arch == CPU_ARCH_ARMv7M) {
319 		cacheid = 0;
320 	} else if (arch >= CPU_ARCH_ARMv6) {
321 		unsigned int cachetype = read_cpuid_cachetype();
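		/*
		 * CTR bits [31:29] give the register format: 0b100 is the
		 * ARMv7 layout, in which bits [15:14] (L1Ip) describe the
		 * L1 instruction cache policy tested in the switch below.
		 */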
322 		if ((cachetype & (7 << 29)) == 4 << 29) {
323 			/* ARMv7 register format */
324 			arch = CPU_ARCH_ARMv7;
325 			cacheid = CACHEID_VIPT_NONALIASING;
326 			switch (cachetype & (3 << 14)) {
327 			case (1 << 14):
328 				cacheid |= CACHEID_ASID_TAGGED;
329 				break;
330 			case (3 << 14):
331 				cacheid |= CACHEID_PIPT;
332 				break;
333 			}
334 		} else {
335 			arch = CPU_ARCH_ARMv6;
336 			if (cachetype & (1 << 23))
337 				cacheid = CACHEID_VIPT_ALIASING;
338 			else
339 				cacheid = CACHEID_VIPT_NONALIASING;
340 		}
341 		if (cpu_has_aliasing_icache(arch))
342 			cacheid |= CACHEID_VIPT_I_ALIASING;
343 	} else {
344 		cacheid = CACHEID_VIVT;
345 	}
346 
347 	pr_info("CPU: %s data cache, %s instruction cache\n",
348 		cache_is_vivt() ? "VIVT" :
349 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
350 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
351 		cache_is_vivt() ? "VIVT" :
352 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
353 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
354 		icache_is_pipt() ? "PIPT" :
355 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
356 }
357 
358 /*
359  * These functions re-use the assembly code in head.S, which
360  * already provides the required functionality.
361  */
362 extern struct proc_info_list *lookup_processor_type(unsigned int);
363 
364 void __init early_print(const char *str, ...)
365 {
366 	extern void printascii(const char *);
367 	char buf[256];
368 	va_list ap;
369 
370 	va_start(ap, str);
371 	vsnprintf(buf, sizeof(buf), str, ap);
372 	va_end(ap);
373 
374 #ifdef CONFIG_DEBUG_LL
375 	printascii(buf);
376 #endif
377 	printk("%s", buf);
378 }
379 
380 #ifdef CONFIG_ARM_PATCH_IDIV
381 
382 static inline u32 __attribute_const__ sdiv_instruction(void)
383 {
384 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
385 		/* "sdiv r0, r0, r1" */
386 		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
387 		return __opcode_to_mem_thumb32(insn);
388 	}
389 
390 	/* "sdiv r0, r0, r1" */
391 	return __opcode_to_mem_arm(0xe710f110);
392 }
393 
394 static inline u32 __attribute_const__ udiv_instruction(void)
395 {
396 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
397 		/* "udiv r0, r0, r1" */
398 		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
399 		return __opcode_to_mem_thumb32(insn);
400 	}
401 
402 	/* "udiv r0, r0, r1" */
403 	return __opcode_to_mem_arm(0xe730f110);
404 }
405 
406 static inline u32 __attribute_const__ bx_lr_instruction(void)
407 {
408 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
409 		/* "bx lr; nop" */
410 		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
411 		return __opcode_to_mem_thumb32(insn);
412 	}
413 
414 	/* "bx lr" */
415 	return __opcode_to_mem_arm(0xe12fff1e);
416 }
417 
418 static void __init patch_aeabi_idiv(void)
419 {
420 	extern void __aeabi_uidiv(void);
421 	extern void __aeabi_idiv(void);
422 	uintptr_t fn_addr;
423 	unsigned int mask;
424 
425 	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
426 	if (!(elf_hwcap & mask))
427 		return;
428 
429 	pr_info("CPU: div instructions available: patching division code\n");
430 
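	/*
	 * Overwrite each __aeabi_*div stub in place with a hardware divide
	 * followed by a return ("udiv/sdiv r0, r0, r1; bx lr").  The "& ~1"
	 * strips the Thumb bit from the function address so the two 32-bit
	 * words land on the actual instruction location.
	 */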
431 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
432 	asm ("" : "+g" (fn_addr));
433 	((u32 *)fn_addr)[0] = udiv_instruction();
434 	((u32 *)fn_addr)[1] = bx_lr_instruction();
435 	flush_icache_range(fn_addr, fn_addr + 8);
436 
437 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
438 	asm ("" : "+g" (fn_addr));
439 	((u32 *)fn_addr)[0] = sdiv_instruction();
440 	((u32 *)fn_addr)[1] = bx_lr_instruction();
441 	flush_icache_range(fn_addr, fn_addr + 8);
442 }
443 
444 #else
445 static inline void patch_aeabi_idiv(void) { }
446 #endif
447 
448 static void __init cpuid_init_hwcaps(void)
449 {
450 	int block;
451 	u32 isar5;
452 
453 	if (cpu_architecture() < CPU_ARCH_ARMv7)
454 		return;
455 
456 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
457 	if (block >= 2)
458 		elf_hwcap |= HWCAP_IDIVA;
459 	if (block >= 1)
460 		elf_hwcap |= HWCAP_IDIVT;
461 
462 	/* LPAE implies atomic ldrd/strd instructions */
463 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
464 	if (block >= 5)
465 		elf_hwcap |= HWCAP_LPAE;
466 
467 	/* check for supported v8 Crypto instructions */
468 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
469 
470 	block = cpuid_feature_extract_field(isar5, 4);
471 	if (block >= 2)
472 		elf_hwcap2 |= HWCAP2_PMULL;
473 	if (block >= 1)
474 		elf_hwcap2 |= HWCAP2_AES;
475 
476 	block = cpuid_feature_extract_field(isar5, 8);
477 	if (block >= 1)
478 		elf_hwcap2 |= HWCAP2_SHA1;
479 
480 	block = cpuid_feature_extract_field(isar5, 12);
481 	if (block >= 1)
482 		elf_hwcap2 |= HWCAP2_SHA2;
483 
484 	block = cpuid_feature_extract_field(isar5, 16);
485 	if (block >= 1)
486 		elf_hwcap2 |= HWCAP2_CRC32;
487 }
488 
489 static void __init elf_hwcap_fixup(void)
490 {
491 	unsigned id = read_cpuid_id();
492 
493 	/*
494 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
495 	 * see also kuser_get_tls_init.
496 	 */
497 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
498 	    ((id >> 20) & 3) == 0) {
499 		elf_hwcap &= ~HWCAP_TLS;
500 		return;
501 	}
502 
503 	/* Verify if CPUID scheme is implemented */
504 	if ((id & 0x000f0000) != 0x000f0000)
505 		return;
506 
507 	/*
508 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
509 	 * avoid advertising SWP; it may not be atomic with
510 	 * multiprocessing cores.
511 	 */
512 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
513 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
514 	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
515 		elf_hwcap &= ~HWCAP_SWP;
516 }
517 
518 /*
519  * cpu_init - initialise one CPU.
520  *
521  * cpu_init sets up the per-CPU stacks.
522  */
523 void notrace cpu_init(void)
524 {
525 #ifndef CONFIG_CPU_V7M
526 	unsigned int cpu = smp_processor_id();
527 	struct stack *stk = &stacks[cpu];
528 
529 	if (cpu >= NR_CPUS) {
530 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
531 		BUG();
532 	}
533 
534 	/*
535 	 * This only works on resume and secondary cores. For booting on the
536 	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
537 	 */
538 	set_my_cpu_offset(per_cpu_offset(cpu));
539 
540 	cpu_proc_init();
541 
542 	/*
543 	 * Define the placement constraint for the inline asm directive below.
544 	 * In Thumb-2, msr with an immediate value is not allowed.
545 	 */
546 #ifdef CONFIG_THUMB2_KERNEL
547 #define PLC	"r"
548 #else
549 #define PLC	"I"
550 #endif
551 
552 	/*
553 	 * setup stacks for re-entrant exception handlers
554 	 */
555 	__asm__ (
556 	"msr	cpsr_c, %1\n\t"
557 	"add	r14, %0, %2\n\t"
558 	"mov	sp, r14\n\t"
559 	"msr	cpsr_c, %3\n\t"
560 	"add	r14, %0, %4\n\t"
561 	"mov	sp, r14\n\t"
562 	"msr	cpsr_c, %5\n\t"
563 	"add	r14, %0, %6\n\t"
564 	"mov	sp, r14\n\t"
565 	"msr	cpsr_c, %7\n\t"
566 	"add	r14, %0, %8\n\t"
567 	"mov	sp, r14\n\t"
568 	"msr	cpsr_c, %9"
569 	    :
570 	    : "r" (stk),
571 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
572 	      "I" (offsetof(struct stack, irq[0])),
573 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
574 	      "I" (offsetof(struct stack, abt[0])),
575 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
576 	      "I" (offsetof(struct stack, und[0])),
577 	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
578 	      "I" (offsetof(struct stack, fiq[0])),
579 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
580 	    : "r14");
581 #endif
582 }
583 
584 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
585 
586 void __init smp_setup_processor_id(void)
587 {
588 	int i;
589 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
590 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
591 
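	/*
	 * Make the booting CPU logical CPU 0 and swap it with the slot its
	 * affinity-0 value would otherwise occupy; e.g. booting on a core
	 * with MPIDR aff0 == 2 in a 4-CPU system gives the map {2, 1, 0, 3}.
	 */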
592 	cpu_logical_map(0) = cpu;
593 	for (i = 1; i < nr_cpu_ids; ++i)
594 		cpu_logical_map(i) = i == cpu ? 0 : i;
595 
596 	/*
597 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
598 	 * using a percpu variable early; for example, lockdep will
599 	 * access a percpu variable inside lock_release.
600 	 */
601 	set_my_cpu_offset(0);
602 
603 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
604 }
605 
606 struct mpidr_hash mpidr_hash;
607 #ifdef CONFIG_SMP
608 /**
609  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
610  *			  level in order to build a linear index from an
611  *			  MPIDR value. The resulting algorithm is a collision-
612  *			  free hash carried out through shifting and ORing.
613  */
614 static void __init smp_build_mpidr_hash(void)
615 {
616 	u32 i, affinity;
617 	u32 fs[3], bits[3], ls, mask = 0;
618 	/*
619 	 * Pre-scan the list of MPIDRs and filter out bits that do
620 	 * not contribute to affinity levels, i.e. they never toggle.
621 	 */
622 	for_each_possible_cpu(i)
623 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
624 	pr_debug("mask of set bits 0x%x\n", mask);
625 	/*
626 	 * Find and stash the last and first bit set at all affinity levels to
627 	 * check how many bits are required to represent them.
628 	 */
629 	for (i = 0; i < 3; i++) {
630 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
631 		/*
632 		 * Find the most and least significant set bit positions
633 		 * to determine how many bits are required
634 		 * to express the affinity level.
635 		 */
636 		ls = fls(affinity);
637 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
638 		bits[i] = ls - fs[i];
639 	}
640 	/*
641 	 * An index can be created from the MPIDR by isolating the
642 	 * significant bits at each affinity level and by shifting
643 	 * them in order to compress the 24 bits values space to a
644 	 * compressed set of values. This is equivalent to hashing
645 	 * the MPIDR through shifting and ORing. It is a collision free
646 	 * hash though not minimal since some levels might contain a number
647 	 * of CPUs that is not an exact power of 2 and their bit
648 	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
649 	 */
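	/*
	 * Hypothetical example: four CPUs with MPIDRs 0x100..0x103 give
	 * mask = 0x3, so fs[0] = 0, bits[0] = 2 and bits[1] = bits[2] = 0;
	 * the resulting hash simply uses MPIDR[1:0] as a two-bit index.
	 */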
650 	mpidr_hash.shift_aff[0] = fs[0];
651 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
652 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
653 						(bits[1] + bits[0]);
654 	mpidr_hash.mask = mask;
655 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
656 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
657 				mpidr_hash.shift_aff[0],
658 				mpidr_hash.shift_aff[1],
659 				mpidr_hash.shift_aff[2],
660 				mpidr_hash.mask,
661 				mpidr_hash.bits);
662 	/*
663 	 * 4x is an arbitrary value used to warn on a hash table much bigger
664 	 * than expected on most systems.
665 	 */
666 	if (mpidr_hash_size() > 4 * num_possible_cpus())
667 		pr_warn("Large number of MPIDR hash buckets detected\n");
668 	sync_cache_w(&mpidr_hash);
669 }
670 #endif
671 
672 static void __init setup_processor(void)
673 {
674 	struct proc_info_list *list;
675 
676 	/*
677 	 * locate processor in the list of supported processor
678 	 * types.  The linker builds this table for us from the
679 	 * entries in arch/arm/mm/proc-*.S
680 	 */
681 	list = lookup_processor_type(read_cpuid_id());
682 	if (!list) {
683 		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
684 		       read_cpuid_id());
685 		while (1);
686 	}
687 
688 	cpu_name = list->cpu_name;
689 	__cpu_architecture = __get_cpu_architecture();
690 
691 #ifdef MULTI_CPU
692 	processor = *list->proc;
693 #endif
694 #ifdef MULTI_TLB
695 	cpu_tlb = *list->tlb;
696 #endif
697 #ifdef MULTI_USER
698 	cpu_user = *list->user;
699 #endif
700 #ifdef MULTI_CACHE
701 	cpu_cache = *list->cache;
702 #endif
703 
704 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
705 		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
706 		proc_arch[cpu_architecture()], get_cr());
707 
708 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
709 		 list->arch_name, ENDIANNESS);
710 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
711 		 list->elf_name, ENDIANNESS);
712 	elf_hwcap = list->elf_hwcap;
713 
714 	cpuid_init_hwcaps();
715 	patch_aeabi_idiv();
716 
717 #ifndef CONFIG_ARM_THUMB
718 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
719 #endif
720 #ifdef CONFIG_MMU
721 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
722 #endif
723 	erratum_a15_798181_init();
724 
725 	elf_hwcap_fixup();
726 
727 	cacheid_init();
728 	cpu_init();
729 }
730 
731 void __init dump_machine_table(void)
732 {
733 	const struct machine_desc *p;
734 
735 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
736 	for_each_machine_desc(p)
737 		early_print("%08x\t%s\n", p->nr, p->name);
738 
739 	early_print("\nPlease check your kernel config and/or bootloader.\n");
740 
741 	while (true)
742 		/* can't use cpu_relax() here as it may require MMU setup */;
743 }
744 
745 int __init arm_add_memory(u64 start, u64 size)
746 {
747 	u64 aligned_start;
748 
749 	/*
750 	 * Ensure that start/size are aligned to a page boundary.
751 	 * Size is rounded down, start is rounded up.
752 	 */
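	/*
	 * E.g. (assuming 4KB pages) start = 0x80000800 and size = 0x2000
	 * become aligned_start = 0x80001000 and size = 0x1800, which the
	 * masking further down rounds to a single 0x1000-byte page.
	 */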
753 	aligned_start = PAGE_ALIGN(start);
754 	if (aligned_start > start + size)
755 		size = 0;
756 	else
757 		size -= aligned_start - start;
758 
759 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
760 	if (aligned_start > ULONG_MAX) {
761 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
762 			(long long)start);
763 		return -EINVAL;
764 	}
765 
766 	if (aligned_start + size > ULONG_MAX) {
767 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
768 			(long long)start);
769 		/*
770 		 * To ensure bank->start + bank->size is representable in
771 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
772 		 * This means we lose a page after masking.
773 		 */
774 		size = ULONG_MAX - aligned_start;
775 	}
776 #endif
777 
778 	if (aligned_start < PHYS_OFFSET) {
779 		if (aligned_start + size <= PHYS_OFFSET) {
780 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
781 				aligned_start, aligned_start + size);
782 			return -EINVAL;
783 		}
784 
785 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
786 			aligned_start, (u64)PHYS_OFFSET);
787 
788 		size -= PHYS_OFFSET - aligned_start;
789 		aligned_start = PHYS_OFFSET;
790 	}
791 
792 	start = aligned_start;
793 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
794 
795 	/*
796 	 * Check whether this memory region still has a non-zero size
797 	 * after the alignment above; empty regions are not added.
798 	 */
799 	if (size == 0)
800 		return -EINVAL;
801 
802 	memblock_add(start, size);
803 	return 0;
804 }
805 
806 /*
807  * Pick out the memory size.  We look for mem=size@start,
808  * where start and size are "size[KkMm]"
809  */
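/* For example, "mem=64M@0x80000000" adds a single 64MB region at 0x80000000. */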
810 
811 static int __init early_mem(char *p)
812 {
813 	static int usermem __initdata = 0;
814 	u64 size;
815 	u64 start;
816 	char *endp;
817 
818 	/*
819 	 * If the user specifies memory size, we
820 	 * blow away any automatically generated
821 	 * size.
822 	 */
823 	if (usermem == 0) {
824 		usermem = 1;
825 		memblock_remove(memblock_start_of_DRAM(),
826 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
827 	}
828 
829 	start = PHYS_OFFSET;
830 	size  = memparse(p, &endp);
831 	if (*endp == '@')
832 		start = memparse(endp + 1, NULL);
833 
834 	arm_add_memory(start, size);
835 
836 	return 0;
837 }
838 early_param("mem", early_mem);
839 
840 static void __init request_standard_resources(const struct machine_desc *mdesc)
841 {
842 	struct memblock_region *region;
843 	struct resource *res;
844 
845 	kernel_code.start   = virt_to_phys(_text);
846 	kernel_code.end     = virt_to_phys(__init_begin - 1);
847 	kernel_data.start   = virt_to_phys(_sdata);
848 	kernel_data.end     = virt_to_phys(_end - 1);
849 
850 	for_each_memblock(memory, region) {
851 		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
852 		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
853 		unsigned long boot_alias_start;
854 
855 		/*
856 		 * Some systems have a special memory alias which is only
857 		 * used for booting.  We need to advertise this region to
858 		 * kexec-tools so they know where bootable RAM is located.
859 		 */
860 		boot_alias_start = phys_to_idmap(start);
861 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
862 			res = memblock_virt_alloc(sizeof(*res), 0);
863 			res->name = "System RAM (boot alias)";
864 			res->start = boot_alias_start;
865 			res->end = phys_to_idmap(end);
866 			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
867 			request_resource(&iomem_resource, res);
868 		}
869 
870 		res = memblock_virt_alloc(sizeof(*res), 0);
871 		res->name  = "System RAM";
872 		res->start = start;
873 		res->end = end;
874 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
875 
876 		request_resource(&iomem_resource, res);
877 
878 		if (kernel_code.start >= res->start &&
879 		    kernel_code.end <= res->end)
880 			request_resource(res, &kernel_code);
881 		if (kernel_data.start >= res->start &&
882 		    kernel_data.end <= res->end)
883 			request_resource(res, &kernel_data);
884 	}
885 
886 	if (mdesc->video_start) {
887 		video_ram.start = mdesc->video_start;
888 		video_ram.end   = mdesc->video_end;
889 		request_resource(&iomem_resource, &video_ram);
890 	}
891 
892 	/*
893 	 * Some machines will never have lp0, lp1 or lp2; reserve
894 	 * them only when the machine description asks for it.
895 	 */
896 	if (mdesc->reserve_lp0)
897 		request_resource(&ioport_resource, &lp0);
898 	if (mdesc->reserve_lp1)
899 		request_resource(&ioport_resource, &lp1);
900 	if (mdesc->reserve_lp2)
901 		request_resource(&ioport_resource, &lp2);
902 }
903 
904 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
905     defined(CONFIG_EFI)
906 struct screen_info screen_info = {
907  .orig_video_lines	= 30,
908  .orig_video_cols	= 80,
909  .orig_video_mode	= 0,
910  .orig_video_ega_bx	= 0,
911  .orig_video_isVGA	= 1,
912  .orig_video_points	= 8
913 };
914 #endif
915 
916 static int __init customize_machine(void)
917 {
918 	/*
919 	 * Customize platform devices, or add new ones.
920 	 * On DT-based machines we fall back to populating the
921 	 * machine from the device tree if no callback is provided;
922 	 * otherwise we would always need an init_machine callback.
923 	 */
924 	if (machine_desc->init_machine)
925 		machine_desc->init_machine();
926 
927 	return 0;
928 }
929 arch_initcall(customize_machine);
930 
931 static int __init init_machine_late(void)
932 {
933 	struct device_node *root;
934 	int ret;
935 
936 	if (machine_desc->init_late)
937 		machine_desc->init_late();
938 
939 	root = of_find_node_by_path("/");
940 	if (root) {
941 		ret = of_property_read_string(root, "serial-number",
942 					      &system_serial);
943 		if (ret)
944 			system_serial = NULL;
945 	}
946 
947 	if (!system_serial)
948 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
949 					  system_serial_high,
950 					  system_serial_low);
951 
952 	return 0;
953 }
954 late_initcall(init_machine_late);
955 
956 #ifdef CONFIG_KEXEC
957 /*
958  * The crash region must be aligned to 128MB to avoid
959  * zImage relocating below the reserved region.
960  */
961 #define CRASH_ALIGN	(128 << 20)
962 
963 static inline unsigned long long get_total_mem(void)
964 {
965 	unsigned long total;
966 
967 	total = max_low_pfn - min_low_pfn;
968 	return total << PAGE_SHIFT;
969 }
970 
971 /**
972  * reserve_crashkernel() - reserves memory area for crash kernel
973  *
974  * This function reserves the memory area given by the "crashkernel=" kernel
975  * command line parameter. The reserved memory is used by a dump-capture
976  * kernel when the primary kernel crashes.
977  */
978 static void __init reserve_crashkernel(void)
979 {
980 	unsigned long long crash_size, crash_base;
981 	unsigned long long total_mem;
982 	int ret;
983 
984 	total_mem = get_total_mem();
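	/*
	 * e.g. "crashkernel=64M@0x90000000" on the command line gives
	 * crash_size = 64MB and crash_base = 0x90000000.
	 */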
985 	ret = parse_crashkernel(boot_command_line, total_mem,
986 				&crash_size, &crash_base);
987 	if (ret)
988 		return;
989 
990 	if (crash_base <= 0) {
991 		unsigned long long crash_max = idmap_to_phys((u32)~0);
992 		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
993 						    crash_size, CRASH_ALIGN);
994 		if (!crash_base) {
995 			pr_err("crashkernel reservation failed - No suitable area found.\n");
996 			return;
997 		}
998 	} else {
999 		unsigned long long start;
1000 
1001 		start = memblock_find_in_range(crash_base,
1002 					       crash_base + crash_size,
1003 					       crash_size, SECTION_SIZE);
1004 		if (start != crash_base) {
1005 			pr_err("crashkernel reservation failed - memory is in use.\n");
1006 			return;
1007 		}
1008 	}
1009 
1010 	ret = memblock_reserve(crash_base, crash_size);
1011 	if (ret < 0) {
1012 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
1013 			(unsigned long)crash_base);
1014 		return;
1015 	}
1016 
1017 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
1018 		(unsigned long)(crash_size >> 20),
1019 		(unsigned long)(crash_base >> 20),
1020 		(unsigned long)(total_mem >> 20));
1021 
1022 	/* The crashk resource must always be located in normal mem */
1023 	crashk_res.start = crash_base;
1024 	crashk_res.end = crash_base + crash_size - 1;
1025 	insert_resource(&iomem_resource, &crashk_res);
1026 
1027 	if (arm_has_idmap_alias()) {
1028 		/*
1029 		 * If we have a special RAM alias for use at boot, we
1030 		 * need to advertise to kexec tools where the alias is.
1031 		 */
1032 		static struct resource crashk_boot_res = {
1033 			.name = "Crash kernel (boot alias)",
1034 			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1035 		};
1036 
1037 		crashk_boot_res.start = phys_to_idmap(crash_base);
1038 		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1039 		insert_resource(&iomem_resource, &crashk_boot_res);
1040 	}
1041 }
1042 #else
1043 static inline void reserve_crashkernel(void) {}
1044 #endif /* CONFIG_KEXEC */
1045 
1046 void __init hyp_mode_check(void)
1047 {
1048 #ifdef CONFIG_ARM_VIRT_EXT
1049 	sync_boot_mode();
1050 
1051 	if (is_hyp_mode_available()) {
1052 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
1053 		pr_info("CPU: Virtualization extensions available.\n");
1054 	} else if (is_hyp_mode_mismatched()) {
1055 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1056 			__boot_cpu_mode & MODE_MASK);
1057 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1058 	} else
1059 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
1060 #endif
1061 }
1062 
1063 void __init setup_arch(char **cmdline_p)
1064 {
1065 	const struct machine_desc *mdesc;
1066 
1067 	setup_processor();
1068 	mdesc = setup_machine_fdt(__atags_pointer);
1069 	if (!mdesc)
1070 		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
1071 	machine_desc = mdesc;
1072 	machine_name = mdesc->name;
1073 	dump_stack_set_arch_desc("%s", mdesc->name);
1074 
1075 	if (mdesc->reboot_mode != REBOOT_HARD)
1076 		reboot_mode = mdesc->reboot_mode;
1077 
1078 	init_mm.start_code = (unsigned long) _text;
1079 	init_mm.end_code   = (unsigned long) _etext;
1080 	init_mm.end_data   = (unsigned long) _edata;
1081 	init_mm.brk	   = (unsigned long) _end;
1082 
1083 	/* populate cmd_line too for later use, preserving boot_command_line */
1084 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1085 	*cmdline_p = cmd_line;
1086 
1087 	early_fixmap_init();
1088 	early_ioremap_init();
1089 
1090 	parse_early_param();
1091 
1092 #ifdef CONFIG_MMU
1093 	early_paging_init(mdesc);
1094 #endif
1095 	setup_dma_zone(mdesc);
1096 	xen_early_init();
1097 	efi_init();
1098 	sanity_check_meminfo();
1099 	arm_memblock_init(mdesc);
1100 
1101 	early_ioremap_reset();
1102 
1103 	paging_init(mdesc);
1104 	request_standard_resources(mdesc);
1105 
1106 	if (mdesc->restart)
1107 		arm_pm_restart = mdesc->restart;
1108 
1109 	unflatten_device_tree();
1110 
1111 	arm_dt_init_cpu_maps();
1112 	psci_dt_init();
1113 #ifdef CONFIG_SMP
1114 	if (is_smp()) {
1115 		if (!mdesc->smp_init || !mdesc->smp_init()) {
1116 			if (psci_smp_available())
1117 				smp_set_ops(&psci_smp_ops);
1118 			else if (mdesc->smp)
1119 				smp_set_ops(mdesc->smp);
1120 		}
1121 		smp_init_cpus();
1122 		smp_build_mpidr_hash();
1123 	}
1124 #endif
1125 
1126 	if (!is_smp())
1127 		hyp_mode_check();
1128 
1129 	reserve_crashkernel();
1130 
1131 #ifdef CONFIG_MULTI_IRQ_HANDLER
1132 	handle_arch_irq = mdesc->handle_irq;
1133 #endif
1134 
1135 #ifdef CONFIG_VT
1136 #if defined(CONFIG_VGA_CONSOLE)
1137 	conswitchp = &vga_con;
1138 #elif defined(CONFIG_DUMMY_CONSOLE)
1139 	conswitchp = &dummy_con;
1140 #endif
1141 #endif
1142 
1143 	if (mdesc->init_early)
1144 		mdesc->init_early();
1145 }
1146 
1147 
1148 static int __init topology_init(void)
1149 {
1150 	int cpu;
1151 
1152 	for_each_possible_cpu(cpu) {
1153 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1154 		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1155 		register_cpu(&cpuinfo->cpu, cpu);
1156 	}
1157 
1158 	return 0;
1159 }
1160 subsys_initcall(topology_init);
1161 
1162 #ifdef CONFIG_HAVE_PROC_CPU
1163 static int __init proc_cpu_init(void)
1164 {
1165 	struct proc_dir_entry *res;
1166 
1167 	res = proc_mkdir("cpu", NULL);
1168 	if (!res)
1169 		return -ENOMEM;
1170 	return 0;
1171 }
1172 fs_initcall(proc_cpu_init);
1173 #endif
1174 
1175 static const char *hwcap_str[] = {
1176 	"swp",
1177 	"half",
1178 	"thumb",
1179 	"26bit",
1180 	"fastmult",
1181 	"fpa",
1182 	"vfp",
1183 	"edsp",
1184 	"java",
1185 	"iwmmxt",
1186 	"crunch",
1187 	"thumbee",
1188 	"neon",
1189 	"vfpv3",
1190 	"vfpv3d16",
1191 	"tls",
1192 	"vfpv4",
1193 	"idiva",
1194 	"idivt",
1195 	"vfpd32",
1196 	"lpae",
1197 	"evtstrm",
1198 	NULL
1199 };
1200 
1201 static const char *hwcap2_str[] = {
1202 	"aes",
1203 	"pmull",
1204 	"sha1",
1205 	"sha2",
1206 	"crc32",
1207 	NULL
1208 };
1209 
1210 static int c_show(struct seq_file *m, void *v)
1211 {
1212 	int i, j;
1213 	u32 cpuid;
1214 
1215 	for_each_online_cpu(i) {
1216 		/*
1217 		 * glibc reads /proc/cpuinfo to determine the number of
1218 		 * online processors, looking for lines beginning with
1219 		 * "processor".  Give glibc what it expects.
1220 		 */
1221 		seq_printf(m, "processor\t: %d\n", i);
1222 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1223 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1224 			   cpu_name, cpuid & 15, elf_platform);
1225 
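		/*
		 * BogoMIPS is loops_per_jiffy * HZ / 500000 printed to two
		 * decimal places; e.g. loops_per_jiffy = 4980736 at HZ = 100
		 * prints as "996.14".
		 */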
1226 #if defined(CONFIG_SMP)
1227 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1228 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1229 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1230 #else
1231 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1232 			   loops_per_jiffy / (500000/HZ),
1233 			   (loops_per_jiffy / (5000/HZ)) % 100);
1234 #endif
1235 		/* dump out the processor features */
1236 		seq_puts(m, "Features\t: ");
1237 
1238 		for (j = 0; hwcap_str[j]; j++)
1239 			if (elf_hwcap & (1 << j))
1240 				seq_printf(m, "%s ", hwcap_str[j]);
1241 
1242 		for (j = 0; hwcap2_str[j]; j++)
1243 			if (elf_hwcap2 & (1 << j))
1244 				seq_printf(m, "%s ", hwcap2_str[j]);
1245 
1246 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1247 		seq_printf(m, "CPU architecture: %s\n",
1248 			   proc_arch[cpu_architecture()]);
1249 
1250 		if ((cpuid & 0x0008f000) == 0x00000000) {
1251 			/* pre-ARM7 */
1252 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1253 		} else {
1254 			if ((cpuid & 0x0008f000) == 0x00007000) {
1255 				/* ARM7 */
1256 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1257 					   (cpuid >> 16) & 127);
1258 			} else {
1259 				/* post-ARM7 */
1260 				seq_printf(m, "CPU variant\t: 0x%x\n",
1261 					   (cpuid >> 20) & 15);
1262 			}
1263 			seq_printf(m, "CPU part\t: 0x%03x\n",
1264 				   (cpuid >> 4) & 0xfff);
1265 		}
1266 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1267 	}
1268 
1269 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1270 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1271 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1272 
1273 	return 0;
1274 }
1275 
1276 static void *c_start(struct seq_file *m, loff_t *pos)
1277 {
1278 	return *pos < 1 ? (void *)1 : NULL;
1279 }
1280 
1281 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1282 {
1283 	++*pos;
1284 	return NULL;
1285 }
1286 
1287 static void c_stop(struct seq_file *m, void *v)
1288 {
1289 }
1290 
1291 const struct seq_operations cpuinfo_op = {
1292 	.start	= c_start,
1293 	.next	= c_next,
1294 	.stop	= c_stop,
1295 	.show	= c_show
1296 };
1297