xref: /linux/arch/arm/kernel/setup.c (revision be120397e7709d9d5ed88317a385ce864a2603bc)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_iommu.h>
22 #include <linux/of_platform.h>
23 #include <linux/init.h>
24 #include <linux/kexec.h>
25 #include <linux/of_fdt.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34 #include <linux/psci.h>
35 
36 #include <asm/unified.h>
37 #include <asm/cp15.h>
38 #include <asm/cpu.h>
39 #include <asm/cputype.h>
40 #include <asm/elf.h>
41 #include <asm/procinfo.h>
42 #include <asm/psci.h>
43 #include <asm/sections.h>
44 #include <asm/setup.h>
45 #include <asm/smp_plat.h>
46 #include <asm/mach-types.h>
47 #include <asm/cacheflush.h>
48 #include <asm/cachetype.h>
49 #include <asm/tlbflush.h>
50 #include <asm/xen/hypervisor.h>
51 
52 #include <asm/prom.h>
53 #include <asm/mach/arch.h>
54 #include <asm/mach/irq.h>
55 #include <asm/mach/time.h>
56 #include <asm/system_info.h>
57 #include <asm/system_misc.h>
58 #include <asm/traps.h>
59 #include <asm/unwind.h>
60 #include <asm/memblock.h>
61 #include <asm/virt.h>
62 
63 #include "atags.h"
64 
65 
66 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
67 char fpe_type[8];
68 
69 static int __init fpe_setup(char *line)
70 {
71 	memcpy(fpe_type, line, 8);
72 	return 1;
73 }
74 
75 __setup("fpe=", fpe_setup);
76 #endif
77 
78 extern void init_default_cache_policy(unsigned long);
79 extern void paging_init(const struct machine_desc *desc);
80 extern void early_paging_init(const struct machine_desc *);
81 extern void sanity_check_meminfo(void);
82 extern enum reboot_mode reboot_mode;
83 extern void setup_dma_zone(const struct machine_desc *desc);
84 
85 unsigned int processor_id;
86 EXPORT_SYMBOL(processor_id);
87 unsigned int __machine_arch_type __read_mostly;
88 EXPORT_SYMBOL(__machine_arch_type);
89 unsigned int cacheid __read_mostly;
90 EXPORT_SYMBOL(cacheid);
91 
92 unsigned int __atags_pointer __initdata;
93 
94 unsigned int system_rev;
95 EXPORT_SYMBOL(system_rev);
96 
97 const char *system_serial;
98 EXPORT_SYMBOL(system_serial);
99 
100 unsigned int system_serial_low;
101 EXPORT_SYMBOL(system_serial_low);
102 
103 unsigned int system_serial_high;
104 EXPORT_SYMBOL(system_serial_high);
105 
106 unsigned int elf_hwcap __read_mostly;
107 EXPORT_SYMBOL(elf_hwcap);
108 
109 unsigned int elf_hwcap2 __read_mostly;
110 EXPORT_SYMBOL(elf_hwcap2);
111 
112 
113 #ifdef MULTI_CPU
114 struct processor processor __read_mostly;
115 #endif
116 #ifdef MULTI_TLB
117 struct cpu_tlb_fns cpu_tlb __read_mostly;
118 #endif
119 #ifdef MULTI_USER
120 struct cpu_user_fns cpu_user __read_mostly;
121 #endif
122 #ifdef MULTI_CACHE
123 struct cpu_cache_fns cpu_cache __read_mostly;
124 #endif
125 #ifdef CONFIG_OUTER_CACHE
126 struct outer_cache_fns outer_cache __read_mostly;
127 EXPORT_SYMBOL(outer_cache);
128 #endif
129 
130 /*
131  * Cached cpu_architecture() result for use by assembler code.
132  * C code should use the cpu_architecture() function instead of accessing this
133  * variable directly.
134  */
135 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
136 
137 struct stack {
138 	u32 irq[3];
139 	u32 abt[3];
140 	u32 und[3];
141 	u32 fiq[3];
142 } ____cacheline_aligned;
143 
144 #ifndef CONFIG_CPU_V7M
145 static struct stack stacks[NR_CPUS];
146 #endif
147 
148 char elf_platform[ELF_PLATFORM_SIZE];
149 EXPORT_SYMBOL(elf_platform);
150 
151 static const char *cpu_name;
152 static const char *machine_name;
153 static char __initdata cmd_line[COMMAND_LINE_SIZE];
154 const struct machine_desc *machine_desc __initdata;
155 
156 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
157 #define ENDIANNESS ((char)endian_test.l)
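/*
 * Worked example (illustrative): the union above overlays the string
 * "l??b" on an unsigned long.  On a little-endian CPU the first byte
 * 'l' lands in the least significant byte of .l, so (char)endian_test.l
 * evaluates to 'l'; on a big-endian CPU the low byte is 'b'.  The
 * resulting character is appended to the machine and ELF platform
 * names in setup_processor(), e.g. "v7l" versus "v7b".
 */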
158 
159 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
160 
161 /*
162  * Standard memory resources
163  */
164 static struct resource mem_res[] = {
165 	{
166 		.name = "Video RAM",
167 		.start = 0,
168 		.end = 0,
169 		.flags = IORESOURCE_MEM
170 	},
171 	{
172 		.name = "Kernel code",
173 		.start = 0,
174 		.end = 0,
175 		.flags = IORESOURCE_MEM
176 	},
177 	{
178 		.name = "Kernel data",
179 		.start = 0,
180 		.end = 0,
181 		.flags = IORESOURCE_MEM
182 	}
183 };
184 
185 #define video_ram   mem_res[0]
186 #define kernel_code mem_res[1]
187 #define kernel_data mem_res[2]
188 
189 static struct resource io_res[] = {
190 	{
191 		.name = "reserved",
192 		.start = 0x3bc,
193 		.end = 0x3be,
194 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
195 	},
196 	{
197 		.name = "reserved",
198 		.start = 0x378,
199 		.end = 0x37f,
200 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
201 	},
202 	{
203 		.name = "reserved",
204 		.start = 0x278,
205 		.end = 0x27f,
206 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
207 	}
208 };
209 
210 #define lp0 io_res[0]
211 #define lp1 io_res[1]
212 #define lp2 io_res[2]
213 
214 static const char *proc_arch[] = {
215 	"undefined/unknown",
216 	"3",
217 	"4",
218 	"4T",
219 	"5",
220 	"5T",
221 	"5TE",
222 	"5TEJ",
223 	"6TEJ",
224 	"7",
225 	"7M",
226 	"?(12)",
227 	"?(13)",
228 	"?(14)",
229 	"?(15)",
230 	"?(16)",
231 	"?(17)",
232 };
233 
234 #ifdef CONFIG_CPU_V7M
235 static int __get_cpu_architecture(void)
236 {
237 	return CPU_ARCH_ARMv7M;
238 }
239 #else
240 static int __get_cpu_architecture(void)
241 {
242 	int cpu_arch;
243 
244 	if ((read_cpuid_id() & 0x0008f000) == 0) {
245 		cpu_arch = CPU_ARCH_UNKNOWN;
246 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
247 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
248 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
249 		cpu_arch = (read_cpuid_id() >> 16) & 7;
250 		if (cpu_arch)
251 			cpu_arch += CPU_ARCH_ARMv3;
252 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
253 		/* Revised CPUID format. Read the Memory Model Feature
254 		 * Register 0 and check for VMSAv7 or PMSAv7 */
255 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
256 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
257 		    (mmfr0 & 0x000000f0) >= 0x00000030)
258 			cpu_arch = CPU_ARCH_ARMv7;
259 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
260 			 (mmfr0 & 0x000000f0) == 0x00000020)
261 			cpu_arch = CPU_ARCH_ARMv6;
262 		else
263 			cpu_arch = CPU_ARCH_UNKNOWN;
264 	} else
265 		cpu_arch = CPU_ARCH_UNKNOWN;
266 
267 	return cpu_arch;
268 }
269 #endif
270 
271 int __pure cpu_architecture(void)
272 {
273 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
274 
275 	return __cpu_architecture;
276 }
277 
278 static int cpu_has_aliasing_icache(unsigned int arch)
279 {
280 	int aliasing_icache;
281 	unsigned int id_reg, num_sets, line_size;
282 
283 	/* PIPT caches never alias. */
284 	if (icache_is_pipt())
285 		return 0;
286 
287 	/* arch specifies the register format */
288 	switch (arch) {
289 	case CPU_ARCH_ARMv7:
290 		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
291 		    : /* No output operands */
292 		    : "r" (1));
293 		isb();
294 		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
295 		    : "=r" (id_reg));
296 		line_size = 4 << ((id_reg & 0x7) + 2);
297 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
298 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
299 		break;
300 	case CPU_ARCH_ARMv6:
301 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
302 		break;
303 	default:
304 		/* I-cache aliases will be handled by D-cache aliasing code */
305 		aliasing_icache = 0;
306 	}
307 
308 	return aliasing_icache;
309 }
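/*
 * Worked example (illustrative, using hypothetical CCSIDR values): if
 * the instruction cache reports LineSize = 1 and NumSets = 255, the
 * code above computes line_size = 4 << (1 + 2) = 32 bytes and
 * num_sets = 255 + 1 = 256, so one way spans 32 * 256 = 8192 bytes.
 * With a 4 KiB PAGE_SIZE this exceeds a page, so the I-cache is
 * treated as aliasing.
 */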
310 
311 static void __init cacheid_init(void)
312 {
313 	unsigned int arch = cpu_architecture();
314 
315 	if (arch == CPU_ARCH_ARMv7M) {
316 		cacheid = 0;
317 	} else if (arch >= CPU_ARCH_ARMv6) {
318 		unsigned int cachetype = read_cpuid_cachetype();
319 		if ((cachetype & (7 << 29)) == 4 << 29) {
320 			/* ARMv7 register format */
321 			arch = CPU_ARCH_ARMv7;
322 			cacheid = CACHEID_VIPT_NONALIASING;
323 			switch (cachetype & (3 << 14)) {
324 			case (1 << 14):
325 				cacheid |= CACHEID_ASID_TAGGED;
326 				break;
327 			case (3 << 14):
328 				cacheid |= CACHEID_PIPT;
329 				break;
330 			}
331 		} else {
332 			arch = CPU_ARCH_ARMv6;
333 			if (cachetype & (1 << 23))
334 				cacheid = CACHEID_VIPT_ALIASING;
335 			else
336 				cacheid = CACHEID_VIPT_NONALIASING;
337 		}
338 		if (cpu_has_aliasing_icache(arch))
339 			cacheid |= CACHEID_VIPT_I_ALIASING;
340 	} else {
341 		cacheid = CACHEID_VIVT;
342 	}
343 
344 	pr_info("CPU: %s data cache, %s instruction cache\n",
345 		cache_is_vivt() ? "VIVT" :
346 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
347 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
348 		cache_is_vivt() ? "VIVT" :
349 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
350 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
351 		icache_is_pipt() ? "PIPT" :
352 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
353 }
354 
355 /*
356  * These functions re-use the assembly code in head.S, which
357  * already provides the required functionality.
358  */
359 extern struct proc_info_list *lookup_processor_type(unsigned int);
360 
361 void __init early_print(const char *str, ...)
362 {
363 	extern void printascii(const char *);
364 	char buf[256];
365 	va_list ap;
366 
367 	va_start(ap, str);
368 	vsnprintf(buf, sizeof(buf), str, ap);
369 	va_end(ap);
370 
371 #ifdef CONFIG_DEBUG_LL
372 	printascii(buf);
373 #endif
374 	printk("%s", buf);
375 }
376 
377 static void __init cpuid_init_hwcaps(void)
378 {
379 	int block;
380 	u32 isar5;
381 
382 	if (cpu_architecture() < CPU_ARCH_ARMv7)
383 		return;
384 
385 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
386 	if (block >= 2)
387 		elf_hwcap |= HWCAP_IDIVA;
388 	if (block >= 1)
389 		elf_hwcap |= HWCAP_IDIVT;
390 
391 	/* LPAE implies atomic ldrd/strd instructions */
392 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
393 	if (block >= 5)
394 		elf_hwcap |= HWCAP_LPAE;
395 
396 	/* check for supported v8 Crypto instructions */
397 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
398 
399 	block = cpuid_feature_extract_field(isar5, 4);
400 	if (block >= 2)
401 		elf_hwcap2 |= HWCAP2_PMULL;
402 	if (block >= 1)
403 		elf_hwcap2 |= HWCAP2_AES;
404 
405 	block = cpuid_feature_extract_field(isar5, 8);
406 	if (block >= 1)
407 		elf_hwcap2 |= HWCAP2_SHA1;
408 
409 	block = cpuid_feature_extract_field(isar5, 12);
410 	if (block >= 1)
411 		elf_hwcap2 |= HWCAP2_SHA2;
412 
413 	block = cpuid_feature_extract_field(isar5, 16);
414 	if (block >= 1)
415 		elf_hwcap2 |= HWCAP2_CRC32;
416 }
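/*
 * Illustrative note: the ID_ISAR5 fields extracted above at bit
 * offsets 4, 8, 12 and 16 are the AES/PMULL, SHA1, SHA2 and CRC32
 * feature fields.  For a hypothetical ISAR5 value of 0x00011121 they
 * decode as AES/PMULL = 2, SHA1 = 1, SHA2 = 1 and CRC32 = 1, so all of
 * HWCAP2_AES, HWCAP2_PMULL, HWCAP2_SHA1, HWCAP2_SHA2 and HWCAP2_CRC32
 * would be advertised.
 */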
417 
418 static void __init elf_hwcap_fixup(void)
419 {
420 	unsigned id = read_cpuid_id();
421 
422 	/*
423 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
424 	 * see also kuser_get_tls_init.
425 	 */
426 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
427 	    ((id >> 20) & 3) == 0) {
428 		elf_hwcap &= ~HWCAP_TLS;
429 		return;
430 	}
431 
432 	/* Verify if CPUID scheme is implemented */
433 	if ((id & 0x000f0000) != 0x000f0000)
434 		return;
435 
436 	/*
437 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
438 	 * avoid advertising SWP; it may not be atomic with
439 	 * multiprocessing cores.
440 	 */
441 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
442 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
443 	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
444 		elf_hwcap &= ~HWCAP_SWP;
445 }
446 
447 /*
448  * cpu_init - initialise one CPU.
449  *
450  * cpu_init sets up the per-CPU stacks.
451  */
452 void notrace cpu_init(void)
453 {
454 #ifndef CONFIG_CPU_V7M
455 	unsigned int cpu = smp_processor_id();
456 	struct stack *stk = &stacks[cpu];
457 
458 	if (cpu >= NR_CPUS) {
459 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
460 		BUG();
461 	}
462 
463 	/*
464 	 * This only works on resume and secondary cores. For booting on the
465 	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
466 	 */
467 	set_my_cpu_offset(per_cpu_offset(cpu));
468 
469 	cpu_proc_init();
470 
471 	/*
472 	 * Define the placement constraint for the inline asm directive below.
473 	 * In Thumb-2, msr with an immediate value is not allowed.
474 	 */
475 #ifdef CONFIG_THUMB2_KERNEL
476 #define PLC	"r"
477 #else
478 #define PLC	"I"
479 #endif
480 
481 	/*
482 	 * setup stacks for re-entrant exception handlers
483 	 */
484 	__asm__ (
485 	"msr	cpsr_c, %1\n\t"
486 	"add	r14, %0, %2\n\t"
487 	"mov	sp, r14\n\t"
488 	"msr	cpsr_c, %3\n\t"
489 	"add	r14, %0, %4\n\t"
490 	"mov	sp, r14\n\t"
491 	"msr	cpsr_c, %5\n\t"
492 	"add	r14, %0, %6\n\t"
493 	"mov	sp, r14\n\t"
494 	"msr	cpsr_c, %7\n\t"
495 	"add	r14, %0, %8\n\t"
496 	"mov	sp, r14\n\t"
497 	"msr	cpsr_c, %9"
498 	    :
499 	    : "r" (stk),
500 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
501 	      "I" (offsetof(struct stack, irq[0])),
502 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
503 	      "I" (offsetof(struct stack, abt[0])),
504 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
505 	      "I" (offsetof(struct stack, und[0])),
506 	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
507 	      "I" (offsetof(struct stack, fiq[0])),
508 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
509 	    : "r14");
510 #endif
511 }
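/*
 * Illustrative summary of the inline asm above: for each of the IRQ,
 * ABT, UND and FIQ modes it writes the mode bits to CPSR (with IRQs
 * and FIQs masked), points that mode's banked stack pointer at the
 * matching three-word area of this CPU's struct stack (e.g.
 * sp_irq = &stacks[cpu].irq[0]), and finally drops back to SVC mode.
 * These small stacks are only used briefly by the exception entry
 * code before it switches to the current task's kernel stack.
 */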
512 
513 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
514 
515 void __init smp_setup_processor_id(void)
516 {
517 	int i;
518 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
519 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
520 
521 	cpu_logical_map(0) = cpu;
522 	for (i = 1; i < nr_cpu_ids; ++i)
523 		cpu_logical_map(i) = i == cpu ? 0 : i;
524 
525 	/*
526 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
527 	 * by using per-cpu variables too early; for example, lockdep
528 	 * accesses per-cpu variables inside lock_release().
529 	 */
530 	set_my_cpu_offset(0);
531 
532 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
533 }
534 
535 struct mpidr_hash mpidr_hash;
536 #ifdef CONFIG_SMP
537 /**
538  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
539  *			  level in order to build a linear index from an
540  *			  MPIDR value. Resulting algorithm is a collision
541  *			  free hash carried out through shifting and ORing
542  */
543 static void __init smp_build_mpidr_hash(void)
544 {
545 	u32 i, affinity;
546 	u32 fs[3], bits[3], ls, mask = 0;
547 	/*
548 	 * Pre-scan the list of MPIDRs and filter out bits that do
549 	 * not contribute to affinity levels, i.e. they never toggle.
550 	 */
551 	for_each_possible_cpu(i)
552 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
553 	pr_debug("mask of set bits 0x%x\n", mask);
554 	/*
555 	 * Find and stash the last and first bit set at all affinity levels to
556 	 * check how many bits are required to represent them.
557 	 */
558 	for (i = 0; i < 3; i++) {
559 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
560 		/*
561 		 * Find the most and least significant set bit positions
562 		 * to determine how many bits are required
563 		 * to express the affinity level.
564 		 */
565 		ls = fls(affinity);
566 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
567 		bits[i] = ls - fs[i];
568 	}
569 	/*
570 	 * An index can be created from the MPIDR by isolating the
571 	 * significant bits at each affinity level and by shifting
572 	 * them in order to compress the 24-bit value space to a
573 	 * compressed set of values. This is equivalent to hashing
574 	 * the MPIDR through shifting and ORing. It is a collision free
575 	 * hash though not minimal since some levels might contain a number
576 	 * of CPUs that is not an exact power of 2 and their bit
577 	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
578 	 */
579 	mpidr_hash.shift_aff[0] = fs[0];
580 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
581 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
582 						(bits[1] + bits[0]);
583 	mpidr_hash.mask = mask;
584 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
585 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
586 				mpidr_hash.shift_aff[0],
587 				mpidr_hash.shift_aff[1],
588 				mpidr_hash.shift_aff[2],
589 				mpidr_hash.mask,
590 				mpidr_hash.bits);
591 	/*
592 	 * 4x is an arbitrary value used to warn on a hash table much bigger
593 	 * than expected on most systems.
594 	 */
595 	if (mpidr_hash_size() > 4 * num_possible_cpus())
596 		pr_warn("Large number of MPIDR hash buckets detected\n");
597 	sync_cache_w(&mpidr_hash);
598 }
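/*
 * Worked example (illustrative): on a hypothetical system with four
 * CPUs whose MPIDRs are 0x000, 0x001, 0x100 and 0x101, mask = 0x101.
 * Affinity levels 0 and 1 each contribute one significant bit
 * (fs = 0, bits = 1) and level 2 contributes none, giving
 * shift_aff[0] = 0, shift_aff[1] = 8 + 0 - 1 = 7,
 * shift_aff[2] = 16 + 0 - (1 + 1) = 14 and bits = 2, i.e. every MPIDR
 * hashes into a four-entry table with no collisions.
 */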
599 #endif
600 
601 static void __init setup_processor(void)
602 {
603 	struct proc_info_list *list;
604 
605 	/*
606 	 * locate processor in the list of supported processor
607 	 * types.  The linker builds this table for us from the
608 	 * entries in arch/arm/mm/proc-*.S
609 	 */
610 	list = lookup_processor_type(read_cpuid_id());
611 	if (!list) {
612 		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
613 		       read_cpuid_id());
614 		while (1);
615 	}
616 
617 	cpu_name = list->cpu_name;
618 	__cpu_architecture = __get_cpu_architecture();
619 
620 #ifdef MULTI_CPU
621 	processor = *list->proc;
622 #endif
623 #ifdef MULTI_TLB
624 	cpu_tlb = *list->tlb;
625 #endif
626 #ifdef MULTI_USER
627 	cpu_user = *list->user;
628 #endif
629 #ifdef MULTI_CACHE
630 	cpu_cache = *list->cache;
631 #endif
632 
633 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
634 		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
635 		proc_arch[cpu_architecture()], get_cr());
636 
637 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
638 		 list->arch_name, ENDIANNESS);
639 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
640 		 list->elf_name, ENDIANNESS);
641 	elf_hwcap = list->elf_hwcap;
642 
643 	cpuid_init_hwcaps();
644 
645 #ifndef CONFIG_ARM_THUMB
646 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
647 #endif
648 #ifdef CONFIG_MMU
649 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
650 #endif
651 	erratum_a15_798181_init();
652 
653 	elf_hwcap_fixup();
654 
655 	cacheid_init();
656 	cpu_init();
657 }
658 
659 void __init dump_machine_table(void)
660 {
661 	const struct machine_desc *p;
662 
663 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
664 	for_each_machine_desc(p)
665 		early_print("%08x\t%s\n", p->nr, p->name);
666 
667 	early_print("\nPlease check your kernel config and/or bootloader.\n");
668 
669 	while (true)
670 		/* can't use cpu_relax() here as it may require MMU setup */;
671 }
672 
673 int __init arm_add_memory(u64 start, u64 size)
674 {
675 	u64 aligned_start;
676 
677 	/*
678 	 * Ensure that start/size are aligned to a page boundary.
679 	 * Size is rounded down, start is rounded up.
680 	 */
681 	aligned_start = PAGE_ALIGN(start);
682 	if (aligned_start > start + size)
683 		size = 0;
684 	else
685 		size -= aligned_start - start;
686 
687 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
688 	if (aligned_start > ULONG_MAX) {
689 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
690 			(long long)start);
691 		return -EINVAL;
692 	}
693 
694 	if (aligned_start + size > ULONG_MAX) {
695 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
696 			(long long)start);
697 		/*
698 		 * To ensure bank->start + bank->size is representable in
699 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
700 		 * This means we lose a page after masking.
701 		 */
702 		size = ULONG_MAX - aligned_start;
703 	}
704 #endif
705 
706 	if (aligned_start < PHYS_OFFSET) {
707 		if (aligned_start + size <= PHYS_OFFSET) {
708 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
709 				aligned_start, aligned_start + size);
710 			return -EINVAL;
711 		}
712 
713 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
714 			aligned_start, (u64)PHYS_OFFSET);
715 
716 		size -= PHYS_OFFSET - aligned_start;
717 		aligned_start = PHYS_OFFSET;
718 	}
719 
720 	start = aligned_start;
721 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
722 
723 	/*
724 	 * Check whether this memory region still has a non-zero
725 	 * size after the alignment above.
726 	 */
727 	if (size == 0)
728 		return -EINVAL;
729 
730 	memblock_add(start, size);
731 	return 0;
732 }
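/*
 * Worked example (illustrative, assuming 4 KiB pages and PHYS_OFFSET
 * at or below the region): arm_add_memory(0x80000800, 0x2000) rounds
 * the start up to 0x80001000, shrinks the size to 0x1800, and the
 * final masking trims it to 0x1000, so a single page at 0x80001000 is
 * added to memblock.
 */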
733 
734 /*
735  * Pick out the memory size.  We look for mem=size@start,
736  * where start and size are of the form "size[KkMm]".
737  */
738 
739 static int __init early_mem(char *p)
740 {
741 	static int usermem __initdata = 0;
742 	u64 size;
743 	u64 start;
744 	char *endp;
745 
746 	/*
747 	 * If the user specifies memory size, we
748 	 * blow away any automatically generated
749 	 * size.
750 	 */
751 	if (usermem == 0) {
752 		usermem = 1;
753 		memblock_remove(memblock_start_of_DRAM(),
754 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
755 	}
756 
757 	start = PHYS_OFFSET;
758 	size  = memparse(p, &endp);
759 	if (*endp == '@')
760 		start = memparse(endp + 1, NULL);
761 
762 	arm_add_memory(start, size);
763 
764 	return 0;
765 }
766 early_param("mem", early_mem);
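/*
 * Usage example (illustrative): booting with "mem=512M@0x80000000"
 * makes early_mem() discard the memory discovered so far and register
 * a single 512 MiB region at physical address 0x80000000; a plain
 * "mem=256M" keeps the default start of PHYS_OFFSET.
 */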
767 
768 static void __init request_standard_resources(const struct machine_desc *mdesc)
769 {
770 	struct memblock_region *region;
771 	struct resource *res;
772 
773 	kernel_code.start   = virt_to_phys(_text);
774 	kernel_code.end     = virt_to_phys(_etext - 1);
775 	kernel_data.start   = virt_to_phys(_sdata);
776 	kernel_data.end     = virt_to_phys(_end - 1);
777 
778 	for_each_memblock(memory, region) {
779 		res = memblock_virt_alloc(sizeof(*res), 0);
780 		res->name  = "System RAM";
781 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
782 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
783 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
784 
785 		request_resource(&iomem_resource, res);
786 
787 		if (kernel_code.start >= res->start &&
788 		    kernel_code.end <= res->end)
789 			request_resource(res, &kernel_code);
790 		if (kernel_data.start >= res->start &&
791 		    kernel_data.end <= res->end)
792 			request_resource(res, &kernel_data);
793 	}
794 
795 	if (mdesc->video_start) {
796 		video_ram.start = mdesc->video_start;
797 		video_ram.end   = mdesc->video_end;
798 		request_resource(&iomem_resource, &video_ram);
799 	}
800 
801 	/*
802 	 * Some machines never have the legacy parallel port regions
803 	 * lp0, lp1 or lp2, so only reserve them when requested.
804 	 */
805 	if (mdesc->reserve_lp0)
806 		request_resource(&ioport_resource, &lp0);
807 	if (mdesc->reserve_lp1)
808 		request_resource(&ioport_resource, &lp1);
809 	if (mdesc->reserve_lp2)
810 		request_resource(&ioport_resource, &lp2);
811 }
812 
813 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
814 struct screen_info screen_info = {
815  .orig_video_lines	= 30,
816  .orig_video_cols	= 80,
817  .orig_video_mode	= 0,
818  .orig_video_ega_bx	= 0,
819  .orig_video_isVGA	= 1,
820  .orig_video_points	= 8
821 };
822 #endif
823 
824 static int __init customize_machine(void)
825 {
826 	/*
827 	 * Customize platform devices, or add new ones.  On DT-based
828 	 * machines, if no init_machine callback is provided, we fall
829 	 * back to populating the machine from the device tree;
830 	 * otherwise every machine would need an init_machine callback.
831 	 */
832 	of_iommu_init();
833 	if (machine_desc->init_machine)
834 		machine_desc->init_machine();
835 #ifdef CONFIG_OF
836 	else
837 		of_platform_populate(NULL, of_default_bus_match_table,
838 					NULL, NULL);
839 #endif
840 	return 0;
841 }
842 arch_initcall(customize_machine);
843 
844 static int __init init_machine_late(void)
845 {
846 	struct device_node *root;
847 	int ret;
848 
849 	if (machine_desc->init_late)
850 		machine_desc->init_late();
851 
852 	root = of_find_node_by_path("/");
853 	if (root) {
854 		ret = of_property_read_string(root, "serial-number",
855 					      &system_serial);
856 		if (ret)
857 			system_serial = NULL;
858 	}
859 
860 	if (!system_serial)
861 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
862 					  system_serial_high,
863 					  system_serial_low);
864 
865 	return 0;
866 }
867 late_initcall(init_machine_late);
868 
869 #ifdef CONFIG_KEXEC
870 static inline unsigned long long get_total_mem(void)
871 {
872 	unsigned long total;
873 
874 	total = max_low_pfn - min_low_pfn;
875 	return total << PAGE_SHIFT;
876 }
877 
878 /**
879  * reserve_crashkernel() - reserves memory area for crash kernel
880  *
881  * This function reserves the memory area given in the "crashkernel=" kernel
882  * command line parameter. The reserved memory is used by a dump capture
883  * kernel when the primary kernel crashes.
884  */
885 static void __init reserve_crashkernel(void)
886 {
887 	unsigned long long crash_size, crash_base;
888 	unsigned long long total_mem;
889 	int ret;
890 
891 	total_mem = get_total_mem();
892 	ret = parse_crashkernel(boot_command_line, total_mem,
893 				&crash_size, &crash_base);
894 	if (ret)
895 		return;
896 
897 	ret = memblock_reserve(crash_base, crash_size);
898 	if (ret < 0) {
899 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
900 			(unsigned long)crash_base);
901 		return;
902 	}
903 
904 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
905 		(unsigned long)(crash_size >> 20),
906 		(unsigned long)(crash_base >> 20),
907 		(unsigned long)(total_mem >> 20));
908 
909 	crashk_res.start = crash_base;
910 	crashk_res.end = crash_base + crash_size - 1;
911 	insert_resource(&iomem_resource, &crashk_res);
912 }
913 #else
914 static inline void reserve_crashkernel(void) {}
915 #endif /* CONFIG_KEXEC */
916 
917 void __init hyp_mode_check(void)
918 {
919 #ifdef CONFIG_ARM_VIRT_EXT
920 	sync_boot_mode();
921 
922 	if (is_hyp_mode_available()) {
923 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
924 		pr_info("CPU: Virtualization extensions available.\n");
925 	} else if (is_hyp_mode_mismatched()) {
926 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
927 			__boot_cpu_mode & MODE_MASK);
928 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
929 	} else
930 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
931 #endif
932 }
933 
934 void __init setup_arch(char **cmdline_p)
935 {
936 	const struct machine_desc *mdesc;
937 
938 	setup_processor();
939 	mdesc = setup_machine_fdt(__atags_pointer);
940 	if (!mdesc)
941 		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
942 	machine_desc = mdesc;
943 	machine_name = mdesc->name;
944 	dump_stack_set_arch_desc("%s", mdesc->name);
945 
946 	if (mdesc->reboot_mode != REBOOT_HARD)
947 		reboot_mode = mdesc->reboot_mode;
948 
949 	init_mm.start_code = (unsigned long) _text;
950 	init_mm.end_code   = (unsigned long) _etext;
951 	init_mm.end_data   = (unsigned long) _edata;
952 	init_mm.brk	   = (unsigned long) _end;
953 
954 	/* populate cmd_line too for later use, preserving boot_command_line */
955 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
956 	*cmdline_p = cmd_line;
957 
958 	parse_early_param();
959 
960 #ifdef CONFIG_MMU
961 	early_paging_init(mdesc);
962 #endif
963 	setup_dma_zone(mdesc);
964 	sanity_check_meminfo();
965 	arm_memblock_init(mdesc);
966 
967 	paging_init(mdesc);
968 	request_standard_resources(mdesc);
969 
970 	if (mdesc->restart)
971 		arm_pm_restart = mdesc->restart;
972 
973 	unflatten_device_tree();
974 
975 	arm_dt_init_cpu_maps();
976 	psci_dt_init();
977 	xen_early_init();
978 #ifdef CONFIG_SMP
979 	if (is_smp()) {
980 		if (!mdesc->smp_init || !mdesc->smp_init()) {
981 			if (psci_smp_available())
982 				smp_set_ops(&psci_smp_ops);
983 			else if (mdesc->smp)
984 				smp_set_ops(mdesc->smp);
985 		}
986 		smp_init_cpus();
987 		smp_build_mpidr_hash();
988 	}
989 #endif
990 
991 	if (!is_smp())
992 		hyp_mode_check();
993 
994 	reserve_crashkernel();
995 
996 #ifdef CONFIG_MULTI_IRQ_HANDLER
997 	handle_arch_irq = mdesc->handle_irq;
998 #endif
999 
1000 #ifdef CONFIG_VT
1001 #if defined(CONFIG_VGA_CONSOLE)
1002 	conswitchp = &vga_con;
1003 #elif defined(CONFIG_DUMMY_CONSOLE)
1004 	conswitchp = &dummy_con;
1005 #endif
1006 #endif
1007 
1008 	if (mdesc->init_early)
1009 		mdesc->init_early();
1010 }
1011 
1012 
1013 static int __init topology_init(void)
1014 {
1015 	int cpu;
1016 
1017 	for_each_possible_cpu(cpu) {
1018 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1019 		cpuinfo->cpu.hotpluggable = 1;
1020 		register_cpu(&cpuinfo->cpu, cpu);
1021 	}
1022 
1023 	return 0;
1024 }
1025 subsys_initcall(topology_init);
1026 
1027 #ifdef CONFIG_HAVE_PROC_CPU
1028 static int __init proc_cpu_init(void)
1029 {
1030 	struct proc_dir_entry *res;
1031 
1032 	res = proc_mkdir("cpu", NULL);
1033 	if (!res)
1034 		return -ENOMEM;
1035 	return 0;
1036 }
1037 fs_initcall(proc_cpu_init);
1038 #endif
1039 
1040 static const char *hwcap_str[] = {
1041 	"swp",
1042 	"half",
1043 	"thumb",
1044 	"26bit",
1045 	"fastmult",
1046 	"fpa",
1047 	"vfp",
1048 	"edsp",
1049 	"java",
1050 	"iwmmxt",
1051 	"crunch",
1052 	"thumbee",
1053 	"neon",
1054 	"vfpv3",
1055 	"vfpv3d16",
1056 	"tls",
1057 	"vfpv4",
1058 	"idiva",
1059 	"idivt",
1060 	"vfpd32",
1061 	"lpae",
1062 	"evtstrm",
1063 	NULL
1064 };
1065 
1066 static const char *hwcap2_str[] = {
1067 	"aes",
1068 	"pmull",
1069 	"sha1",
1070 	"sha2",
1071 	"crc32",
1072 	NULL
1073 };
1074 
1075 static int c_show(struct seq_file *m, void *v)
1076 {
1077 	int i, j;
1078 	u32 cpuid;
1079 
1080 	for_each_online_cpu(i) {
1081 		/*
1082 		 * glibc reads /proc/cpuinfo to determine the number of
1083 		 * online processors, looking for lines beginning with
1084 		 * "processor".  Give glibc what it expects.
1085 		 */
1086 		seq_printf(m, "processor\t: %d\n", i);
1087 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1088 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1089 			   cpu_name, cpuid & 15, elf_platform);
1090 
1091 #if defined(CONFIG_SMP)
1092 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1093 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1094 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1095 #else
1096 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1097 			   loops_per_jiffy / (500000/HZ),
1098 			   (loops_per_jiffy / (5000/HZ)) % 100);
1099 #endif
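		/*
		 * Illustrative example: with HZ = 100 and loops_per_jiffy
		 * of 4980736 the expression above yields 4980736 / 5000 = 996
		 * and (4980736 / 50) % 100 = 14, i.e. "BogoMIPS : 996.14".
		 */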
1100 		/* dump out the processor features */
1101 		seq_puts(m, "Features\t: ");
1102 
1103 		for (j = 0; hwcap_str[j]; j++)
1104 			if (elf_hwcap & (1 << j))
1105 				seq_printf(m, "%s ", hwcap_str[j]);
1106 
1107 		for (j = 0; hwcap2_str[j]; j++)
1108 			if (elf_hwcap2 & (1 << j))
1109 				seq_printf(m, "%s ", hwcap2_str[j]);
1110 
1111 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1112 		seq_printf(m, "CPU architecture: %s\n",
1113 			   proc_arch[cpu_architecture()]);
1114 
1115 		if ((cpuid & 0x0008f000) == 0x00000000) {
1116 			/* pre-ARM7 */
1117 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1118 		} else {
1119 			if ((cpuid & 0x0008f000) == 0x00007000) {
1120 				/* ARM7 */
1121 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1122 					   (cpuid >> 16) & 127);
1123 			} else {
1124 				/* post-ARM7 */
1125 				seq_printf(m, "CPU variant\t: 0x%x\n",
1126 					   (cpuid >> 20) & 15);
1127 			}
1128 			seq_printf(m, "CPU part\t: 0x%03x\n",
1129 				   (cpuid >> 4) & 0xfff);
1130 		}
1131 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1132 	}
1133 
1134 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1135 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1136 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1137 
1138 	return 0;
1139 }
1140 
1141 static void *c_start(struct seq_file *m, loff_t *pos)
1142 {
1143 	return *pos < 1 ? (void *)1 : NULL;
1144 }
1145 
1146 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1147 {
1148 	++*pos;
1149 	return NULL;
1150 }
1151 
1152 static void c_stop(struct seq_file *m, void *v)
1153 {
1154 }
1155 
1156 const struct seq_operations cpuinfo_op = {
1157 	.start	= c_start,
1158 	.next	= c_next,
1159 	.stop	= c_stop,
1160 	.show	= c_show
1161 };
1162