xref: /linux/arch/arm/kernel/setup.c (revision 3e44c471a2dab210f7e9b1e5f7d4d54d52df59eb)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_iommu.h>
22 #include <linux/of_platform.h>
23 #include <linux/init.h>
24 #include <linux/kexec.h>
25 #include <linux/of_fdt.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34 
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/psci.h>
42 #include <asm/sections.h>
43 #include <asm/setup.h>
44 #include <asm/smp_plat.h>
45 #include <asm/mach-types.h>
46 #include <asm/cacheflush.h>
47 #include <asm/cachetype.h>
48 #include <asm/tlbflush.h>
49 
50 #include <asm/prom.h>
51 #include <asm/mach/arch.h>
52 #include <asm/mach/irq.h>
53 #include <asm/mach/time.h>
54 #include <asm/system_info.h>
55 #include <asm/system_misc.h>
56 #include <asm/traps.h>
57 #include <asm/unwind.h>
58 #include <asm/memblock.h>
59 #include <asm/virt.h>
60 
61 #include "atags.h"
62 
63 
64 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
65 char fpe_type[8];
66 
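/*
 * Handle the "fpe=" kernel parameter: the requested floating point
 * emulator type is copied into fpe_type for the FP emulation code
 * to consult later.
 */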
67 static int __init fpe_setup(char *line)
68 {
69 	memcpy(fpe_type, line, 8);
70 	return 1;
71 }
72 
73 __setup("fpe=", fpe_setup);
74 #endif
75 
76 extern void init_default_cache_policy(unsigned long);
77 extern void paging_init(const struct machine_desc *desc);
78 extern void early_paging_init(const struct machine_desc *);
79 extern void sanity_check_meminfo(void);
80 extern enum reboot_mode reboot_mode;
81 extern void setup_dma_zone(const struct machine_desc *desc);
82 
83 unsigned int processor_id;
84 EXPORT_SYMBOL(processor_id);
85 unsigned int __machine_arch_type __read_mostly;
86 EXPORT_SYMBOL(__machine_arch_type);
87 unsigned int cacheid __read_mostly;
88 EXPORT_SYMBOL(cacheid);
89 
90 unsigned int __atags_pointer __initdata;
91 
92 unsigned int system_rev;
93 EXPORT_SYMBOL(system_rev);
94 
95 const char *system_serial;
96 EXPORT_SYMBOL(system_serial);
97 
98 unsigned int system_serial_low;
99 EXPORT_SYMBOL(system_serial_low);
100 
101 unsigned int system_serial_high;
102 EXPORT_SYMBOL(system_serial_high);
103 
104 unsigned int elf_hwcap __read_mostly;
105 EXPORT_SYMBOL(elf_hwcap);
106 
107 unsigned int elf_hwcap2 __read_mostly;
108 EXPORT_SYMBOL(elf_hwcap2);
109 
110 
111 #ifdef MULTI_CPU
112 struct processor processor __read_mostly;
113 #endif
114 #ifdef MULTI_TLB
115 struct cpu_tlb_fns cpu_tlb __read_mostly;
116 #endif
117 #ifdef MULTI_USER
118 struct cpu_user_fns cpu_user __read_mostly;
119 #endif
120 #ifdef MULTI_CACHE
121 struct cpu_cache_fns cpu_cache __read_mostly;
122 #endif
123 #ifdef CONFIG_OUTER_CACHE
124 struct outer_cache_fns outer_cache __read_mostly;
125 EXPORT_SYMBOL(outer_cache);
126 #endif
127 
128 /*
129  * Cached cpu_architecture() result for use by assembler code.
130  * C code should use the cpu_architecture() function instead of accessing this
131  * variable directly.
132  */
133 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
134 
135 struct stack {
136 	u32 irq[3];
137 	u32 abt[3];
138 	u32 und[3];
139 	u32 fiq[3];
140 } ____cacheline_aligned;
141 
142 #ifndef CONFIG_CPU_V7M
143 static struct stack stacks[NR_CPUS];
144 #endif
145 
146 char elf_platform[ELF_PLATFORM_SIZE];
147 EXPORT_SYMBOL(elf_platform);
148 
149 static const char *cpu_name;
150 static const char *machine_name;
151 static char __initdata cmd_line[COMMAND_LINE_SIZE];
152 const struct machine_desc *machine_desc __initdata;
153 
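/*
 * Endianness probe: casting the long to char keeps its least significant
 * byte, which is c[0] ('l') on little-endian builds and c[3] ('b') on
 * big-endian builds.
 */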
154 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
155 #define ENDIANNESS ((char)endian_test.l)
156 
157 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
158 
159 /*
160  * Standard memory resources
161  */
162 static struct resource mem_res[] = {
163 	{
164 		.name = "Video RAM",
165 		.start = 0,
166 		.end = 0,
167 		.flags = IORESOURCE_MEM
168 	},
169 	{
170 		.name = "Kernel code",
171 		.start = 0,
172 		.end = 0,
173 		.flags = IORESOURCE_MEM
174 	},
175 	{
176 		.name = "Kernel data",
177 		.start = 0,
178 		.end = 0,
179 		.flags = IORESOURCE_MEM
180 	}
181 };
182 
183 #define video_ram   mem_res[0]
184 #define kernel_code mem_res[1]
185 #define kernel_data mem_res[2]
186 
187 static struct resource io_res[] = {
188 	{
189 		.name = "reserved",
190 		.start = 0x3bc,
191 		.end = 0x3be,
192 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
193 	},
194 	{
195 		.name = "reserved",
196 		.start = 0x378,
197 		.end = 0x37f,
198 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
199 	},
200 	{
201 		.name = "reserved",
202 		.start = 0x278,
203 		.end = 0x27f,
204 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
205 	}
206 };
207 
208 #define lp0 io_res[0]
209 #define lp1 io_res[1]
210 #define lp2 io_res[2]
211 
212 static const char *proc_arch[] = {
213 	"undefined/unknown",
214 	"3",
215 	"4",
216 	"4T",
217 	"5",
218 	"5T",
219 	"5TE",
220 	"5TEJ",
221 	"6TEJ",
222 	"7",
223 	"7M",
224 	"?(12)",
225 	"?(13)",
226 	"?(14)",
227 	"?(15)",
228 	"?(16)",
229 	"?(17)",
230 };
231 
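/*
 * Decode the architecture version from the main ID register.  On V7M the
 * answer is fixed; otherwise pre-CPUID parts are recognised by their
 * legacy ID layouts, and parts using the revised CPUID scheme
 * (architecture field 0xf) are classified as ARMv6 or ARMv7 from the
 * VMSA/PMSA fields of ID_MMFR0.
 */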
232 #ifdef CONFIG_CPU_V7M
233 static int __get_cpu_architecture(void)
234 {
235 	return CPU_ARCH_ARMv7M;
236 }
237 #else
238 static int __get_cpu_architecture(void)
239 {
240 	int cpu_arch;
241 
242 	if ((read_cpuid_id() & 0x0008f000) == 0) {
243 		cpu_arch = CPU_ARCH_UNKNOWN;
244 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
245 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
246 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
247 		cpu_arch = (read_cpuid_id() >> 16) & 7;
248 		if (cpu_arch)
249 			cpu_arch += CPU_ARCH_ARMv3;
250 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
251 		/* Revised CPUID format. Read the Memory Model Feature
252 		 * Register 0 and check for VMSAv7 or PMSAv7 */
253 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
254 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
255 		    (mmfr0 & 0x000000f0) >= 0x00000030)
256 			cpu_arch = CPU_ARCH_ARMv7;
257 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
258 			 (mmfr0 & 0x000000f0) == 0x00000020)
259 			cpu_arch = CPU_ARCH_ARMv6;
260 		else
261 			cpu_arch = CPU_ARCH_UNKNOWN;
262 	} else
263 		cpu_arch = CPU_ARCH_UNKNOWN;
264 
265 	return cpu_arch;
266 }
267 #endif
268 
269 int __pure cpu_architecture(void)
270 {
271 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
272 
273 	return __cpu_architecture;
274 }
275 
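/*
 * Decide whether the instruction cache can alias.  On ARMv7 this reads
 * CCSIDR for the L1 I-cache (CSSELR = 1) and flags aliasing when a single
 * cache way (line size * number of sets) is larger than a page, i.e. when
 * the index bits overlap the page offset.  PIPT I-caches never alias.
 */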
276 static int cpu_has_aliasing_icache(unsigned int arch)
277 {
278 	int aliasing_icache;
279 	unsigned int id_reg, num_sets, line_size;
280 
281 	/* PIPT caches never alias. */
282 	if (icache_is_pipt())
283 		return 0;
284 
285 	/* arch specifies the register format */
286 	switch (arch) {
287 	case CPU_ARCH_ARMv7:
288 		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
289 		    : /* No output operands */
290 		    : "r" (1));
291 		isb();
292 		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
293 		    : "=r" (id_reg));
294 		line_size = 4 << ((id_reg & 0x7) + 2);
295 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
296 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
297 		break;
298 	case CPU_ARCH_ARMv6:
299 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
300 		break;
301 	default:
302 		/* I-cache aliases will be handled by D-cache aliasing code */
303 		aliasing_icache = 0;
304 	}
305 
306 	return aliasing_icache;
307 }
308 
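/*
 * Classify the data and instruction caches (VIVT, VIPT aliasing or
 * non-aliasing, ASID-tagged, PIPT) from the Cache Type Register.  A format
 * field of 0b100 in CTR[31:29] indicates the ARMv7 register layout.
 */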
309 static void __init cacheid_init(void)
310 {
311 	unsigned int arch = cpu_architecture();
312 
313 	if (arch == CPU_ARCH_ARMv7M) {
314 		cacheid = 0;
315 	} else if (arch >= CPU_ARCH_ARMv6) {
316 		unsigned int cachetype = read_cpuid_cachetype();
317 		if ((cachetype & (7 << 29)) == 4 << 29) {
318 			/* ARMv7 register format */
319 			arch = CPU_ARCH_ARMv7;
320 			cacheid = CACHEID_VIPT_NONALIASING;
321 			switch (cachetype & (3 << 14)) {
322 			case (1 << 14):
323 				cacheid |= CACHEID_ASID_TAGGED;
324 				break;
325 			case (3 << 14):
326 				cacheid |= CACHEID_PIPT;
327 				break;
328 			}
329 		} else {
330 			arch = CPU_ARCH_ARMv6;
331 			if (cachetype & (1 << 23))
332 				cacheid = CACHEID_VIPT_ALIASING;
333 			else
334 				cacheid = CACHEID_VIPT_NONALIASING;
335 		}
336 		if (cpu_has_aliasing_icache(arch))
337 			cacheid |= CACHEID_VIPT_I_ALIASING;
338 	} else {
339 		cacheid = CACHEID_VIVT;
340 	}
341 
342 	pr_info("CPU: %s data cache, %s instruction cache\n",
343 		cache_is_vivt() ? "VIVT" :
344 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
345 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
346 		cache_is_vivt() ? "VIVT" :
347 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
348 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
349 		icache_is_pipt() ? "PIPT" :
350 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
351 }
352 
353 /*
354  * These functions re-use the assembly code in head.S, which
355  * already provides the required functionality.
356  */
357 extern struct proc_info_list *lookup_processor_type(unsigned int);
358 
359 void __init early_print(const char *str, ...)
360 {
361 	extern void printascii(const char *);
362 	char buf[256];
363 	va_list ap;
364 
365 	va_start(ap, str);
366 	vsnprintf(buf, sizeof(buf), str, ap);
367 	va_end(ap);
368 
369 #ifdef CONFIG_DEBUG_LL
370 	printascii(buf);
371 #endif
372 	printk("%s", buf);
373 }
374 
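/*
 * Derive additional hwcaps from the CPUID feature registers on ARMv7+:
 * hardware divide from ID_ISAR0, LPAE from ID_MMFR0, and the v8 crypto
 * and CRC32 extensions from ID_ISAR5.
 */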
375 static void __init cpuid_init_hwcaps(void)
376 {
377 	int block;
378 	u32 isar5;
379 
380 	if (cpu_architecture() < CPU_ARCH_ARMv7)
381 		return;
382 
383 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
384 	if (block >= 2)
385 		elf_hwcap |= HWCAP_IDIVA;
386 	if (block >= 1)
387 		elf_hwcap |= HWCAP_IDIVT;
388 
389 	/* LPAE implies atomic ldrd/strd instructions */
390 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
391 	if (block >= 5)
392 		elf_hwcap |= HWCAP_LPAE;
393 
394 	/* check for supported v8 Crypto instructions */
395 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
396 
397 	block = cpuid_feature_extract_field(isar5, 4);
398 	if (block >= 2)
399 		elf_hwcap2 |= HWCAP2_PMULL;
400 	if (block >= 1)
401 		elf_hwcap2 |= HWCAP2_AES;
402 
403 	block = cpuid_feature_extract_field(isar5, 8);
404 	if (block >= 1)
405 		elf_hwcap2 |= HWCAP2_SHA1;
406 
407 	block = cpuid_feature_extract_field(isar5, 12);
408 	if (block >= 1)
409 		elf_hwcap2 |= HWCAP2_SHA2;
410 
411 	block = cpuid_feature_extract_field(isar5, 16);
412 	if (block >= 1)
413 		elf_hwcap2 |= HWCAP2_CRC32;
414 }
415 
416 static void __init elf_hwcap_fixup(void)
417 {
418 	unsigned id = read_cpuid_id();
419 
420 	/*
421 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
422 	 * see also kuser_get_tls_init.
423 	 */
424 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
425 	    ((id >> 20) & 3) == 0) {
426 		elf_hwcap &= ~HWCAP_TLS;
427 		return;
428 	}
429 
430 	/* Verify whether the CPUID scheme is implemented */
431 	if ((id & 0x000f0000) != 0x000f0000)
432 		return;
433 
434 	/*
435 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
436 	 * avoid advertising SWP; it may not be atomic with
437 	 * multiprocessing cores.
438 	 */
439 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
440 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
441 	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
442 		elf_hwcap &= ~HWCAP_SWP;
443 }
444 
445 /*
446  * cpu_init - initialise one CPU.
447  *
448  * cpu_init sets up the per-CPU stacks.
449  */
450 void notrace cpu_init(void)
451 {
452 #ifndef CONFIG_CPU_V7M
453 	unsigned int cpu = smp_processor_id();
454 	struct stack *stk = &stacks[cpu];
455 
456 	if (cpu >= NR_CPUS) {
457 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
458 		BUG();
459 	}
460 
461 	/*
462 	 * This only works on resume and secondary cores. For booting on the
463 	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
464 	 */
465 	set_my_cpu_offset(per_cpu_offset(cpu));
466 
467 	cpu_proc_init();
468 
469 	/*
470 	 * Define the placement constraint for the inline asm directive below.
471 	 * In Thumb-2, msr with an immediate value is not allowed.
472 	 */
473 #ifdef CONFIG_THUMB2_KERNEL
474 #define PLC	"r"
475 #else
476 #define PLC	"I"
477 #endif
478 
479 	/*
480 	 * setup stacks for re-entrant exception handlers
481 	 */
482 	__asm__ (
483 	"msr	cpsr_c, %1\n\t"
484 	"add	r14, %0, %2\n\t"
485 	"mov	sp, r14\n\t"
486 	"msr	cpsr_c, %3\n\t"
487 	"add	r14, %0, %4\n\t"
488 	"mov	sp, r14\n\t"
489 	"msr	cpsr_c, %5\n\t"
490 	"add	r14, %0, %6\n\t"
491 	"mov	sp, r14\n\t"
492 	"msr	cpsr_c, %7\n\t"
493 	"add	r14, %0, %8\n\t"
494 	"mov	sp, r14\n\t"
495 	"msr	cpsr_c, %9"
496 	    :
497 	    : "r" (stk),
498 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
499 	      "I" (offsetof(struct stack, irq[0])),
500 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
501 	      "I" (offsetof(struct stack, abt[0])),
502 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
503 	      "I" (offsetof(struct stack, und[0])),
504 	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
505 	      "I" (offsetof(struct stack, fiq[0])),
506 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
507 	    : "r14");
508 #endif
509 }
510 
511 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
512 
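/*
 * Seat the booting CPU at logical CPU 0: its MPIDR affinity level 0 value
 * is mapped to logical 0, and the logical slot it would otherwise have
 * occupied is handed to the CPU that would have been logical 0.
 */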
513 void __init smp_setup_processor_id(void)
514 {
515 	int i;
516 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
517 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
518 
519 	cpu_logical_map(0) = cpu;
520 	for (i = 1; i < nr_cpu_ids; ++i)
521 		cpu_logical_map(i) = i == cpu ? 0 : i;
522 
523 	/*
524 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
525 	 * by using percpu variables too early; for example, lockdep
526 	 * accesses percpu variables inside lock_release.
527 	 */
528 	set_my_cpu_offset(0);
529 
530 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
531 }
532 
533 struct mpidr_hash mpidr_hash;
534 #ifdef CONFIG_SMP
535 /**
536  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
537  *			  level in order to build a linear index from an
538  *			  MPIDR value. The resulting algorithm is a
539  *			  collision-free hash carried out through shifting and ORing.
540  */
541 static void __init smp_build_mpidr_hash(void)
542 {
543 	u32 i, affinity;
544 	u32 fs[3], bits[3], ls, mask = 0;
545 	/*
546 	 * Pre-scan the list of MPIDRs and filter out bits that do
547 	 * not contribute to affinity levels, i.e. they never toggle.
548 	 */
549 	for_each_possible_cpu(i)
550 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
551 	pr_debug("mask of set bits 0x%x\n", mask);
552 	/*
553 	 * Find and stash the last and first bit set at all affinity levels to
554 	 * check how many bits are required to represent them.
555 	 */
556 	for (i = 0; i < 3; i++) {
557 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
558 		/*
559 		 * Find the MSB bit and LSB bits position
560 		 * to determine how many bits are required
561 		 * to express the affinity level.
562 		 */
563 		ls = fls(affinity);
564 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
565 		bits[i] = ls - fs[i];
566 	}
567 	/*
568 	 * An index can be created from the MPIDR by isolating the
569 	 * significant bits at each affinity level and by shifting
570 	 * them in order to compress the 24-bit value space to a much
571 	 * smaller set of values. This is equivalent to hashing the
572 	 * MPIDR through shifting and ORing. It is a collision-free
573 	 * hash, though not minimal, since some levels might contain a
574 	 * number of CPUs that is not an exact power of 2 and their bit
575 	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
576 	 */
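	/*
	 * Illustrative example (not from the original source): for two
	 * clusters of two CPUs with MPIDRs 0x000, 0x001, 0x100 and 0x101,
	 * mask = 0x101, bits[0] = bits[1] = 1 and fs[0] = fs[1] = 0, so
	 * shift_aff[1] = 8 + 0 - 1 = 7 and the hash compresses the four
	 * MPIDRs into indices 0..3 using 2 bits.
	 */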
577 	mpidr_hash.shift_aff[0] = fs[0];
578 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
579 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
580 						(bits[1] + bits[0]);
581 	mpidr_hash.mask = mask;
582 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
583 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
584 				mpidr_hash.shift_aff[0],
585 				mpidr_hash.shift_aff[1],
586 				mpidr_hash.shift_aff[2],
587 				mpidr_hash.mask,
588 				mpidr_hash.bits);
589 	/*
590 	 * 4x is an arbitrary value used to warn on a hash table much bigger
591 	 * than expected on most systems.
592 	 */
593 	if (mpidr_hash_size() > 4 * num_possible_cpus())
594 		pr_warn("Large number of MPIDR hash buckets detected\n");
595 	sync_cache_w(&mpidr_hash);
596 }
597 #endif
598 
599 static void __init setup_processor(void)
600 {
601 	struct proc_info_list *list;
602 
603 	/*
604 	 * locate processor in the list of supported processor
605 	 * types.  The linker builds this table for us from the
606 	 * entries in arch/arm/mm/proc-*.S
607 	 */
608 	list = lookup_processor_type(read_cpuid_id());
609 	if (!list) {
610 		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
611 		       read_cpuid_id());
612 		while (1);
613 	}
614 
615 	cpu_name = list->cpu_name;
616 	__cpu_architecture = __get_cpu_architecture();
617 
618 #ifdef MULTI_CPU
619 	processor = *list->proc;
620 #endif
621 #ifdef MULTI_TLB
622 	cpu_tlb = *list->tlb;
623 #endif
624 #ifdef MULTI_USER
625 	cpu_user = *list->user;
626 #endif
627 #ifdef MULTI_CACHE
628 	cpu_cache = *list->cache;
629 #endif
630 
631 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
632 		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
633 		proc_arch[cpu_architecture()], get_cr());
634 
635 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
636 		 list->arch_name, ENDIANNESS);
637 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
638 		 list->elf_name, ENDIANNESS);
639 	elf_hwcap = list->elf_hwcap;
640 
641 	cpuid_init_hwcaps();
642 
643 #ifndef CONFIG_ARM_THUMB
644 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
645 #endif
646 #ifdef CONFIG_MMU
647 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
648 #endif
649 	erratum_a15_798181_init();
650 
651 	elf_hwcap_fixup();
652 
653 	cacheid_init();
654 	cpu_init();
655 }
656 
657 void __init dump_machine_table(void)
658 {
659 	const struct machine_desc *p;
660 
661 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
662 	for_each_machine_desc(p)
663 		early_print("%08x\t%s\n", p->nr, p->name);
664 
665 	early_print("\nPlease check your kernel config and/or bootloader.\n");
666 
667 	while (true)
668 		/* can't use cpu_relax() here as it may require MMU setup */;
669 }
670 
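/*
 * Register a physical memory region with memblock, page-aligning it,
 * clamping it to the addressable range and discarding any part below
 * PHYS_OFFSET.  For example, with 4 KiB pages a region starting at
 * 0x80000800 is rounded up to 0x80001000 and its size reduced
 * accordingly before the final page-size mask is applied.
 */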
671 int __init arm_add_memory(u64 start, u64 size)
672 {
673 	u64 aligned_start;
674 
675 	/*
676 	 * Ensure that start/size are aligned to a page boundary.
677 	 * Size is rounded down, start is rounded up.
678 	 */
679 	aligned_start = PAGE_ALIGN(start);
680 	if (aligned_start > start + size)
681 		size = 0;
682 	else
683 		size -= aligned_start - start;
684 
685 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
686 	if (aligned_start > ULONG_MAX) {
687 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
688 			(long long)start);
689 		return -EINVAL;
690 	}
691 
692 	if (aligned_start + size > ULONG_MAX) {
693 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
694 			(long long)start);
695 		/*
696 		 * To ensure the region's start + size is representable in
697 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
698 		 * This means we lose a page after masking.
699 		 */
700 		size = ULONG_MAX - aligned_start;
701 	}
702 #endif
703 
704 	if (aligned_start < PHYS_OFFSET) {
705 		if (aligned_start + size <= PHYS_OFFSET) {
706 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
707 				aligned_start, aligned_start + size);
708 			return -EINVAL;
709 		}
710 
711 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
712 			aligned_start, (u64)PHYS_OFFSET);
713 
714 		size -= PHYS_OFFSET - aligned_start;
715 		aligned_start = PHYS_OFFSET;
716 	}
717 
718 	start = aligned_start;
719 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
720 
721 	/*
722 	 * Check whether this memory region still has a non-zero
723 	 * size after alignment and clamping.
724 	 */
725 	if (size == 0)
726 		return -EINVAL;
727 
728 	memblock_add(start, size);
729 	return 0;
730 }
731 
732 /*
733  * Pick out the memory size.  We look for mem=size@start,
734  * where start and size are "size[KkMm]"
735  */
736 
737 static int __init early_mem(char *p)
738 {
739 	static int usermem __initdata = 0;
740 	u64 size;
741 	u64 start;
742 	char *endp;
743 
744 	/*
745 	 * If the user specifies memory size, we
746 	 * blow away any automatically generated
747 	 * size.
748 	 */
749 	if (usermem == 0) {
750 		usermem = 1;
751 		memblock_remove(memblock_start_of_DRAM(),
752 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
753 	}
754 
755 	start = PHYS_OFFSET;
756 	size  = memparse(p, &endp);
757 	if (*endp == '@')
758 		start = memparse(endp + 1, NULL);
759 
760 	arm_add_memory(start, size);
761 
762 	return 0;
763 }
764 early_param("mem", early_mem);
765 
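/*
 * Publish the standard resources: each memblock memory region is exposed
 * as "System RAM" in the iomem tree, with the kernel code and data
 * resources nested inside the region that contains them, plus optional
 * video RAM and legacy parallel port ranges requested by the machine
 * descriptor.
 */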
766 static void __init request_standard_resources(const struct machine_desc *mdesc)
767 {
768 	struct memblock_region *region;
769 	struct resource *res;
770 
771 	kernel_code.start   = virt_to_phys(_text);
772 	kernel_code.end     = virt_to_phys(_etext - 1);
773 	kernel_data.start   = virt_to_phys(_sdata);
774 	kernel_data.end     = virt_to_phys(_end - 1);
775 
776 	for_each_memblock(memory, region) {
777 		res = memblock_virt_alloc(sizeof(*res), 0);
778 		res->name  = "System RAM";
779 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
780 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
781 		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
782 
783 		request_resource(&iomem_resource, res);
784 
785 		if (kernel_code.start >= res->start &&
786 		    kernel_code.end <= res->end)
787 			request_resource(res, &kernel_code);
788 		if (kernel_data.start >= res->start &&
789 		    kernel_data.end <= res->end)
790 			request_resource(res, &kernel_data);
791 	}
792 
793 	if (mdesc->video_start) {
794 		video_ram.start = mdesc->video_start;
795 		video_ram.end   = mdesc->video_end;
796 		request_resource(&iomem_resource, &video_ram);
797 	}
798 
799 	/*
800 	 * Some machines can never have lp0, lp1 or lp2, so only
801 	 * reserve them when the machine descriptor asks for them.
802 	 */
803 	if (mdesc->reserve_lp0)
804 		request_resource(&ioport_resource, &lp0);
805 	if (mdesc->reserve_lp1)
806 		request_resource(&ioport_resource, &lp1);
807 	if (mdesc->reserve_lp2)
808 		request_resource(&ioport_resource, &lp2);
809 }
810 
811 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
812 struct screen_info screen_info = {
813  .orig_video_lines	= 30,
814  .orig_video_cols	= 80,
815  .orig_video_mode	= 0,
816  .orig_video_ega_bx	= 0,
817  .orig_video_isVGA	= 1,
818  .orig_video_points	= 8
819 };
820 #endif
821 
822 static int __init customize_machine(void)
823 {
824 	/*
825 	 * Customize platform devices, or add new ones. On DT based
826 	 * machines, we fall back to populating the machine from the
827 	 * device tree if no callback is provided; otherwise we would
828 	 * always need an init_machine callback.
829 	 */
830 	of_iommu_init();
831 	if (machine_desc->init_machine)
832 		machine_desc->init_machine();
833 #ifdef CONFIG_OF
834 	else
835 		of_platform_populate(NULL, of_default_bus_match_table,
836 					NULL, NULL);
837 #endif
838 	return 0;
839 }
840 arch_initcall(customize_machine);
841 
842 static int __init init_machine_late(void)
843 {
844 	struct device_node *root;
845 	int ret;
846 
847 	if (machine_desc->init_late)
848 		machine_desc->init_late();
849 
850 	root = of_find_node_by_path("/");
851 	if (root) {
852 		ret = of_property_read_string(root, "serial-number",
853 					      &system_serial);
854 		if (ret)
855 			system_serial = NULL;
856 	}
857 
858 	if (!system_serial)
859 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
860 					  system_serial_high,
861 					  system_serial_low);
862 
863 	return 0;
864 }
865 late_initcall(init_machine_late);
866 
867 #ifdef CONFIG_KEXEC
868 static inline unsigned long long get_total_mem(void)
869 {
870 	unsigned long total;
871 
872 	total = max_low_pfn - min_low_pfn;
873 	return total << PAGE_SHIFT;
874 }
875 
876 /**
877  * reserve_crashkernel() - reserves memory area for crash kernel
878  *
879  * This function reserves the memory area given by the "crashkernel=" kernel
880  * command line parameter. The reserved memory is used by a dump capture
881  * kernel when the primary kernel crashes.
882  */
883 static void __init reserve_crashkernel(void)
884 {
885 	unsigned long long crash_size, crash_base;
886 	unsigned long long total_mem;
887 	int ret;
888 
889 	total_mem = get_total_mem();
890 	ret = parse_crashkernel(boot_command_line, total_mem,
891 				&crash_size, &crash_base);
892 	if (ret)
893 		return;
894 
895 	ret = memblock_reserve(crash_base, crash_size);
896 	if (ret < 0) {
897 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
898 			(unsigned long)crash_base);
899 		return;
900 	}
901 
902 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
903 		(unsigned long)(crash_size >> 20),
904 		(unsigned long)(crash_base >> 20),
905 		(unsigned long)(total_mem >> 20));
906 
907 	crashk_res.start = crash_base;
908 	crashk_res.end = crash_base + crash_size - 1;
909 	insert_resource(&iomem_resource, &crashk_res);
910 }
911 #else
912 static inline void reserve_crashkernel(void) {}
913 #endif /* CONFIG_KEXEC */
914 
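/*
 * Report which mode the CPUs entered the kernel in: HYP everywhere means
 * the virtualization extensions are usable, a mismatch between cores is
 * warned about, and otherwise all cores started in SVC.
 */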
915 void __init hyp_mode_check(void)
916 {
917 #ifdef CONFIG_ARM_VIRT_EXT
918 	sync_boot_mode();
919 
920 	if (is_hyp_mode_available()) {
921 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
922 		pr_info("CPU: Virtualization extensions available.\n");
923 	} else if (is_hyp_mode_mismatched()) {
924 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
925 			__boot_cpu_mode & MODE_MASK);
926 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
927 	} else
928 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
929 #endif
930 }
931 
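/*
 * setup_arch() is the architecture entry point called from start_kernel():
 * it identifies the CPU and machine (FDT or ATAGS), parses early
 * parameters, sets up memblock and paging, registers the standard
 * resources and, on SMP, selects the SMP operations before the rest of
 * the kernel boots.
 */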
932 void __init setup_arch(char **cmdline_p)
933 {
934 	const struct machine_desc *mdesc;
935 
936 	setup_processor();
937 	mdesc = setup_machine_fdt(__atags_pointer);
938 	if (!mdesc)
939 		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
940 	machine_desc = mdesc;
941 	machine_name = mdesc->name;
942 	dump_stack_set_arch_desc("%s", mdesc->name);
943 
944 	if (mdesc->reboot_mode != REBOOT_HARD)
945 		reboot_mode = mdesc->reboot_mode;
946 
947 	init_mm.start_code = (unsigned long) _text;
948 	init_mm.end_code   = (unsigned long) _etext;
949 	init_mm.end_data   = (unsigned long) _edata;
950 	init_mm.brk	   = (unsigned long) _end;
951 
952 	/* populate cmd_line too for later use, preserving boot_command_line */
953 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
954 	*cmdline_p = cmd_line;
955 
956 	parse_early_param();
957 
958 #ifdef CONFIG_MMU
959 	early_paging_init(mdesc);
960 #endif
961 	setup_dma_zone(mdesc);
962 	sanity_check_meminfo();
963 	arm_memblock_init(mdesc);
964 
965 	paging_init(mdesc);
966 	request_standard_resources(mdesc);
967 
968 	if (mdesc->restart)
969 		arm_pm_restart = mdesc->restart;
970 
971 	unflatten_device_tree();
972 
973 	arm_dt_init_cpu_maps();
974 	psci_init();
975 #ifdef CONFIG_SMP
976 	if (is_smp()) {
977 		if (!mdesc->smp_init || !mdesc->smp_init()) {
978 			if (psci_smp_available())
979 				smp_set_ops(&psci_smp_ops);
980 			else if (mdesc->smp)
981 				smp_set_ops(mdesc->smp);
982 		}
983 		smp_init_cpus();
984 		smp_build_mpidr_hash();
985 	}
986 #endif
987 
988 	if (!is_smp())
989 		hyp_mode_check();
990 
991 	reserve_crashkernel();
992 
993 #ifdef CONFIG_MULTI_IRQ_HANDLER
994 	handle_arch_irq = mdesc->handle_irq;
995 #endif
996 
997 #ifdef CONFIG_VT
998 #if defined(CONFIG_VGA_CONSOLE)
999 	conswitchp = &vga_con;
1000 #elif defined(CONFIG_DUMMY_CONSOLE)
1001 	conswitchp = &dummy_con;
1002 #endif
1003 #endif
1004 
1005 	if (mdesc->init_early)
1006 		mdesc->init_early();
1007 }
1008 
1009 
1010 static int __init topology_init(void)
1011 {
1012 	int cpu;
1013 
1014 	for_each_possible_cpu(cpu) {
1015 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1016 		cpuinfo->cpu.hotpluggable = 1;
1017 		register_cpu(&cpuinfo->cpu, cpu);
1018 	}
1019 
1020 	return 0;
1021 }
1022 subsys_initcall(topology_init);
1023 
1024 #ifdef CONFIG_HAVE_PROC_CPU
1025 static int __init proc_cpu_init(void)
1026 {
1027 	struct proc_dir_entry *res;
1028 
1029 	res = proc_mkdir("cpu", NULL);
1030 	if (!res)
1031 		return -ENOMEM;
1032 	return 0;
1033 }
1034 fs_initcall(proc_cpu_init);
1035 #endif
1036 
1037 static const char *hwcap_str[] = {
1038 	"swp",
1039 	"half",
1040 	"thumb",
1041 	"26bit",
1042 	"fastmult",
1043 	"fpa",
1044 	"vfp",
1045 	"edsp",
1046 	"java",
1047 	"iwmmxt",
1048 	"crunch",
1049 	"thumbee",
1050 	"neon",
1051 	"vfpv3",
1052 	"vfpv3d16",
1053 	"tls",
1054 	"vfpv4",
1055 	"idiva",
1056 	"idivt",
1057 	"vfpd32",
1058 	"lpae",
1059 	"evtstrm",
1060 	NULL
1061 };
1062 
1063 static const char *hwcap2_str[] = {
1064 	"aes",
1065 	"pmull",
1066 	"sha1",
1067 	"sha2",
1068 	"crc32",
1069 	NULL
1070 };
1071 
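/*
 * seq_file show handler for /proc/cpuinfo: one "processor" block is
 * emitted per online CPU, followed by the global Hardware, Revision and
 * Serial lines.
 */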
1072 static int c_show(struct seq_file *m, void *v)
1073 {
1074 	int i, j;
1075 	u32 cpuid;
1076 
1077 	for_each_online_cpu(i) {
1078 		/*
1079 		 * glibc reads /proc/cpuinfo to determine the number of
1080 		 * online processors, looking for lines beginning with
1081 		 * "processor".  Give glibc what it expects.
1082 		 */
1083 		seq_printf(m, "processor\t: %d\n", i);
1084 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1085 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1086 			   cpu_name, cpuid & 15, elf_platform);
1087 
1088 #if defined(CONFIG_SMP)
1089 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1090 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1091 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1092 #else
1093 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1094 			   loops_per_jiffy / (500000/HZ),
1095 			   (loops_per_jiffy / (5000/HZ)) % 100);
1096 #endif
1097 		/* dump out the processor features */
1098 		seq_puts(m, "Features\t: ");
1099 
1100 		for (j = 0; hwcap_str[j]; j++)
1101 			if (elf_hwcap & (1 << j))
1102 				seq_printf(m, "%s ", hwcap_str[j]);
1103 
1104 		for (j = 0; hwcap2_str[j]; j++)
1105 			if (elf_hwcap2 & (1 << j))
1106 				seq_printf(m, "%s ", hwcap2_str[j]);
1107 
1108 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1109 		seq_printf(m, "CPU architecture: %s\n",
1110 			   proc_arch[cpu_architecture()]);
1111 
1112 		if ((cpuid & 0x0008f000) == 0x00000000) {
1113 			/* pre-ARM7 */
1114 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1115 		} else {
1116 			if ((cpuid & 0x0008f000) == 0x00007000) {
1117 				/* ARM7 */
1118 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1119 					   (cpuid >> 16) & 127);
1120 			} else {
1121 				/* post-ARM7 */
1122 				seq_printf(m, "CPU variant\t: 0x%x\n",
1123 					   (cpuid >> 20) & 15);
1124 			}
1125 			seq_printf(m, "CPU part\t: 0x%03x\n",
1126 				   (cpuid >> 4) & 0xfff);
1127 		}
1128 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1129 	}
1130 
1131 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1132 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1133 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1134 
1135 	return 0;
1136 }
1137 
1138 static void *c_start(struct seq_file *m, loff_t *pos)
1139 {
1140 	return *pos < 1 ? (void *)1 : NULL;
1141 }
1142 
1143 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1144 {
1145 	++*pos;
1146 	return NULL;
1147 }
1148 
1149 static void c_stop(struct seq_file *m, void *v)
1150 {
1151 }
1152 
1153 const struct seq_operations cpuinfo_op = {
1154 	.start	= c_start,
1155 	.next	= c_next,
1156 	.stop	= c_stop,
1157 	.show	= c_show
1158 };
1159