// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[4];
	u32 abt[4];
	u32 und[4];
	u32 fiq[4];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

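/*
 * Determine whether the L1 instruction cache on this CPU can alias.  On
 * ARMv7 the I-cache geometry is read from CCSIDR: a way larger than one
 * page (line_size * num_sets > PAGE_SIZE) means virtual aliases are
 * possible.  On ARMv6 the cache type register carries an explicit
 * aliasing bit.  PIPT I-caches never alias.
 */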
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

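/*
 * Decode the cache type register into the global "cacheid" flags used by
 * the cache_is_*()/icache_is_*() helpers: VIVT for pre-v6 cores, VIPT
 * aliasing or non-aliasing for v6/v7, plus ASID-tagged/PIPT I-cache and
 * I-cache aliasing bits where the hardware reports them.
 */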
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

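/*
 * If the CPU implements hardware integer divide, patch the run-time
 * library helpers __aeabi_uidiv/__aeabi_idiv so that they execute a
 * single udiv/sdiv followed by "bx lr" instead of the generic
 * software division loop.
 */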
static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

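/*
 * Probe the CPUID extension registers on ARMv7+ parts and extend
 * elf_hwcap/elf_hwcap2 with optional features: hardware integer divide,
 * LPAE (atomic ldrd/strd), the v8 crypto and CRC32 extensions, the SB
 * barrier and SSBS.
 */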
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;
	u32 isar6;
	u32 pfr2;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;

	/* Check for Speculation barrier instruction */
	isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
	block = cpuid_feature_extract_field(isar6, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SB;

	/* Check for Speculative Store Bypassing control */
	pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
	block = cpuid_feature_extract_field(pfr2, 4);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SSBS;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l	"l"
#define PLC_r	"r"
#else
#define PLC_l	"I"
#define PLC_r	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

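/*
 * Called very early on the boot CPU: derive its hardware id from the
 * affinity-0 field of MPIDR (0 on UP systems) and build an initial
 * logical-to-physical CPU map with the boot CPU as logical CPU 0.
 */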
void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types.  The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

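/*
 * Identify the boot CPU: look up its proc_info entry, install the
 * processor/TLB/user/cache function tables, report the core and derive
 * the ELF hwcaps, default cache policy and cache geometry from it, then
 * initialise the per-CPU exception stacks via cpu_init().
 */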
static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

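/*
 * Register a block of RAM with memblock.  The region is clipped to a
 * page-aligned start, to the 32-bit physical address space when
 * CONFIG_PHYS_ADDR_T_64BIT is not set, and to PHYS_OFFSET; it is
 * rejected if nothing usable remains.
 */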
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

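/*
 * Describe the memory map to the resource tree: every memblock range
 * becomes a "System RAM" resource (plus a boot-alias resource on systems
 * with an idmap alias, for the benefit of kexec-tools), with the kernel
 * code and data nested inside, followed by optional video RAM and the
 * legacy parallel-port I/O regions.
 */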
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE)
struct screen_info vgacon_screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

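/*
 * Late initialisation: run the machine's init_late hook and fill in
 * system_serial, preferring the "serial-number" device tree property and
 * falling back to the legacy 64-bit serial number held in
 * system_serial_high/system_serial_low.
 */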
static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_CRASH_RESERVE
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base,
				NULL, NULL);
	/* invalid value specified or crashkernel=0 */
	if (ret || !crash_size)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;

		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN, crash_max);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long crash_max = crash_base + crash_size;
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
						  crash_base, crash_max);
		if (!start) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_CRASH_RESERVE */

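/*
 * Report which exception mode the CPUs entered the kernel in.  All CPUs
 * starting in HYP mode means the virtualization extensions are usable;
 * a mismatch between CPUs usually points at a bootloader or firmware bug.
 */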
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

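/*
 * The architecture-specific half of start_kernel(): identify the CPU,
 * pick the machine descriptor from the device tree or ATAGs, set up the
 * memory layout (memblock, paging, KASAN), register the standard
 * resources, wire up the restart handler and SMP operations, and finally
 * run the machine's init_early hook.
 */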
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	arm_efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	vgacon_register_screen(&vgacon_screen_info);
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

bool arch_cpu_is_hotpluggable(int num)
{
	return platform_can_hotplug_cpu(num);
}

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	"fphp",
	"asimdhp",
	"asimddp",
	"asimdfhm",
	"asimdbf16",
	"i8mm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"sb",
	"ssbs",
	NULL
};

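/*
 * /proc/cpuinfo show callback: one block per online CPU (glibc counts
 * the "processor" lines to determine how many CPUs are online), listing
 * BogoMIPS, the hwcap feature strings and the decoded CPUID fields,
 * followed by the board name, revision and serial number.
 */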
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};