xref: /linux/arch/arm/kernel/setup.c (revision 28ab1bb0e8f031dd7dd3462ff8f6b2e93fc77e7f)
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/efi.h>
11 #include <linux/export.h>
12 #include <linux/kernel.h>
13 #include <linux/stddef.h>
14 #include <linux/ioport.h>
15 #include <linux/delay.h>
16 #include <linux/utsname.h>
17 #include <linux/initrd.h>
18 #include <linux/console.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_platform.h>
22 #include <linux/init.h>
23 #include <linux/kexec.h>
24 #include <linux/of_fdt.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/proc_fs.h>
29 #include <linux/memblock.h>
30 #include <linux/bug.h>
31 #include <linux/compiler.h>
32 #include <linux/sort.h>
33 #include <linux/psci.h>
34 
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/efi.h>
40 #include <asm/elf.h>
41 #include <asm/early_ioremap.h>
42 #include <asm/fixmap.h>
43 #include <asm/procinfo.h>
44 #include <asm/psci.h>
45 #include <asm/sections.h>
46 #include <asm/setup.h>
47 #include <asm/smp_plat.h>
48 #include <asm/mach-types.h>
49 #include <asm/cacheflush.h>
50 #include <asm/cachetype.h>
51 #include <asm/tlbflush.h>
52 #include <asm/xen/hypervisor.h>
53 
54 #include <asm/prom.h>
55 #include <asm/mach/arch.h>
56 #include <asm/mach/irq.h>
57 #include <asm/mach/time.h>
58 #include <asm/system_info.h>
59 #include <asm/system_misc.h>
60 #include <asm/traps.h>
61 #include <asm/unwind.h>
62 #include <asm/memblock.h>
63 #include <asm/virt.h>
64 
65 #include "atags.h"
66 
67 
68 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
69 char fpe_type[8];
70 
71 static int __init fpe_setup(char *line)
72 {
73 	memcpy(fpe_type, line, 8);
74 	return 1;
75 }
76 
77 __setup("fpe=", fpe_setup);
78 #endif
79 
80 extern void init_default_cache_policy(unsigned long);
81 extern void paging_init(const struct machine_desc *desc);
82 extern void early_mm_init(const struct machine_desc *);
83 extern void adjust_lowmem_bounds(void);
84 extern enum reboot_mode reboot_mode;
85 extern void setup_dma_zone(const struct machine_desc *desc);
86 
87 unsigned int processor_id;
88 EXPORT_SYMBOL(processor_id);
89 unsigned int __machine_arch_type __read_mostly;
90 EXPORT_SYMBOL(__machine_arch_type);
91 unsigned int cacheid __read_mostly;
92 EXPORT_SYMBOL(cacheid);
93 
94 unsigned int __atags_pointer __initdata;
95 
96 unsigned int system_rev;
97 EXPORT_SYMBOL(system_rev);
98 
99 const char *system_serial;
100 EXPORT_SYMBOL(system_serial);
101 
102 unsigned int system_serial_low;
103 EXPORT_SYMBOL(system_serial_low);
104 
105 unsigned int system_serial_high;
106 EXPORT_SYMBOL(system_serial_high);
107 
108 unsigned int elf_hwcap __read_mostly;
109 EXPORT_SYMBOL(elf_hwcap);
110 
111 unsigned int elf_hwcap2 __read_mostly;
112 EXPORT_SYMBOL(elf_hwcap2);
113 
114 
115 #ifdef MULTI_CPU
116 struct processor processor __ro_after_init;
117 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
118 struct processor *cpu_vtable[NR_CPUS] = {
119 	[0] = &processor,
120 };
121 #endif
122 #endif
123 #ifdef MULTI_TLB
124 struct cpu_tlb_fns cpu_tlb __ro_after_init;
125 #endif
126 #ifdef MULTI_USER
127 struct cpu_user_fns cpu_user __ro_after_init;
128 #endif
129 #ifdef MULTI_CACHE
130 struct cpu_cache_fns cpu_cache __ro_after_init;
131 #endif
132 #ifdef CONFIG_OUTER_CACHE
133 struct outer_cache_fns outer_cache __ro_after_init;
134 EXPORT_SYMBOL(outer_cache);
135 #endif
136 
137 /*
138  * Cached cpu_architecture() result for use by assembler code.
139  * C code should use the cpu_architecture() function instead of accessing this
140  * variable directly.
141  */
142 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
143 
144 struct stack {
145 	u32 irq[3];
146 	u32 abt[3];
147 	u32 und[3];
148 	u32 fiq[3];
149 } ____cacheline_aligned;
150 
151 #ifndef CONFIG_CPU_V7M
152 static struct stack stacks[NR_CPUS];
153 #endif
154 
155 char elf_platform[ELF_PLATFORM_SIZE];
156 EXPORT_SYMBOL(elf_platform);
157 
158 static const char *cpu_name;
159 static const char *machine_name;
160 static char __initdata cmd_line[COMMAND_LINE_SIZE];
161 const struct machine_desc *machine_desc __initdata;
162 
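/*
 * ENDIANNESS evaluates to 'l' on a little-endian kernel and 'b' on a
 * big-endian one: casting the long to char picks up its least significant
 * byte, which sits at c[0] or c[3] depending on byte order.  It is
 * appended to the machine and ELF platform names in setup_processor().
 */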
163 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
164 #define ENDIANNESS ((char)endian_test.l)
165 
166 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
167 
168 /*
169  * Standard memory resources
170  */
171 static struct resource mem_res[] = {
172 	{
173 		.name = "Video RAM",
174 		.start = 0,
175 		.end = 0,
176 		.flags = IORESOURCE_MEM
177 	},
178 	{
179 		.name = "Kernel code",
180 		.start = 0,
181 		.end = 0,
182 		.flags = IORESOURCE_SYSTEM_RAM
183 	},
184 	{
185 		.name = "Kernel data",
186 		.start = 0,
187 		.end = 0,
188 		.flags = IORESOURCE_SYSTEM_RAM
189 	}
190 };
191 
192 #define video_ram   mem_res[0]
193 #define kernel_code mem_res[1]
194 #define kernel_data mem_res[2]
195 
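/*
 * Legacy PC parallel port I/O ranges (lp0/lp1/lp2), claimed as "reserved"
 * so that nothing else requests them.
 */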
196 static struct resource io_res[] = {
197 	{
198 		.name = "reserved",
199 		.start = 0x3bc,
200 		.end = 0x3be,
201 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
202 	},
203 	{
204 		.name = "reserved",
205 		.start = 0x378,
206 		.end = 0x37f,
207 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
208 	},
209 	{
210 		.name = "reserved",
211 		.start = 0x278,
212 		.end = 0x27f,
213 		.flags = IORESOURCE_IO | IORESOURCE_BUSY
214 	}
215 };
216 
217 #define lp0 io_res[0]
218 #define lp1 io_res[1]
219 #define lp2 io_res[2]
220 
221 static const char *proc_arch[] = {
222 	"undefined/unknown",
223 	"3",
224 	"4",
225 	"4T",
226 	"5",
227 	"5T",
228 	"5TE",
229 	"5TEJ",
230 	"6TEJ",
231 	"7",
232 	"7M",
233 	"?(12)",
234 	"?(13)",
235 	"?(14)",
236 	"?(15)",
237 	"?(16)",
238 	"?(17)",
239 };
240 
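/*
 * Derive the CPU architecture from the MIDR.  Legacy ID layouts are
 * decoded directly; parts using the revised CPUID scheme
 * (MIDR[19:16] == 0xf) are classified via ID_MMFR0, whose VMSA/PMSA
 * fields distinguish ARMv7 from ARMv6 memory models.
 */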
241 #ifdef CONFIG_CPU_V7M
242 static int __get_cpu_architecture(void)
243 {
244 	return CPU_ARCH_ARMv7M;
245 }
246 #else
247 static int __get_cpu_architecture(void)
248 {
249 	int cpu_arch;
250 
251 	if ((read_cpuid_id() & 0x0008f000) == 0) {
252 		cpu_arch = CPU_ARCH_UNKNOWN;
253 	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
254 		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
255 	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
256 		cpu_arch = (read_cpuid_id() >> 16) & 7;
257 		if (cpu_arch)
258 			cpu_arch += CPU_ARCH_ARMv3;
259 	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
260 		/* Revised CPUID format. Read the Memory Model Feature
261 		 * Register 0 and check for VMSAv7 or PMSAv7 */
262 		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
263 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
264 		    (mmfr0 & 0x000000f0) >= 0x00000030)
265 			cpu_arch = CPU_ARCH_ARMv7;
266 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
267 			 (mmfr0 & 0x000000f0) == 0x00000020)
268 			cpu_arch = CPU_ARCH_ARMv6;
269 		else
270 			cpu_arch = CPU_ARCH_UNKNOWN;
271 	} else
272 		cpu_arch = CPU_ARCH_UNKNOWN;
273 
274 	return cpu_arch;
275 }
276 #endif
277 
278 int __pure cpu_architecture(void)
279 {
280 	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
281 
282 	return __cpu_architecture;
283 }
284 
285 static int cpu_has_aliasing_icache(unsigned int arch)
286 {
287 	int aliasing_icache;
288 	unsigned int id_reg, num_sets, line_size;
289 
290 	/* PIPT caches never alias. */
291 	if (icache_is_pipt())
292 		return 0;
293 
294 	/* arch specifies the register format */
295 	switch (arch) {
296 	case CPU_ARCH_ARMv7:
297 		set_csselr(CSSELR_ICACHE | CSSELR_L1);
298 		isb();
299 		id_reg = read_ccsidr();
300 		line_size = 4 << ((id_reg & 0x7) + 2);
301 		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
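		/*
		 * One way of the I-cache spans line_size * num_sets bytes;
		 * if that exceeds a page, virtual index bits above
		 * PAGE_SHIFT come into play and the cache can alias.
		 */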
302 		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
303 		break;
304 	case CPU_ARCH_ARMv6:
305 		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
306 		break;
307 	default:
308 		/* I-cache aliases will be handled by D-cache aliasing code */
309 		aliasing_icache = 0;
310 	}
311 
312 	return aliasing_icache;
313 }
314 
315 static void __init cacheid_init(void)
316 {
317 	unsigned int arch = cpu_architecture();
318 
319 	if (arch >= CPU_ARCH_ARMv6) {
320 		unsigned int cachetype = read_cpuid_cachetype();
321 
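		/*
		 * CTR[31:29] == 0b100 selects the ARMv7 register layout;
		 * otherwise fall back to the ARMv6 format, where bit 23
		 * flags a D-cache that can alias.
		 */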
322 		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
323 			cacheid = 0;
324 		} else if ((cachetype & (7 << 29)) == 4 << 29) {
325 			/* ARMv7 register format */
326 			arch = CPU_ARCH_ARMv7;
327 			cacheid = CACHEID_VIPT_NONALIASING;
328 			switch (cachetype & (3 << 14)) {
329 			case (1 << 14):
330 				cacheid |= CACHEID_ASID_TAGGED;
331 				break;
332 			case (3 << 14):
333 				cacheid |= CACHEID_PIPT;
334 				break;
335 			}
336 		} else {
337 			arch = CPU_ARCH_ARMv6;
338 			if (cachetype & (1 << 23))
339 				cacheid = CACHEID_VIPT_ALIASING;
340 			else
341 				cacheid = CACHEID_VIPT_NONALIASING;
342 		}
343 		if (cpu_has_aliasing_icache(arch))
344 			cacheid |= CACHEID_VIPT_I_ALIASING;
345 	} else {
346 		cacheid = CACHEID_VIVT;
347 	}
348 
349 	pr_info("CPU: %s data cache, %s instruction cache\n",
350 		cache_is_vivt() ? "VIVT" :
351 		cache_is_vipt_aliasing() ? "VIPT aliasing" :
352 		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
353 		cache_is_vivt() ? "VIVT" :
354 		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
355 		icache_is_vipt_aliasing() ? "VIPT aliasing" :
356 		icache_is_pipt() ? "PIPT" :
357 		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
358 }
359 
360 /*
361  * These functions re-use the assembly code in head.S, which
362  * already provides the required functionality.
363  */
364 extern struct proc_info_list *lookup_processor_type(unsigned int);
365 
366 void __init early_print(const char *str, ...)
367 {
368 	extern void printascii(const char *);
369 	char buf[256];
370 	va_list ap;
371 
372 	va_start(ap, str);
373 	vsnprintf(buf, sizeof(buf), str, ap);
374 	va_end(ap);
375 
376 #ifdef CONFIG_DEBUG_LL
377 	printascii(buf);
378 #endif
379 	printk("%s", buf);
380 }
381 
382 #ifdef CONFIG_ARM_PATCH_IDIV
383 
384 static inline u32 __attribute_const__ sdiv_instruction(void)
385 {
386 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
387 		/* "sdiv r0, r0, r1" */
388 		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
389 		return __opcode_to_mem_thumb32(insn);
390 	}
391 
392 	/* "sdiv r0, r0, r1" */
393 	return __opcode_to_mem_arm(0xe710f110);
394 }
395 
396 static inline u32 __attribute_const__ udiv_instruction(void)
397 {
398 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
399 		/* "udiv r0, r0, r1" */
400 		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
401 		return __opcode_to_mem_thumb32(insn);
402 	}
403 
404 	/* "udiv r0, r0, r1" */
405 	return __opcode_to_mem_arm(0xe730f110);
406 }
407 
408 static inline u32 __attribute_const__ bx_lr_instruction(void)
409 {
410 	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
411 		/* "bx lr; nop" */
412 		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
413 		return __opcode_to_mem_thumb32(insn);
414 	}
415 
416 	/* "bx lr" */
417 	return __opcode_to_mem_arm(0xe12fff1e);
418 }
419 
420 static void __init patch_aeabi_idiv(void)
421 {
422 	extern void __aeabi_uidiv(void);
423 	extern void __aeabi_idiv(void);
424 	uintptr_t fn_addr;
425 	unsigned int mask;
426 
427 	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
428 	if (!(elf_hwcap & mask))
429 		return;
430 
431 	pr_info("CPU: div instructions available: patching division code\n");
432 
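	/*
	 * Patch each AEABI helper with a hardware divide followed by a
	 * return.  Masking off bit 0 strips the Thumb bit from the function
	 * address; the empty asm hides fn_addr's origin so the compiler
	 * cannot make assumptions about the code being rewritten.
	 */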
433 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
434 	asm ("" : "+g" (fn_addr));
435 	((u32 *)fn_addr)[0] = udiv_instruction();
436 	((u32 *)fn_addr)[1] = bx_lr_instruction();
437 	flush_icache_range(fn_addr, fn_addr + 8);
438 
439 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
440 	asm ("" : "+g" (fn_addr));
441 	((u32 *)fn_addr)[0] = sdiv_instruction();
442 	((u32 *)fn_addr)[1] = bx_lr_instruction();
443 	flush_icache_range(fn_addr, fn_addr + 8);
444 }
445 
446 #else
447 static inline void patch_aeabi_idiv(void) { }
448 #endif
449 
450 static void __init cpuid_init_hwcaps(void)
451 {
452 	int block;
453 	u32 isar5;
454 
455 	if (cpu_architecture() < CPU_ARCH_ARMv7)
456 		return;
457 
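	/* ID_ISAR0[27:24] (Divide): 1 = SDIV/UDIV in Thumb, 2 = ARM and Thumb */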
458 	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
459 	if (block >= 2)
460 		elf_hwcap |= HWCAP_IDIVA;
461 	if (block >= 1)
462 		elf_hwcap |= HWCAP_IDIVT;
463 
464 	/* LPAE implies atomic ldrd/strd instructions */
465 	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
466 	if (block >= 5)
467 		elf_hwcap |= HWCAP_LPAE;
468 
469 	/* check for supported v8 Crypto instructions */
470 	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
471 
472 	block = cpuid_feature_extract_field(isar5, 4);
473 	if (block >= 2)
474 		elf_hwcap2 |= HWCAP2_PMULL;
475 	if (block >= 1)
476 		elf_hwcap2 |= HWCAP2_AES;
477 
478 	block = cpuid_feature_extract_field(isar5, 8);
479 	if (block >= 1)
480 		elf_hwcap2 |= HWCAP2_SHA1;
481 
482 	block = cpuid_feature_extract_field(isar5, 12);
483 	if (block >= 1)
484 		elf_hwcap2 |= HWCAP2_SHA2;
485 
486 	block = cpuid_feature_extract_field(isar5, 16);
487 	if (block >= 1)
488 		elf_hwcap2 |= HWCAP2_CRC32;
489 }
490 
491 static void __init elf_hwcap_fixup(void)
492 {
493 	unsigned id = read_cpuid_id();
494 
495 	/*
496 	 * HWCAP_TLS is available only on 1136 r1p0 and later,
497 	 * see also kuser_get_tls_init.
498 	 */
499 	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
500 	    ((id >> 20) & 3) == 0) {
501 		elf_hwcap &= ~HWCAP_TLS;
502 		return;
503 	}
504 
505 	/* Verify if CPUID scheme is implemented */
506 	if ((id & 0x000f0000) != 0x000f0000)
507 		return;
508 
509 	/*
510 	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
511 	 * avoid advertising SWP; it may not be atomic with
512 	 * multiprocessing cores.
513 	 */
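	/*
	 * ID_ISAR3[15:12] (SynchPrim) and ID_ISAR4[23:20] (SynchPrim_frac)
	 * encode which exclusive-access instructions are implemented.
	 */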
514 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
515 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
516 	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
517 		elf_hwcap &= ~HWCAP_SWP;
518 }
519 
520 /*
521  * cpu_init - initialise one CPU.
522  *
523  * cpu_init sets up the per-CPU stacks.
524  */
525 void notrace cpu_init(void)
526 {
527 #ifndef CONFIG_CPU_V7M
528 	unsigned int cpu = smp_processor_id();
529 	struct stack *stk = &stacks[cpu];
530 
531 	if (cpu >= NR_CPUS) {
532 		pr_crit("CPU%u: bad primary CPU number\n", cpu);
533 		BUG();
534 	}
535 
536 	/*
537 	 * This only works on resume and secondary cores. For booting on the
538 	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
539 	 */
540 	set_my_cpu_offset(per_cpu_offset(cpu));
541 
542 	cpu_proc_init();
543 
544 	/*
545 	 * Define the placement constraint for the inline asm directive below.
546 	 * In Thumb-2, msr with an immediate value is not allowed.
547 	 */
548 #ifdef CONFIG_THUMB2_KERNEL
549 #define PLC	"r"
550 #else
551 #define PLC	"I"
552 #endif
553 
554 	/*
555 	 * setup stacks for re-entrant exception handlers
556 	 */
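	/*
	 * The asm below switches into IRQ, ABT, UND and FIQ modes in turn,
	 * points each mode's banked SP at its slot in this CPU's struct
	 * stack, and finally returns to SVC mode.
	 */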
557 	__asm__ (
558 	"msr	cpsr_c, %1\n\t"
559 	"add	r14, %0, %2\n\t"
560 	"mov	sp, r14\n\t"
561 	"msr	cpsr_c, %3\n\t"
562 	"add	r14, %0, %4\n\t"
563 	"mov	sp, r14\n\t"
564 	"msr	cpsr_c, %5\n\t"
565 	"add	r14, %0, %6\n\t"
566 	"mov	sp, r14\n\t"
567 	"msr	cpsr_c, %7\n\t"
568 	"add	r14, %0, %8\n\t"
569 	"mov	sp, r14\n\t"
570 	"msr	cpsr_c, %9"
571 	    :
572 	    : "r" (stk),
573 	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
574 	      "I" (offsetof(struct stack, irq[0])),
575 	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
576 	      "I" (offsetof(struct stack, abt[0])),
577 	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
578 	      "I" (offsetof(struct stack, und[0])),
579 	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
580 	      "I" (offsetof(struct stack, fiq[0])),
581 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
582 	    : "r14");
583 #endif
584 }
585 
586 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
587 
588 void __init smp_setup_processor_id(void)
589 {
590 	int i;
591 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
592 	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
593 
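	/*
	 * Map the booting CPU to logical CPU 0.  The logical slot that
	 * would normally hold it gets hardware CPU 0 instead, keeping the
	 * map a permutation of the possible CPUs.
	 */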
594 	cpu_logical_map(0) = cpu;
595 	for (i = 1; i < nr_cpu_ids; ++i)
596 		cpu_logical_map(i) = i == cpu ? 0 : i;
597 
598 	/*
599 	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
600 	 * using per-cpu variables too early; for example, lockdep will
601 	 * access per-cpu variables inside lock_release().
602 	 */
603 	set_my_cpu_offset(0);
604 
605 	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
606 }
607 
608 struct mpidr_hash mpidr_hash;
609 #ifdef CONFIG_SMP
610 /**
611  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
612  *			  level in order to build a linear index from an
613  *			  MPIDR value. The result is a collision-free hash
614  *			  carried out through shifting and ORing.
615  */
616 static void __init smp_build_mpidr_hash(void)
617 {
618 	u32 i, affinity;
619 	u32 fs[3], bits[3], ls, mask = 0;
620 	/*
621 	 * Pre-scan the list of MPIDRs and filter out bits that do
622 	 * not contribute to affinity levels, i.e. they never toggle.
623 	 */
624 	for_each_possible_cpu(i)
625 		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
626 	pr_debug("mask of set bits 0x%x\n", mask);
627 	/*
628 	 * Find and stash the last and first bit set at all affinity levels to
629 	 * check how many bits are required to represent them.
630 	 */
631 	for (i = 0; i < 3; i++) {
632 		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
633 		/*
634 		 * Find the positions of the most and least significant set
635 		 * bits to determine how many bits are required to express
636 		 * the affinity level.
637 		 */
638 		ls = fls(affinity);
639 		fs[i] = affinity ? ffs(affinity) - 1 : 0;
640 		bits[i] = ls - fs[i];
641 	}
642 	/*
643 	 * An index can be created from the MPIDR by isolating the
644 	 * significant bits at each affinity level and by shifting
645 	 * them in order to compress the 24-bit value space to a
646 	 * compact set of values. This is equivalent to hashing
647 	 * the MPIDR through shifting and ORing. It is a collision free
648 	 * hash though not minimal since some levels might contain a number
649 	 * of CPUs that is not an exact power of 2 and their bit
650 	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
651 	 */
652 	mpidr_hash.shift_aff[0] = fs[0];
653 	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
654 	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
655 						(bits[1] + bits[0]);
656 	mpidr_hash.mask = mask;
657 	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
658 	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
659 				mpidr_hash.shift_aff[0],
660 				mpidr_hash.shift_aff[1],
661 				mpidr_hash.shift_aff[2],
662 				mpidr_hash.mask,
663 				mpidr_hash.bits);
664 	/*
665 	 * 4x is an arbitrary value used to warn on a hash table much bigger
666 	 * than expected on most systems.
667 	 */
668 	if (mpidr_hash_size() > 4 * num_possible_cpus())
669 		pr_warn("Large number of MPIDR hash buckets detected\n");
670 	sync_cache_w(&mpidr_hash);
671 }
672 #endif
673 
674 /*
675  * locate processor in the list of supported processor types.  The linker
676  * builds this table for us from the entries in arch/arm/mm/proc-*.S
677  */
678 struct proc_info_list *lookup_processor(u32 midr)
679 {
680 	struct proc_info_list *list = lookup_processor_type(midr);
681 
682 	if (!list) {
683 		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
684 		       smp_processor_id(), midr);
685 		while (1)
686 		/* can't use cpu_relax() here as it may require MMU setup */;
687 	}
688 
689 	return list;
690 }
691 
692 static void __init setup_processor(void)
693 {
694 	unsigned int midr = read_cpuid_id();
695 	struct proc_info_list *list = lookup_processor(midr);
696 
697 	cpu_name = list->cpu_name;
698 	__cpu_architecture = __get_cpu_architecture();
699 
700 	init_proc_vtable(list->proc);
701 #ifdef MULTI_TLB
702 	cpu_tlb = *list->tlb;
703 #endif
704 #ifdef MULTI_USER
705 	cpu_user = *list->user;
706 #endif
707 #ifdef MULTI_CACHE
708 	cpu_cache = *list->cache;
709 #endif
710 
711 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
712 		list->cpu_name, midr, midr & 15,
713 		proc_arch[cpu_architecture()], get_cr());
714 
715 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
716 		 list->arch_name, ENDIANNESS);
717 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
718 		 list->elf_name, ENDIANNESS);
719 	elf_hwcap = list->elf_hwcap;
720 
721 	cpuid_init_hwcaps();
722 	patch_aeabi_idiv();
723 
724 #ifndef CONFIG_ARM_THUMB
725 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
726 #endif
727 #ifdef CONFIG_MMU
728 	init_default_cache_policy(list->__cpu_mm_mmu_flags);
729 #endif
730 	erratum_a15_798181_init();
731 
732 	elf_hwcap_fixup();
733 
734 	cacheid_init();
735 	cpu_init();
736 }
737 
738 void __init dump_machine_table(void)
739 {
740 	const struct machine_desc *p;
741 
742 	early_print("Available machine support:\n\nID (hex)\tNAME\n");
743 	for_each_machine_desc(p)
744 		early_print("%08x\t%s\n", p->nr, p->name);
745 
746 	early_print("\nPlease check your kernel config and/or bootloader.\n");
747 
748 	while (true)
749 		/* can't use cpu_relax() here as it may require MMU setup */;
750 }
751 
752 int __init arm_add_memory(u64 start, u64 size)
753 {
754 	u64 aligned_start;
755 
756 	/*
757 	 * Ensure that start/size are aligned to a page boundary.
758 	 * Size is rounded down, start is rounded up.
759 	 */
760 	aligned_start = PAGE_ALIGN(start);
761 	if (aligned_start > start + size)
762 		size = 0;
763 	else
764 		size -= aligned_start - start;
765 
766 #ifndef CONFIG_PHYS_ADDR_T_64BIT
767 	if (aligned_start > ULONG_MAX) {
768 		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
769 			(long long)start);
770 		return -EINVAL;
771 	}
772 
773 	if (aligned_start + size > ULONG_MAX) {
774 		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
775 			(long long)start);
776 		/*
777 		 * To ensure bank->start + bank->size is representable in
778 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
779 		 * This means we lose a page after masking.
780 		 */
781 		size = ULONG_MAX - aligned_start;
782 	}
783 #endif
784 
785 	if (aligned_start < PHYS_OFFSET) {
786 		if (aligned_start + size <= PHYS_OFFSET) {
787 			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
788 				aligned_start, aligned_start + size);
789 			return -EINVAL;
790 		}
791 
792 		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
793 			aligned_start, (u64)PHYS_OFFSET);
794 
795 		size -= PHYS_OFFSET - aligned_start;
796 		aligned_start = PHYS_OFFSET;
797 	}
798 
799 	start = aligned_start;
800 	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
801 
802 	/*
803 	 * Check whether this memory region still has non-zero size
804 	 * after the alignment adjustments above.
805 	 */
806 	if (size == 0)
807 		return -EINVAL;
808 
809 	memblock_add(start, size);
810 	return 0;
811 }
812 
813 /*
814  * Pick out the memory size.  We look for mem=size@start,
815  * where start and size are "size[KkMm]"
816  */
817 
818 static int __init early_mem(char *p)
819 {
820 	static int usermem __initdata = 0;
821 	u64 size;
822 	u64 start;
823 	char *endp;
824 
825 	/*
826 	 * If the user specifies memory size, we
827 	 * blow away any automatically generated
828 	 * size.
829 	 */
830 	if (usermem == 0) {
831 		usermem = 1;
832 		memblock_remove(memblock_start_of_DRAM(),
833 			memblock_end_of_DRAM() - memblock_start_of_DRAM());
834 	}
835 
836 	start = PHYS_OFFSET;
837 	size  = memparse(p, &endp);
838 	if (*endp == '@')
839 		start = memparse(endp + 1, NULL);
840 
841 	arm_add_memory(start, size);
842 
843 	return 0;
844 }
845 early_param("mem", early_mem);
846 
847 static void __init request_standard_resources(const struct machine_desc *mdesc)
848 {
849 	struct memblock_region *region;
850 	struct resource *res;
851 
852 	kernel_code.start   = virt_to_phys(_text);
853 	kernel_code.end     = virt_to_phys(__init_begin - 1);
854 	kernel_data.start   = virt_to_phys(_sdata);
855 	kernel_data.end     = virt_to_phys(_end - 1);
856 
857 	for_each_memblock(memory, region) {
858 		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
859 		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
860 		unsigned long boot_alias_start;
861 
862 		/*
863 		 * Some systems have a special memory alias which is only
864 		 * used for booting.  We need to advertise this region to
865 		 * kexec-tools so they know where bootable RAM is located.
866 		 */
867 		boot_alias_start = phys_to_idmap(start);
868 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
869 			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
870 			res->name = "System RAM (boot alias)";
871 			res->start = boot_alias_start;
872 			res->end = phys_to_idmap(end);
873 			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
874 			request_resource(&iomem_resource, res);
875 		}
876 
877 		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
878 		res->name  = "System RAM";
879 		res->start = start;
880 		res->end = end;
881 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
882 
883 		request_resource(&iomem_resource, res);
884 
885 		if (kernel_code.start >= res->start &&
886 		    kernel_code.end <= res->end)
887 			request_resource(res, &kernel_code);
888 		if (kernel_data.start >= res->start &&
889 		    kernel_data.end <= res->end)
890 			request_resource(res, &kernel_data);
891 	}
892 
893 	if (mdesc->video_start) {
894 		video_ram.start = mdesc->video_start;
895 		video_ram.end   = mdesc->video_end;
896 		request_resource(&iomem_resource, &video_ram);
897 	}
898 
899 	/*
900 	 * Some machines may never have lp0, lp1 or lp2; only reserve
901 	 * these legacy port ranges when the machine asks for them.
902 	 */
903 	if (mdesc->reserve_lp0)
904 		request_resource(&ioport_resource, &lp0);
905 	if (mdesc->reserve_lp1)
906 		request_resource(&ioport_resource, &lp1);
907 	if (mdesc->reserve_lp2)
908 		request_resource(&ioport_resource, &lp2);
909 }
910 
911 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
912     defined(CONFIG_EFI)
913 struct screen_info screen_info = {
914 	.orig_video_lines	= 30,
915 	.orig_video_cols	= 80,
916 	.orig_video_mode	= 0,
917 	.orig_video_ega_bx	= 0,
918 	.orig_video_isVGA	= 1,
919 	.orig_video_points	= 8
920 };
921 #endif
922 
923 static int __init customize_machine(void)
924 {
925 	/*
926 	 * Customizes platform devices, or adds new ones.
927 	 * On DT-based machines we fall back to populating the
928 	 * machine from the device tree if no callback is provided;
929 	 * otherwise we would always need an init_machine callback.
930 	 */
931 	if (machine_desc->init_machine)
932 		machine_desc->init_machine();
933 
934 	return 0;
935 }
936 arch_initcall(customize_machine);
937 
938 static int __init init_machine_late(void)
939 {
940 	struct device_node *root;
941 	int ret;
942 
943 	if (machine_desc->init_late)
944 		machine_desc->init_late();
945 
946 	root = of_find_node_by_path("/");
947 	if (root) {
948 		ret = of_property_read_string(root, "serial-number",
949 					      &system_serial);
950 		if (ret)
951 			system_serial = NULL;
952 	}
953 
954 	if (!system_serial)
955 		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
956 					  system_serial_high,
957 					  system_serial_low);
958 
959 	return 0;
960 }
961 late_initcall(init_machine_late);
962 
963 #ifdef CONFIG_KEXEC
964 /*
965  * The crash region must be aligned to 128MB to avoid
966  * zImage relocating below the reserved region.
967  */
968 #define CRASH_ALIGN	(128 << 20)
969 
970 static inline unsigned long long get_total_mem(void)
971 {
972 	unsigned long total;
973 
974 	total = max_low_pfn - min_low_pfn;
975 	return total << PAGE_SHIFT;
976 }
977 
978 /**
979  * reserve_crashkernel() - reserves a memory area for the crash kernel
980  *
981  * This function reserves the memory area given by the "crashkernel=" kernel
982  * command line parameter. The reserved memory is used by a dump capture
983  * kernel when the primary kernel crashes.
984  */
985 static void __init reserve_crashkernel(void)
986 {
987 	unsigned long long crash_size, crash_base;
988 	unsigned long long total_mem;
989 	int ret;
990 
991 	total_mem = get_total_mem();
992 	ret = parse_crashkernel(boot_command_line, total_mem,
993 				&crash_size, &crash_base);
994 	if (ret)
995 		return;
996 
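	/*
	 * With no base requested, place the region in lowmem below the
	 * highest idmap-able address, aligned to CRASH_ALIGN so a
	 * relocating zImage cannot end up inside it.
	 */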
997 	if (crash_base <= 0) {
998 		unsigned long long crash_max = idmap_to_phys((u32)~0);
999 		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
1000 		if (crash_max > lowmem_max)
1001 			crash_max = lowmem_max;
1002 		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
1003 						    crash_size, CRASH_ALIGN);
1004 		if (!crash_base) {
1005 			pr_err("crashkernel reservation failed - No suitable area found.\n");
1006 			return;
1007 		}
1008 	} else {
1009 		unsigned long long start;
1010 
1011 		start = memblock_find_in_range(crash_base,
1012 					       crash_base + crash_size,
1013 					       crash_size, SECTION_SIZE);
1014 		if (start != crash_base) {
1015 			pr_err("crashkernel reservation failed - memory is in use.\n");
1016 			return;
1017 		}
1018 	}
1019 
1020 	ret = memblock_reserve(crash_base, crash_size);
1021 	if (ret < 0) {
1022 		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
1023 			(unsigned long)crash_base);
1024 		return;
1025 	}
1026 
1027 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
1028 		(unsigned long)(crash_size >> 20),
1029 		(unsigned long)(crash_base >> 20),
1030 		(unsigned long)(total_mem >> 20));
1031 
1032 	/* The crashk resource must always be located in normal mem */
1033 	crashk_res.start = crash_base;
1034 	crashk_res.end = crash_base + crash_size - 1;
1035 	insert_resource(&iomem_resource, &crashk_res);
1036 
1037 	if (arm_has_idmap_alias()) {
1038 		/*
1039 		 * If we have a special RAM alias for use at boot, we
1040 		 * need to advertise to kexec tools where the alias is.
1041 		 */
1042 		static struct resource crashk_boot_res = {
1043 			.name = "Crash kernel (boot alias)",
1044 			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1045 		};
1046 
1047 		crashk_boot_res.start = phys_to_idmap(crash_base);
1048 		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1049 		insert_resource(&iomem_resource, &crashk_boot_res);
1050 	}
1051 }
1052 #else
1053 static inline void reserve_crashkernel(void) {}
1054 #endif /* CONFIG_KEXEC */
1055 
1056 void __init hyp_mode_check(void)
1057 {
1058 #ifdef CONFIG_ARM_VIRT_EXT
1059 	sync_boot_mode();
1060 
1061 	if (is_hyp_mode_available()) {
1062 		pr_info("CPU: All CPU(s) started in HYP mode.\n");
1063 		pr_info("CPU: Virtualization extensions available.\n");
1064 	} else if (is_hyp_mode_mismatched()) {
1065 		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1066 			__boot_cpu_mode & MODE_MASK);
1067 		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1068 	} else
1069 		pr_info("CPU: All CPU(s) started in SVC mode.\n");
1070 #endif
1071 }
1072 
1073 void __init setup_arch(char **cmdline_p)
1074 {
1075 	const struct machine_desc *mdesc;
1076 
1077 	setup_processor();
1078 	mdesc = setup_machine_fdt(__atags_pointer);
1079 	if (!mdesc)
1080 		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
1081 	if (!mdesc) {
1082 		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
1083 		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
1084 			    __atags_pointer);
1085 		if (__atags_pointer)
1086 			early_print("  r2[]=%*ph\n", 16,
1087 				    phys_to_virt(__atags_pointer));
1088 		dump_machine_table();
1089 	}
1090 
1091 	machine_desc = mdesc;
1092 	machine_name = mdesc->name;
1093 	dump_stack_set_arch_desc("%s", mdesc->name);
1094 
1095 	if (mdesc->reboot_mode != REBOOT_HARD)
1096 		reboot_mode = mdesc->reboot_mode;
1097 
1098 	init_mm.start_code = (unsigned long) _text;
1099 	init_mm.end_code   = (unsigned long) _etext;
1100 	init_mm.end_data   = (unsigned long) _edata;
1101 	init_mm.brk	   = (unsigned long) _end;
1102 
1103 	/* populate cmd_line too for later use, preserving boot_command_line */
1104 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1105 	*cmdline_p = cmd_line;
1106 
1107 	early_fixmap_init();
1108 	early_ioremap_init();
1109 
1110 	parse_early_param();
1111 
1112 #ifdef CONFIG_MMU
1113 	early_mm_init(mdesc);
1114 #endif
1115 	setup_dma_zone(mdesc);
1116 	xen_early_init();
1117 	efi_init();
1118 	/*
1119 	 * Make sure the lowmem/highmem split is calculated appropriately
1120 	 * before reserving/allocating any memory
1121 	 */
1122 	adjust_lowmem_bounds();
1123 	arm_memblock_init(mdesc);
1124 	/* Memory may have been removed so recalculate the bounds. */
1125 	adjust_lowmem_bounds();
1126 
1127 	early_ioremap_reset();
1128 
1129 	paging_init(mdesc);
1130 	request_standard_resources(mdesc);
1131 
1132 	if (mdesc->restart)
1133 		arm_pm_restart = mdesc->restart;
1134 
1135 	unflatten_device_tree();
1136 
1137 	arm_dt_init_cpu_maps();
1138 	psci_dt_init();
1139 #ifdef CONFIG_SMP
1140 	if (is_smp()) {
1141 		if (!mdesc->smp_init || !mdesc->smp_init()) {
1142 			if (psci_smp_available())
1143 				smp_set_ops(&psci_smp_ops);
1144 			else if (mdesc->smp)
1145 				smp_set_ops(mdesc->smp);
1146 		}
1147 		smp_init_cpus();
1148 		smp_build_mpidr_hash();
1149 	}
1150 #endif
1151 
1152 	if (!is_smp())
1153 		hyp_mode_check();
1154 
1155 	reserve_crashkernel();
1156 
1157 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1158 	handle_arch_irq = mdesc->handle_irq;
1159 #endif
1160 
1161 #ifdef CONFIG_VT
1162 #if defined(CONFIG_VGA_CONSOLE)
1163 	conswitchp = &vga_con;
1164 #elif defined(CONFIG_DUMMY_CONSOLE)
1165 	conswitchp = &dummy_con;
1166 #endif
1167 #endif
1168 
1169 	if (mdesc->init_early)
1170 		mdesc->init_early();
1171 }
1172 
1173 
1174 static int __init topology_init(void)
1175 {
1176 	int cpu;
1177 
1178 	for_each_possible_cpu(cpu) {
1179 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1180 		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1181 		register_cpu(&cpuinfo->cpu, cpu);
1182 	}
1183 
1184 	return 0;
1185 }
1186 subsys_initcall(topology_init);
1187 
1188 #ifdef CONFIG_HAVE_PROC_CPU
1189 static int __init proc_cpu_init(void)
1190 {
1191 	struct proc_dir_entry *res;
1192 
1193 	res = proc_mkdir("cpu", NULL);
1194 	if (!res)
1195 		return -ENOMEM;
1196 	return 0;
1197 }
1198 fs_initcall(proc_cpu_init);
1199 #endif
1200 
1201 static const char *hwcap_str[] = {
1202 	"swp",
1203 	"half",
1204 	"thumb",
1205 	"26bit",
1206 	"fastmult",
1207 	"fpa",
1208 	"vfp",
1209 	"edsp",
1210 	"java",
1211 	"iwmmxt",
1212 	"crunch",
1213 	"thumbee",
1214 	"neon",
1215 	"vfpv3",
1216 	"vfpv3d16",
1217 	"tls",
1218 	"vfpv4",
1219 	"idiva",
1220 	"idivt",
1221 	"vfpd32",
1222 	"lpae",
1223 	"evtstrm",
1224 	NULL
1225 };
1226 
1227 static const char *hwcap2_str[] = {
1228 	"aes",
1229 	"pmull",
1230 	"sha1",
1231 	"sha2",
1232 	"crc32",
1233 	NULL
1234 };
1235 
1236 static int c_show(struct seq_file *m, void *v)
1237 {
1238 	int i, j;
1239 	u32 cpuid;
1240 
1241 	for_each_online_cpu(i) {
1242 		/*
1243 		 * glibc reads /proc/cpuinfo to determine the number of
1244 		 * online processors, looking for lines beginning with
1245 		 * "processor".  Give glibc what it expects.
1246 		 */
1247 		seq_printf(m, "processor\t: %d\n", i);
1248 		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1249 		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1250 			   cpu_name, cpuid & 15, elf_platform);
1251 
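		/*
		 * BogoMIPS = loops_per_jiffy * HZ / 500000, printed with
		 * two decimal places.
		 */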
1252 #if defined(CONFIG_SMP)
1253 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1254 			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1255 			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1256 #else
1257 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1258 			   loops_per_jiffy / (500000/HZ),
1259 			   (loops_per_jiffy / (5000/HZ)) % 100);
1260 #endif
1261 		/* dump out the processor features */
1262 		seq_puts(m, "Features\t: ");
1263 
1264 		for (j = 0; hwcap_str[j]; j++)
1265 			if (elf_hwcap & (1 << j))
1266 				seq_printf(m, "%s ", hwcap_str[j]);
1267 
1268 		for (j = 0; hwcap2_str[j]; j++)
1269 			if (elf_hwcap2 & (1 << j))
1270 				seq_printf(m, "%s ", hwcap2_str[j]);
1271 
1272 		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1273 		seq_printf(m, "CPU architecture: %s\n",
1274 			   proc_arch[cpu_architecture()]);
1275 
1276 		if ((cpuid & 0x0008f000) == 0x00000000) {
1277 			/* pre-ARM7 */
1278 			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1279 		} else {
1280 			if ((cpuid & 0x0008f000) == 0x00007000) {
1281 				/* ARM7 */
1282 				seq_printf(m, "CPU variant\t: 0x%02x\n",
1283 					   (cpuid >> 16) & 127);
1284 			} else {
1285 				/* post-ARM7 */
1286 				seq_printf(m, "CPU variant\t: 0x%x\n",
1287 					   (cpuid >> 20) & 15);
1288 			}
1289 			seq_printf(m, "CPU part\t: 0x%03x\n",
1290 				   (cpuid >> 4) & 0xfff);
1291 		}
1292 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1293 	}
1294 
1295 	seq_printf(m, "Hardware\t: %s\n", machine_name);
1296 	seq_printf(m, "Revision\t: %04x\n", system_rev);
1297 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1298 
1299 	return 0;
1300 }
1301 
1302 static void *c_start(struct seq_file *m, loff_t *pos)
1303 {
1304 	return *pos < 1 ? (void *)1 : NULL;
1305 }
1306 
1307 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1308 {
1309 	++*pos;
1310 	return NULL;
1311 }
1312 
1313 static void c_stop(struct seq_file *m, void *v)
1314 {
1315 }
1316 
1317 const struct seq_operations cpuinfo_op = {
1318 	.start	= c_start,
1319 	.next	= c_next,
1320 	.stop	= c_stop,
1321 	.show	= c_show
1322 };
1323