xref: /linux/arch/arm64/kernel/setup.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>

#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

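/*
 * The default compat hwcaps below cover features that the AArch32
 * execution state of an ARMv8 CPU is expected to implement (VFPv4, NEON,
 * IDIV, LPAE, ...), so they can be set unconditionally rather than being
 * probed at boot.
 */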
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

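/*
 * Kernel-internal capability bits (the ARM64_* constants in
 * asm/cpufeature.h) detected at boot; distinct from the user-visible
 * elf_hwcap above.
 */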
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

static const char *cpu_name;
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

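/*
 * Thin wrapper around printk(): safe to call this early because printk()
 * buffers output in the kernel log until a console is registered.
 */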
void __init early_print(const char *str, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

	printk("%s", buf);
}

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do not
	 * contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the positions of the most and least significant
		 * bits set to determine how many bits are required to
		 * express this affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting them in
	 * order to compress the 32-bit value space into a smaller set of
	 * values. This is equivalent to hashing the MPIDR_EL1 through
	 * shifting and ORing. It is a collision free hash, though not
	 * minimal, since some levels might contain a number of CPUs that
	 * is not an exact power of 2 and their bit representation might
	 * contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
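	/*
	 * Worked example (values are illustrative, not from any particular
	 * SoC): four CPUs with MPIDR_EL1 Aff1:Aff0 = {0:0, 0:1, 1:0, 1:1}
	 * give mask = 0x101, fs[0] = fs[1] = 0 and bits[0] = bits[1] = 1,
	 * hence shift_aff[0] = 0 and shift_aff[1] = 8 + 0 - 1 = 7. The
	 * resulting hash,
	 *
	 *	index = (mpidr & 0x1) | ((mpidr & 0x100) >> 7),
	 *
	 * maps the four CPUs to indices 0..3 without collisions.
	 */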
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif

static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;
	u32 cwg;
	int cls;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
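	/*
	 * For example, the AES field at bits [7:4] reads 0x0 when no crypto
	 * instructions are implemented, 0x1 when AESE/AESD/AESMC/AESIMC are
	 * present and 0x2 when PMULL/PMULL2 are implemented as well, which
	 * is why the switch below deliberately falls through from case 2 to
	 * case 1.
	 */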
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fall through */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information to the above, but
	 * pertaining to the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fall through */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n",
			dt_phys, phys_to_virt(dt_phys));

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);
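	/*
	 * Nest the kernel text/data resources inside the matching "System
	 * RAM" region, so that /proc/iomem ends up looking roughly like
	 * (addresses purely illustrative):
	 *
	 *	40000000-7fffffff : System RAM
	 *	  40080000-404fffff : Kernel code
	 *	  40600000-40ffffff : Kernel data
	 */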

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

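/*
 * Logical CPU number to hardware ID (MPIDR_EL1 affinity value) mapping.
 * Entries start out INVALID_HWID; slot 0 is filled in by
 * smp_setup_processor_id() and the rest by the SMP enumeration code.
 */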
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible earlycon,
	 * so that any pending System Error is reported as soon as we are
	 * able to log it.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	unflatten_device_tree();

	psci_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);

static int __init topology_init(void)
{
	int i;

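	/*
	 * Register a cpu device for every possible CPU under
	 * /sys/devices/system/cpu; marking them hotpluggable exposes the
	 * per-cpu "online" attribute to userspace.
	 */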
	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

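/*
 * Order must match the HWCAP_* bit positions in asm/hwcap.h (and, for the
 * compat tables below, the COMPAT_HWCAP_* bits): c_show() indexes these
 * strings by bit number.
 */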
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_puts(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

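/*
 * The seq_file iterator deliberately produces a single "record": c_start()
 * returns a non-NULL token only for position 0 and c_next() always ends
 * the walk, so c_show() runs exactly once and loops over the online CPUs
 * itself.
 */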
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
550