/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

void __init early_print(const char *str, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

	printk("%s", buf);
}

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
	 * by early use of percpu variables; lockdep, for example,
	 * accesses percpu variables inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
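
/*
 * For reference: MPIDR_EL1 encodes the CPU topology in four affinity
 * fields, Aff0 in bits [7:0], Aff1 in [15:8], Aff2 in [23:16] and
 * Aff3 in [39:32]. MPIDR_HWID_BITMASK keeps exactly those fields, so
 * cpu_logical_map() stores only the topology-relevant bits of each
 * CPU's MPIDR_EL1.
 */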

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do not
	 * contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels
	 * to check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the positions of the MSB and LSB to determine how
		 * many bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting them in
	 * order to compress the 32-bit value space to a compressed set of
	 * values. This is equivalent to hashing the MPIDR_EL1 through
	 * shifting and ORing. It is a collision-free hash, though not
	 * minimal, since some levels might contain a number of CPUs that
	 * is not an exact power of 2 and their bit representation might
	 * contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
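
/*
 * Worked example for smp_build_mpidr_hash(), assuming a hypothetical
 * machine with two clusters of two cores, i.e. MPIDR_EL1 values 0x0,
 * 0x1, 0x100 and 0x101:
 *
 *	mask	  = 0x101 (only Aff0 bit 0 and Aff1 bit 0 ever toggle)
 *	fs	  = { 0, 0, 0, 0 }	bits = { 1, 1, 0, 0 }
 *	shift_aff = { 0 + 0, 8 + 0 - 1, ... } = { 0, 7, ... }
 *
 * Isolating each level's significant bits and shifting right then gives
 * index = (mpidr & 0x1) | ((mpidr & 0x100) >> 7), which maps the four
 * CPUs onto the dense, collision-free indices 0, 1, 2 and 3.
 */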

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init do_post_cpus_up_work(void)
{
	hyp_mode_check();
	apply_alternatives_all();
}

#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
	do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */

static void __init setup_processor(void)
{
	u64 features, block;
	u32 cwg;
	int cls;

	printk("CPU: AArch64 Processor [%08x] revision %d\n",
	       read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
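
	/*
	 * For example, the AES field lives in bits [7:4]: 1 means the AES
	 * instructions are implemented, 2 additionally implies PMULL, and
	 * any value with bit 3 set is negative, hence reserved, and is
	 * skipped by the checks below.
	 */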
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fall through */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries information similar to the above, but
	 * pertaining to the AArch32 execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fall through */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fall through */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
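
/*
 * The hwcap words populated above are what the ELF loader exposes to
 * userspace via the auxiliary vector: getauxval(AT_HWCAP) for native
 * tasks, and the compat words for 32-bit (AArch32) tasks.
 */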

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n",
			dt_phys, phys_to_virt(dt_phys));

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
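
/*
 * The resulting resource tree is visible through /proc/iomem. With a
 * hypothetical single memory bank it would look roughly like:
 *
 *	40000000-7fffffff : System RAM
 *	  40080000-40ffffff : Kernel code
 *	  41100000-4121ffff : Kernel data
 */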

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible earlycon,
	 * so that any pending System Error can actually be reported.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
		cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
		of_smp_init_cpus();
#endif
	} else {
		psci_acpi_init();
		acpi_init_cpus();
	}

#ifdef CONFIG_SMP
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL		/* sentinel: c_show() iterates until NULL */
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
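
/*
 * Illustrative output for a hypothetical single Cortex-A57 r1p1
 * (MIDR_EL1 = 0x411fd071) reporting the default 64-bit hwcaps:
 *
 *	processor	: 0
 *	Features	: fp asimd evtstrm
 *	CPU implementer	: 0x41
 *	CPU architecture: 8
 *	CPU variant	: 0x1
 *	CPU part	: 0xd07
 *	CPU revision	: 1
 */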

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
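
/*
 * These operations back /proc/cpuinfo (wired up in fs/proc/cpuinfo.c):
 * c_start() accepts only position 0 and c_next() always returns NULL,
 * so c_show() runs exactly once per read and emits every online CPU in
 * that single pass.
 */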
582