xref: /linux/arch/arm64/kernel/setup.c (revision c7e1e3ccfbd153c890240a391f258efaedfa94d0)
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>
#include <linux/psci.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif
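
/*
 * Illustration: a compat (AArch32) task sees these bits in its AT_HWCAP
 * auxv entry, where 32-bit userspace knows them by the ARM uapi
 * <asm/hwcap.h> names, e.g. (getauxval() is glibc API):
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_NEON)
 *		;	// NEON is usable from this 32-bit task
 *
 * The default above advertises only what ARMv8 guarantees for AArch32
 * EL0; probed features (e.g. crypto) are reported via compat_elf_hwcap2.
 */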

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused
	 * by using per-cpu variables too early; lockdep, for example,
	 * accesses per-cpu variables in lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
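
/*
 * Example: MPIDR_HWID_BITMASK keeps only the affinity fields
 * (Aff3[39:32], Aff2[23:16], Aff1[15:8], Aff0[7:0]) and drops
 * non-identifying bits such as MT and U, so an illustrative raw
 * MPIDR_EL1 of 0x80000003 records hwid 0x3 in cpu_logical_map(0).
 */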

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the positions of the MSB and LSB to determine
		 * how many bits are required to express this affinity
		 * level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
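
/*
 * Worked example (illustrative values, not from real hardware): with two
 * clusters of four CPUs each, MPIDRs {0x0..0x3, 0x100..0x103}:
 *
 *	mask = 0x103		(Aff0 toggles bits [1:0], Aff1 toggles bit 8)
 *	bits[0] = 2, bits[1] = 1, bits[2] = bits[3] = 0
 *	shift_aff[0] = 0, shift_aff[1] = 8 + 0 - 2 = 6
 *	mpidr_hash.bits = 3	=> 8 hash buckets
 *
 * A consumer hashing MPIDR 0x103 computes
 * (0x3 >> shift_aff[0]) | (0x100 >> shift_aff[1]) = 3 | 4 = 7, so the
 * eight CPUs map onto indices 0..7 with no collisions.
 */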

static void __init setup_processor(void)
{
	u64 features;
	s64 block;
	u32 cwg;
	int cls;

	pr_info("CPU: AArch64 Processor [%08x] revision %d\n",
		read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
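	/*
	 * Example decode (illustrative value, not from real hardware):
	 * features == 0x11120 would mean AES[7:4] = 2, SHA1[11:8] = 1,
	 * SHA2[15:12] = 1 and CRC32[19:16] = 1, yielding
	 * HWCAP_PMULL|HWCAP_AES|HWCAP_SHA1|HWCAP_SHA2|HWCAP_CRC32 below.
	 */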
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fall through: PMULL implies AES */
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		elf_hwcap |= HWCAP_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		elf_hwcap |= HWCAP_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		elf_hwcap |= HWCAP_CRC32;

	block = cpuid_feature_extract_field(features, 20);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_ATOMICS;
			/* fall through */
		case 1:
			/* RESERVED */
		case 0:
			break;
		}
	}

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information, but pertaining to
	 * the AArch32 (32-bit) execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fall through: PMULL implies AES */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
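
/*
 * Note: both failure modes funnel into the pr_crit() above. As an
 * illustration, a dtb handed over at physical 0x40000004 would violate
 * the 8-byte alignment requirement and fixmap_remap_fdt() would return
 * NULL, while a blob whose FDT header magic (big-endian 0xd00dfeed) has
 * been overwritten is rejected by early_init_dt_scan() instead.
 */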

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
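
/*
 * Example: on a machine with a single memblock region, this nesting
 * shows up in /proc/iomem roughly as (addresses illustrative only):
 *
 *	40000000-bfffffff : System RAM
 *	  40080000-405cffff : Kernel code
 *	  40620000-407fffff : Kernel data
 */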

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible
	 * earlycon, so that any System Errors (SErrors) taken from now
	 * on can actually be reported.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
	} else {
		psci_acpi_init();
	}
	xen_early_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}
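
/*
 * For reference, the arm64 boot protocol (Documentation/arm64/booting.txt)
 * requires at kernel entry:
 *
 *	x0 = physical address of the device tree blob
 *	x1 = 0, x2 = 0, x3 = 0	(reserved for future use)
 *
 * A loader that, for example, leaves a scratch value in x2 triggers the
 * pr_err() above, and the recorded boot_args[] preserve the evidence.
 */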

static int __init arm64_device_init(void)
{
	if (of_have_populated_dt()) {
		of_iommu_init();
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
	} else if (acpi_disabled) {
		pr_crit("Device tree not populated\n");
	}
	return 0;
}
arch_initcall_sync(arm64_device_init);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);
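
/*
 * Example: registering each CPU with hotpluggable = 1 exposes an
 * "online" control per CPU in sysfs, e.g.:
 *
 *	# echo 0 > /sys/devices/system/cpu/cpu1/online
 *	# echo 1 > /sys/devices/system/cpu/cpu1/online
 */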

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	NULL
};
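
/*
 * The order above must match the HWCAP_* bit positions in
 * arch/arm64/include/uapi/asm/hwcap.h: c_show() below prints entry j
 * when bit (1 << j) is set in elf_hwcap, e.g. "fp" for HWCAP_FP
 * (1 << 0) and "atomics" for HWCAP_ATOMICS (1 << 8).
 */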

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	/* Sentinel: c_show() iterates until NULL */
	NULL
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_puts(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
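
/*
 * Sample /proc/cpuinfo output (values illustrative, roughly what a
 * Cortex-A53-class part would produce):
 *
 *	processor	: 0
 *	Features	: fp asimd evtstrm aes pmull sha1 sha2 crc32
 *	CPU implementer	: 0x41
 *	CPU architecture: 8
 *	CPU variant	: 0x0
 *	CPU part	: 0xd03
 *	CPU revision	: 4
 */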

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
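
/*
 * cpuinfo_op is hooked up by fs/proc/cpuinfo.c via seq_open(). The
 * iterator is single-shot: c_start() hands back one non-NULL token
 * (c_show() itself walks every online CPU) and c_next() then ends the
 * sequence, so one read of /proc/cpuinfo produces one full pass.
 */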
553