// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <linux/swiotlb.h>

#include <asm/addrspace.h>
#include <asm/alternative.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/unwind.h>

/* Offsets into the SMBIOS type 0 (BIOS) and type 4 (Processor) structures */
#define SMBIOS_BIOSSIZE_OFFSET		0x09
#define SMBIOS_BIOSEXTERN_OFFSET	0x13
#define SMBIOS_FREQLOW_OFFSET		0x16
#define SMBIOS_FREQHIGH_OFFSET		0x17
#define SMBIOS_FREQLOW_MASK		0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET	0x23
#define SMBIOS_THREAD_PACKAGE_OFFSET	0x25
#define LOONGSON_EFI_ENABLE		(1 << 3)

unsigned long fw_arg0, fw_arg1, fw_arg2;
DEFINE_PER_CPU(unsigned long, kernelsp);
struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

struct loongson_board_info b_info;
static const char dmi_empty_string[] = "        ";

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
char init_command_line[COMMAND_LINE_SIZE] __initdata;

static int num_standard_resources;
static struct resource *standard_resources;

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource  = { .name = "Kernel bss", };

const char *get_system_type(void)
{
	return "generic-loongson-machine";
}

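/* Apply instruction alternatives now that CPU features have been probed. */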
void __init arch_cpu_finalize_init(void)
{
	alternative_instructions();
}

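/*
 * Return string number @s (1-based) from the string-set that follows the
 * formatted area of DMI structure @dm; collapse the all-spaces placeholder
 * to dmi_empty_string, and return "" if the string is absent.
 */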
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
	const u8 *bp = ((u8 *) dm) + dm->length;

	if (s) {
		s--;
		while (s > 0 && *bp) {
			bp += strlen(bp) + 1;
			s--;
		}

		if (*bp != 0) {
			size_t len = strlen(bp) + 1;
			size_t cmp_len = len > 8 ? 8 : len;

			if (!memcmp(bp, dmi_empty_string, cmp_len))
				return dmi_empty_string;

			return bp;
		}
	}

	return "";
}

static void __init parse_cpu_table(const struct dmi_header *dm)
{
	long freq_temp = 0;
	char *dmi_data = (char *)dm;

	/* "Current Speed" is a little-endian word at 0x16/0x17, in MHz */
	freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
			((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
	cpu_clock_freq = freq_temp * 1000000;

	/* Offset 0x10 holds the "Processor Version" string number */
	loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
	loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);

	pr_info("CpuClock = %llu\n", cpu_clock_freq);
}

static void __init parse_bios_table(const struct dmi_header *dm)
{
	char *dmi_data = (char *)dm;

	/* "BIOS ROM Size" at 0x09 encodes 64K * (n + 1) bytes; store it in KiB */
	b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
}

static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
	switch (dm->type) {
	case 0x0: /* BIOS Information */
		parse_bios_table(dm);
		break;
	case 0x4: /* Processor Information */
		parse_cpu_table(dm);
		break;
	}
}

static void __init smbios_parse(void)
{
	b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
	b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
	b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
	b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
	b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
	dmi_walk(find_tokens, NULL);
}

#ifdef CONFIG_ARCH_WRITECOMBINE
bool wc_enabled = true;
#else
bool wc_enabled = false;
#endif

EXPORT_SYMBOL(wc_enabled);

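/*
 * Override the Kconfig default from the kernel command line,
 * e.g. "writecombine=on" or "writecombine=off".
 */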
static int __init setup_writecombine(char *p)
{
	if (!strcmp(p, "on"))
		wc_enabled = true;
	else if (!strcmp(p, "off"))
		wc_enabled = false;
	else
		pr_warn("Unknown writecombine setting \"%s\".\n", p);

	return 0;
}
early_param("writecombine", setup_writecombine);

static int usermem __initdata;

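/*
 * Parse "mem=<size>@<start>"; the @<start> part is mandatory here,
 * e.g. "mem=1G@0x200000" (the values are illustrative). The first use
 * discards the firmware-provided memory map entirely.
 */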
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we blow away any
	 * automatically generated size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@') {
		start = memparse(p + 1, &p);
	} else {
		pr_err("Invalid format!\n");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_NUMA))
		memblock_add(start, size);
	else
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);

	return 0;
}
early_param("mem", early_parse_mem);

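/*
 * In a kdump kernel, locate and reserve the ELF core header left behind by
 * the crashed kernel so that /proc/vmcore can expose the old memory image.
 */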
static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	u64 i;
	phys_addr_t start, end;

	if (!is_kdump_kernel())
		return;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment, that should all be kdump
				 * reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr overlaps a reserved region\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}

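/*
 * Reserve memory for a future crash/kdump kernel, as requested on the
 * command line, e.g. "crashkernel=512M" or "crashkernel=512M,high" (sizes
 * are illustrative); parsing and placement use the generic helpers shared
 * with other architectures.
 */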
static void __init arch_reserve_crashkernel(void)
{
	int ret;
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base, &low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
}

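/*
 * Pick a device tree on non-ACPI systems: prefer a valid built-in DTB,
 * fall back to the firmware-provided one, then scan it into the early
 * memblock state. ACPI-based systems skip FDT handling altogether.
 */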
static void __init fdt_setup(void)
{
#ifdef CONFIG_OF_EARLY_FLATTREE
	void *fdt_pointer;

	/* ACPI-based systems do not require parsing fdt */
	if (acpi_os_get_root_pointer())
		return;

	/* Prefer to use built-in dtb, checking its validity first. */
	if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
		fdt_pointer = __dtb_start;
	else
		fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */

	if (!fdt_pointer || fdt_check_header(fdt_pointer))
		return;

	early_init_dt_scan(fdt_pointer);
	early_init_fdt_reserve_self();

	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); /* max_low_pfn is a PFN */
#endif
}

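/*
 * Assemble the final kernel command line. Precedence, strongest first:
 * CONFIG_CMDLINE_FORCE uses the built-in string alone; on FDT systems the
 * bootloader string saved in init_command_line is appended to whatever
 * /chosen provided; CONFIG_CMDLINE_EXTEND appends the built-in string;
 * CONFIG_CMDLINE_BOOTLOADER uses it only as a fallback.
 */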
static void __init bootcmdline_init(char **cmdline_p)
{
	/*
	 * If CONFIG_CMDLINE_FORCE is enabled then initializing the command line
	 * is trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
		goto out;
	}

#ifdef CONFIG_OF_FLATTREE
	/*
	 * If CONFIG_CMDLINE_BOOTLOADER is enabled and we are on an FDT-based
	 * system, boot_command_line will be overwritten by early_init_dt_scan_chosen().
	 * So we need to append init_command_line (the original copy of
	 * boot_command_line) to boot_command_line.
	 */
	if (initial_boot_params) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

		if (!strstr(boot_command_line, init_command_line))
			strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);

		goto out;
	}
#endif

	/*
	 * Append the built-in command line to the bootloader command line if
	 * CONFIG_CMDLINE_EXTEND is enabled.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	}

	/*
	 * Use the built-in command line if the bootloader command line is empty.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0])
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);

out:
	*cmdline_p = boot_command_line;
}

void __init platform_init(void)
{
	arch_reserve_vmcore();
	arch_reserve_crashkernel();

#ifdef CONFIG_ACPI
	acpi_table_upgrade();
	acpi_gbl_use_default_register_widths = false;
	acpi_boot_table_init();
#endif

	early_init_fdt_scan_reserved_mem();
	unflatten_and_copy_device_tree();

#ifdef CONFIG_NUMA
	init_numa_memory();
#endif
	dmi_setup();
	smbios_parse();
	pr_info("The BIOS Version: %s\n", b_info.bios_version);

	efi_runtime_init();
}

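/*
 * Make sure the region covering the kernel image is present in the
 * memblock memory map; firmware may have omitted it.
 */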
static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

/*
 * arch_mem_init - initialize memory management subsystem
 */
static void __init arch_mem_init(char **cmdline_p)
{
	if (usermem)
		pr_info("User-defined physical RAM map overrides the firmware-provided one\n");

	check_kernel_sections_mem();

	/*
	 * To reduce the possibility of a kernel panic when the IO TLB memory
	 * allocation fails under CONFIG_SWIOTLB, allocate as little low memory
	 * as possible before swiotlb_init(), so make sparse_init() use
	 * top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	swiotlb_init(true, SWIOTLB_VERBOSE);

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)),
				   PFN_UP(__pa_symbol(&__nosave_end)));

	memblock_dump_all();

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

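/*
 * Populate /proc/iomem: one "System RAM" (or "Reserved" for nomap)
 * resource per memblock region, with the kernel code/data/bss resources
 * nested inside whichever region contains them.
 */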
static void __init resource_init(void)
{
	long i = 0;
	size_t res_size;
	struct resource *res;
	struct memblock_region *region;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (!memblock_is_nomap(region)) {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		} else {
			res->name  = "Reserved";
			res->flags = IORESOURCE_MEM;
			res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data, so we
		 * simply request the kernel resources against each region and
		 * let the resource manager test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
	}
}

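/*
 * Register a legacy ISA I/O range with the logic PIO layer and map it at
 * PCI_IOBASE. Legacy ISA must end up at logical I/O port 0, so the
 * registration is undone if the allocator hands back a non-zero start.
 */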
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
				resource_size_t hw_start, resource_size_t size)
{
	int ret = 0;
	unsigned long vaddr;
	struct logic_pio_hwaddr *range;

	range = kzalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	range->fwnode = fwnode;
	range->size = size = round_up(size, PAGE_SIZE);
	range->hw_start = hw_start;
	range->flags = LOGIC_PIO_CPU_MMIO;

	ret = logic_pio_register_range(range);
	if (ret) {
		kfree(range);
		return ret;
	}

	/* Legacy ISA must be placed at the start of PCI_IOBASE */
	if (range->io_start != 0) {
		logic_pio_unregister_range(range);
		kfree(range);
		return -EINVAL;
	}

	vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));

	return 0;
}

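/*
 * Scan the device tree for "isa" bridge nodes and register their legacy
 * I/O ranges; MEM ranges are only logged.
 */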
static __init int arch_reserve_pio_range(void)
{
	struct device_node *np;

	for_each_node_by_name(np, "isa") {
		struct of_range range;
		struct of_range_parser parser;

		pr_info("ISA Bridge: %pOF\n", np);

		if (of_range_parser_init(&parser, np)) {
			pr_info("Failed to parse resources.\n");
			of_node_put(np);
			break;
		}

		for_each_of_range(&parser, &range) {
			switch (range.flags & IORESOURCE_TYPE_BITS) {
			case IORESOURCE_IO:
				pr_info(" IO 0x%016llx..0x%016llx  ->  0x%016llx\n",
					range.cpu_addr,
					range.cpu_addr + range.size - 1,
					range.bus_addr);
				if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
					pr_warn("Failed to reserve legacy IO in Logic PIO\n");
				break;
			case IORESOURCE_MEM:
				pr_info(" MEM 0x%016llx..0x%016llx  ->  0x%016llx\n",
					range.cpu_addr,
					range.cpu_addr + range.size - 1,
					range.bus_addr);
				break;
			}
		}
	}

	return 0;
}
arch_initcall(arch_reserve_pio_range);

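/*
 * Carve memblock-reserved ranges out of the "System RAM" resources as
 * nested "Reserved" entries so that they are visible in /proc/iomem.
 */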
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "Reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

#ifdef CONFIG_SMP
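/*
 * Mark CPUs 0..possible-1 (detected plus hotpluggable ones, capped at
 * nr_cpu_ids) as possible, clear the rest, and shrink nr_cpu_ids to match.
 */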
static void __init prefill_possible_map(void)
{
	int i, possible;

	possible = num_processors + disabled_cpus;
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
			possible, max((possible - num_processors), 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++) {
		set_cpu_present(i, false);
		set_cpu_possible(i, false);
	}

	set_nr_cpu_ids(possible);
}
#endif

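/*
 * Main architecture entry point for boot-time setup, called from
 * start_kernel(): probe the CPU, take over the firmware-provided memory
 * map and command line, then bring up the early MM and SMP state.
 */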
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	unwind_init();

	init_environ();
	efi_init();
	fdt_setup();
	memblock_init();
	pagetable_init();
	bootcmdline_init(cmdline_p);
	parse_early_param();
	reserve_initrd_mem();

	platform_init();
	arch_mem_init(cmdline_p);

	resource_init();
	jump_label_init(); /* Initialise the static keys for paravirtualization */

#ifdef CONFIG_SMP
	plat_smp_setup();
	prefill_possible_map();
#endif

	paging_init();

#ifdef CONFIG_KASAN
	kasan_init();
#endif
}