// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <linux/swiotlb.h>

#include <asm/addrspace.h>
#include <asm/alternative.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/unwind.h>

#define SMBIOS_BIOSSIZE_OFFSET		0x09
#define SMBIOS_BIOSEXTERN_OFFSET	0x13
#define SMBIOS_FREQLOW_OFFSET		0x16
#define SMBIOS_FREQHIGH_OFFSET		0x17
#define SMBIOS_FREQLOW_MASK		0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET	0x23
#define LOONGSON_EFI_ENABLE		(1 << 3)

unsigned long fw_arg0, fw_arg1, fw_arg2;
DEFINE_PER_CPU(unsigned long, kernelsp);
struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

struct loongson_board_info b_info;
static const char dmi_empty_string[] = " ";

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
char init_command_line[COMMAND_LINE_SIZE] __initdata;

static int num_standard_resources;
static struct resource *standard_resources;

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

const char *get_system_type(void)
{
	return "generic-loongson-machine";
}

void __init arch_cpu_finalize_init(void)
{
	alternative_instructions();
}

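/*
 * Look up the s-th string in the string-set that follows the formatted
 * area of a DMI structure. String numbers are 1-based; a number of 0 or
 * one past the end of the set yields "", and a string matching the
 * canonical "empty" value (a single space) is mapped to dmi_empty_string.
 */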
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
	const u8 *bp = ((u8 *) dm) + dm->length;

	if (s) {
		s--;
		while (s > 0 && *bp) {
			bp += strlen(bp) + 1;
			s--;
		}

		if (*bp != 0) {
			size_t len = strlen(bp)+1;
			size_t cmp_len = len > 8 ? 8 : len;

			if (!memcmp(bp, dmi_empty_string, cmp_len))
				return dmi_empty_string;

			return bp;
		}
	}

	return "";
}

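/*
 * Extract CPU details from the SMBIOS Type 4 (Processor Information)
 * record: combine the high and low bytes of the speed field into a value
 * in MHz and scale it to Hz for cpu_clock_freq, then pick up the CPU name
 * string and the number of cores per package.
 */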
static void __init parse_cpu_table(const struct dmi_header *dm)
{
	long freq_temp = 0;
	char *dmi_data = (char *)dm;

	freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
			((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
	cpu_clock_freq = freq_temp * 1000000;

	loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
	loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);

	pr_info("CpuClock = %llu\n", cpu_clock_freq);
}

static void __init parse_bios_table(const struct dmi_header *dm)
{
	char *dmi_data = (char *)dm;

	b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
}

static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
	switch (dm->type) {
	case 0x0: /* BIOS Information */
		parse_bios_table(dm);
		break;
	case 0x4: /* Processor Information */
		parse_cpu_table(dm);
		break;
	}
}
static void __init smbios_parse(void)
{
	b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
	b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
	b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
	b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
	b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
	dmi_walk(find_tokens, NULL);
}

#ifdef CONFIG_ARCH_WRITECOMBINE
bool wc_enabled = true;
#else
bool wc_enabled = false;
#endif

EXPORT_SYMBOL(wc_enabled);

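/*
 * "writecombine=on/off" on the command line overrides the
 * CONFIG_ARCH_WRITECOMBINE default for wc_enabled.
 */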
static int __init setup_writecombine(char *p)
{
	if (!strcmp(p, "on"))
		wc_enabled = true;
	else if (!strcmp(p, "off"))
		wc_enabled = false;
	else
		pr_warn("Unknown writecombine setting \"%s\".\n", p);

	return 0;
}
early_param("writecombine", setup_writecombine);

static int usermem __initdata;

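/*
 * Handle the "mem=<size>@<start>" early parameter: the first use discards
 * the firmware-provided memory map, and each use adds the given region
 * back to memblock (to its home node when NUMA is enabled).
 */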
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);
	else {
		pr_err("Invalid format!\n");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_NUMA))
		memblock_add(start, size);
	else
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);

	return 0;
}
early_param("mem", early_parse_mem);

static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	u64 i;
	phys_addr_t start, end;

	if (!is_kdump_kernel())
		return;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment; that range should all be
				 * kdump reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}

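/*
 * Parse "crashkernel=" from the kernel command line and reserve the
 * requested region through the generic crashkernel helper.
 */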
static void __init arch_reserve_crashkernel(void)
{
	int ret;
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base, &low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
}

static void __init fdt_setup(void)
{
#ifdef CONFIG_OF_EARLY_FLATTREE
	void *fdt_pointer;

	/* ACPI-based systems do not require parsing fdt */
	if (acpi_os_get_root_pointer())
		return;

	/* Prefer to use built-in dtb, checking its validity first. */
	if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
		fdt_pointer = __dtb_start;
	else
		fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */

	if (!fdt_pointer || fdt_check_header(fdt_pointer))
		return;

	early_init_dt_scan(fdt_pointer);
	early_init_fdt_reserve_self();

	max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
#endif
}

static void __init bootcmdline_init(char **cmdline_p)
{
	/*
	 * If CONFIG_CMDLINE_FORCE is enabled then initializing the command line
	 * is trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
		goto out;
	}

#ifdef CONFIG_OF_FLATTREE
	/*
	 * If CONFIG_CMDLINE_BOOTLOADER is enabled and we are on an FDT-based system,
	 * boot_command_line will be overwritten by early_init_dt_scan_chosen().
	 * So we need to append init_command_line (the original copy of boot_command_line)
	 * to boot_command_line.
	 */
	if (initial_boot_params) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

		if (!strstr(boot_command_line, init_command_line))
			strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);

		goto out;
	}
#endif

	/*
	 * Append built-in command line to the bootloader command line if
	 * CONFIG_CMDLINE_EXTEND is enabled.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	}

	/*
	 * Use built-in command line if the bootloader command line is empty.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0])
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);

out:
	*cmdline_p = boot_command_line;
}

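/*
 * Early platform bring-up: reserve kdump/crashkernel memory, initialize
 * the ACPI tables, unflatten the device tree, set up NUMA memory, parse
 * SMBIOS/DMI data, and initialize EFI runtime services.
 */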
void __init platform_init(void)
{
	arch_reserve_vmcore();
	arch_reserve_crashkernel();

#ifdef CONFIG_ACPI
	acpi_table_upgrade();
	acpi_gbl_use_default_register_widths = false;
	acpi_boot_table_init();
#endif

	early_init_fdt_scan_reserved_mem();
	unflatten_and_copy_device_tree();

#ifdef CONFIG_NUMA
	init_numa_memory();
#endif
	dmi_setup();
	smbios_parse();
	pr_info("The BIOS Version: %s\n", b_info.bios_version);

	efi_runtime_init();
}

static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

/*
 * arch_mem_init - initialize memory management subsystem
 */
static void __init arch_mem_init(char **cmdline_p)
{
	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	/*
	 * To reduce the chance of a kernel panic when the IO TLB memory
	 * cannot be allocated under CONFIG_SWIOTLB, keep low-memory
	 * allocations as small as possible before swiotlb_init(), so make
	 * sparse_init() use top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	swiotlb_init(true, SWIOTLB_VERBOSE);

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)),
			       PFN_UP(__pa_symbol(&__nosave_end)));

	memblock_dump_all();

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

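/*
 * Describe the memblock memory map as "System RAM" (or "Reserved" for
 * nomap regions) resources under iomem_resource, and register the kernel
 * code/data/bss resources inside whichever region contains them.
 */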
static void __init resource_init(void)
{
	long i = 0;
	size_t res_size;
	struct resource *res;
	struct memblock_region *region;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (!memblock_is_nomap(region)) {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		} else {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM;
			res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
			res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
	}
}

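/*
 * Register a legacy ISA I/O range with the logic_pio framework and map it
 * into the CPU address space. The range is rounded up to a page and must
 * land at the very start of PCI_IOBASE (logical port 0).
 */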
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
				    resource_size_t hw_start, resource_size_t size)
{
	int ret = 0;
	unsigned long vaddr;
	struct logic_pio_hwaddr *range;

	range = kzalloc(sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	range->fwnode = fwnode;
	range->size = size = round_up(size, PAGE_SIZE);
	range->hw_start = hw_start;
	range->flags = LOGIC_PIO_CPU_MMIO;

	ret = logic_pio_register_range(range);
	if (ret) {
		kfree(range);
		return ret;
	}

	/* Legacy ISA must be placed at the start of PCI_IOBASE */
	if (range->io_start != 0) {
		logic_pio_unregister_range(range);
		kfree(range);
		return -EINVAL;
	}

	vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
	vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));

	return 0;
}

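/*
 * Walk every "isa" node in the device tree and register its I/O ranges as
 * legacy ISA ports; MMIO ranges are only logged.
 */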
static __init int arch_reserve_pio_range(void)
{
	struct device_node *np;

	for_each_node_by_name(np, "isa") {
		struct of_range range;
		struct of_range_parser parser;

		pr_info("ISA Bridge: %pOF\n", np);

		if (of_range_parser_init(&parser, np)) {
			pr_info("Failed to parse resources.\n");
			of_node_put(np);
			break;
		}

		for_each_of_range(&parser, &range) {
			switch (range.flags & IORESOURCE_TYPE_BITS) {
			case IORESOURCE_IO:
				pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
					range.cpu_addr,
					range.cpu_addr + range.size - 1,
					range.bus_addr);
				if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
					pr_warn("Failed to reserve legacy IO in Logic PIO\n");
				break;
			case IORESOURCE_MEM:
				pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
					range.cpu_addr,
					range.cpu_addr + range.size - 1,
					range.bus_addr);
				break;
			}
		}
	}

	return 0;
}
arch_initcall(arch_reserve_pio_range);

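/*
 * For each standard "System RAM" resource, split out the parts that
 * memblock has reserved and re-register them as "Reserved" child
 * resources, so they are reflected in the iomem resource tree.
 */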
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "Reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

#ifdef CONFIG_SMP
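/*
 * Mark the CPUs discovered by firmware (present plus disabled ones) as
 * possible, capped at nr_cpu_ids, and clear everything beyond that.
 */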
static void __init prefill_possible_map(void)
{
	int i, possible;

	possible = num_processors + disabled_cpus;
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
			possible, max((possible - num_processors), 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++) {
		set_cpu_present(i, false);
		set_cpu_possible(i, false);
	}

	set_nr_cpu_ids(possible);
}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	unwind_init();

	init_environ();
	efi_init();
	fdt_setup();
	memblock_init();
	pagetable_init();
	bootcmdline_init(cmdline_p);
	parse_early_param();
	reserve_initrd_mem();

	platform_init();
	arch_mem_init(cmdline_p);

	resource_init();
	jump_label_init(); /* Initialise the static keys for paravirtualization */

#ifdef CONFIG_SMP
	plat_smp_setup();
	prefill_possible_map();
#endif

	paging_init();

#ifdef CONFIG_KASAN
	kasan_init();
#endif
}