xref: /linux/arch/riscv/kernel/setup.c (revision b6d27a345f9d12fb80d61a1b1801ced9c1d6178a)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/of_fdt.h>
#include <linux/sched/task.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>

#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
#include <asm/efi.h>

#include "head.h"

/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(".sdata")
#ifdef CONFIG_XIP_KERNEL
= ATOMIC_INIT(0xC001BEEF)
#endif
;
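/*
 * Note: with CONFIG_XIP_KERNEL the kernel image executes from flash, which
 * cannot be written in place, so the word is pre-seeded with a magic value
 * and the early boot code uses a different handshake on the RAM copy
 * instead of a plain atomic increment (see head.S for the details).
 */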
unsigned long boot_cpu_hartid;

/*
 * Place kernel memory regions on the resource tree so that
 * kexec-tools can retrieve them from /proc/iomem. While at it,
 * also add "System RAM" regions for compatibility with other
 * archs, and the rest of the known regions for completeness.
 */
static struct resource kimage_res = { .name = "Kernel image", };
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };
#ifdef CONFIG_CRASH_DUMP
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif
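/*
 * Wrapper around insert_resource(): returns 1 when @res was successfully
 * inserted under @parent, or the negative errno from insert_resource()
 * (after logging it) on failure.
 */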
static int __init add_resource(struct resource *parent,
				struct resource *res)
{
	int ret = 0;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
			res->name, (unsigned long long) res->start);
		return ret;
	}

	return 1;
}

static int __init add_kernel_resources(void)
{
	int ret = 0;

	/*
	 * The memory region of the kernel image is contiguous and
	 * was reserved in setup_bootmem(); register it here as a
	 * resource, with the various segments of the image as
	 * child nodes.
	 */

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	kimage_res.start = code_res.start;
	kimage_res.end = bss_res.end;
	kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = add_resource(&iomem_resource, &kimage_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &code_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &rodata_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &data_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &bss_res);

	return ret;
}

static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int num_resources = 0, res_idx = 0;
	int ret = 0;

	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
	num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1;
	res_idx = num_resources - 1;

	mem_res_sz = num_resources * sizeof(*mem_res);
	mem_res = memblock_alloc_or_panic(mem_res_sz, SMP_CACHE_BYTES);

	/*
	 * Start by adding the reserved regions; if they overlap
	 * with /memory regions, insert_resource() later on will
	 * take care of it.
	 */
	ret = add_kernel_resources();
	if (ret < 0)
		goto error;

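	/*
	 * When running as a kdump capture kernel, advertise the ELF core
	 * header left behind by the previous kernel so that it is visible
	 * in /proc/iomem.
	 */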
#ifdef CONFIG_CRASH_DUMP
	if (elfcorehdr_size > 0) {
		elfcorehdr_res.start = elfcorehdr_addr;
		elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
		elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		add_resource(&iomem_resource, &elfcorehdr_res);
	}
#endif

	for_each_reserved_mem_region(region) {
		res = &mem_res[res_idx--];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			/* Re-use this pre-allocated resource */
			res_idx++;
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &mem_res[res_idx--];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Clean up any unused pre-allocated resources */
	if (res_idx >= 0)
		memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
	return;

 error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free(mem_res, mem_res_sz);
}
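
/*
 * Illustrative /proc/iomem layout after init_resources(); the addresses
 * below are hypothetical and depend entirely on the platform's memory map:
 *
 *   80000000-bfffffff : System RAM
 *     80200000-80a00fff : Kernel image
 *       80200000-805fffff : Kernel code
 *       80600000-807fffff : Kernel rodata
 *       80800000-809fffff : Kernel data
 *       80a00000-80a00fff : Kernel bss
 */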

static void __init parse_dtb(void)
{
	/* Early scan of device tree from init memory */
	if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
		const char *name = of_flat_dt_get_machine_name();

		if (name) {
			pr_info("Machine model: %s\n", name);
			dump_stack_set_arch_desc("%s (DT)", name);
		}
	} else {
		pr_err("No DTB passed to the kernel\n");
	}
}

#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
DEFINE_STATIC_KEY_TRUE(qspinlock_key);
EXPORT_SYMBOL(qspinlock_key);
#endif

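/*
 * Pick the spinlock flavour at boot.  With CONFIG_RISCV_COMBO_SPINLOCKS the
 * qspinlock_key static branch above defaults to queued spinlocks and is
 * flipped back to ticket spinlocks here when the required extensions are
 * missing.
 */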
static void __init riscv_spinlock_init(void)
{
	char *using_ext = NULL;

	if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) {
		pr_info("Ticket spinlock: enabled\n");
		return;
	}

	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) &&
	    IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
	    riscv_isa_extension_available(NULL, ZABHA) &&
	    riscv_isa_extension_available(NULL, ZACAS)) {
		using_ext = "using Zabha";
	} else if (riscv_isa_extension_available(NULL, ZICCRSE)) {
		using_ext = "using Ziccrse";
	}
#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
	else {
		static_branch_disable(&qspinlock_key);
		pr_info("Ticket spinlock: enabled\n");
		return;
	}
#endif

	if (!using_ext)
		pr_err("Queued spinlock without Zabha or Ziccrse\n");
	else
		pr_info("Queued spinlock %s: enabled\n", using_ext);
}

extern void __init init_rt_signal_env(void);

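/*
 * Boot-time ordering matters here: the DTB is scanned before memory is set
 * up, the device tree is unflattened only after paging_init(), and hwcaps
 * must be filled in before the boot alternatives are applied.
 */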
void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	early_ioremap_setup();
	sbi_init();
	jump_label_init();
	parse_early_param();

	efi_init();
	paging_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

#if IS_ENABLED(CONFIG_BUILTIN_DTB)
	unflatten_and_copy_device_tree();
#else
	unflatten_device_tree();
#endif
	misc_mem_init();

	init_resources();

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	if (!acpi_disabled) {
		acpi_init_rintc_map();
		acpi_map_cpus_to_nodes();
	}

	riscv_init_cbo_blocksizes();
	riscv_fill_hwcap();
	apply_boot_alternatives();
	init_rt_signal_env();

	if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
	    riscv_isa_extension_available(NULL, ZICBOM))
		riscv_noncoherent_supported();
	riscv_set_dma_cache_alignment();

	riscv_user_isa_enable();
	riscv_spinlock_init();
}

bool arch_cpu_is_hotpluggable(int cpu)
{
	return cpu_has_hotplug(cpu);
}

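/*
 * With STRICT_KERNEL_RWX the init sections were mapped read-only and
 * executable, so restore write (and drop execute) permissions before the
 * pages are returned to the page allocator; on 64-bit the kernel mapping
 * is distinct from the linear mapping and needs its execute permission
 * dropped as well.
 */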
void free_initmem(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
		if (IS_ENABLED(CONFIG_64BIT))
			set_kernel_memory(__init_begin, __init_end, set_memory_nx);
	}

	free_initmem_default(POISON_FREE_INITMEM);
}

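/*
 * With CONFIG_RANDOMIZE_BASE (KASLR), dump the randomization offset on
 * panic so that addresses in the oops can be matched back to vmlinux.
 */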
static int dump_kernel_offset(struct notifier_block *self,
			      unsigned long v, void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kernel_map.virt_offset,
		 KERNEL_LINK_ADDR);

	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	return 0;
}
device_initcall(register_kernel_offset_dumper);