// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/of_fdt.h>
#include <linux/sched/task.h>
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>
#include <linux/jump_label.h>
#include <linux/gcd.h>

#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
#include <asm/efi.h>

#include "head.h"

/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(".sdata");
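/* Hart ID of the booting hart, saved early in head.S from the value passed by firmware. */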
unsigned long boot_cpu_hartid;
EXPORT_SYMBOL_GPL(boot_cpu_hartid);

/*
 * Place kernel memory regions on the resource tree so that
 * kexec-tools can retrieve them from /proc/iomem. While there
 * also add "System RAM" regions for compatibility with other
 * archs, and the rest of the known regions for completeness.
 */
static struct resource kimage_res = { .name = "Kernel image", };
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };
#ifdef CONFIG_CRASH_DUMP
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif

static int num_standard_resources;
static struct resource *standard_resources;

static int __init add_resource(struct resource *parent,
				struct resource *res)
{
	int ret = 0;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
			res->name, (unsigned long long) res->start);
		return ret;
	}

	return 1;
}

static int __init add_kernel_resources(void)
{
	int ret = 0;

	/*
	 * The memory region of the kernel image is contiguous and
	 * was reserved in setup_bootmem(); register it here as a
	 * resource, with the various segments of the image as
	 * child nodes.
	 */

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	kimage_res.start = code_res.start;
	kimage_res.end = bss_res.end;
	kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = add_resource(&iomem_resource, &kimage_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &code_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &rodata_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &data_res);
	if (ret < 0)
		return ret;

	ret = add_resource(&kimage_res, &bss_res);

	return ret;
}

static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int num_resources = 0, res_idx = 0, non_resv_res = 0;
	int ret = 0;

	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
	num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1;
	res_idx = num_resources - 1;
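	/*
	 * Fill the array from the end, so that any unused entries form a
	 * contiguous prefix that can be freed in a single call below.
	 */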

	mem_res_sz = num_resources * sizeof(*mem_res);
	mem_res = memblock_alloc_or_panic(mem_res_sz, SMP_CACHE_BYTES);

	/*
	 * Start by adding the reserved regions; if they overlap
	 * with /memory regions, insert_resource() later on will
	 * take care of it.
	 */
	ret = add_kernel_resources();
	if (ret < 0)
		goto error;

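	/*
	 * When running as a crash ("capture") kernel, export the location
	 * of the crashed kernel's ELF core header so it shows up in
	 * /proc/iomem.
	 */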
#ifdef CONFIG_CRASH_DUMP
	if (elfcorehdr_size > 0) {
		elfcorehdr_res.start = elfcorehdr_addr;
		elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
		elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		add_resource(&iomem_resource, &elfcorehdr_res);
	}
#endif

	for_each_reserved_mem_region(region) {
		res = &mem_res[res_idx--];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			/* Re-use this pre-allocated resource */
			res_idx++;
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &mem_res[res_idx--];
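		/*
		 * Count /memory regions: these become the "standard
		 * resources" walked by reserve_memblock_reserved_regions().
		 */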
		non_resv_res++;

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	num_standard_resources = non_resv_res;
	standard_resources = &mem_res[res_idx + 1];

	/* Clean-up any unused pre-allocated resources */
	if (res_idx >= 0)
		memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
	return;

 error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free(mem_res, mem_res_sz);
}

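/*
 * Mark memblock-reserved ranges that fall inside "System RAM" as
 * "Reserved" child resources, splitting the parent as needed, so that
 * firmware/bootloader reservations are visible in /proc/iomem.
 */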
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; i++) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "Reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

static void __init parse_dtb(void)
{
	/* Early scan of device tree from init memory */
	if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
		const char *name = of_flat_dt_get_machine_name();

		if (name) {
			pr_info("Machine model: %s\n", name);
			dump_stack_set_arch_desc("%s (DT)", name);
		}
	} else {
		pr_err("No DTB passed to the kernel\n");
	}
}

#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
DEFINE_STATIC_KEY_TRUE(qspinlock_key);
EXPORT_SYMBOL(qspinlock_key);
#endif

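/*
 * Pick the spinlock flavour for this boot: ticket spinlocks when
 * configured, otherwise queued spinlocks where the ISA provides the
 * required atomics and forward-progress guarantees (Zabha + Zacas, or
 * Ziccrse). With combo spinlocks, the qspinlock static key makes the
 * choice at runtime.
 */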
static void __init riscv_spinlock_init(void)
{
	char *using_ext = NULL;

	if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) {
		pr_info("Ticket spinlock: enabled\n");
		return;
	}

	if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) &&
	    IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
	    IS_ENABLED(CONFIG_TOOLCHAIN_HAS_ZACAS) &&
	    riscv_isa_extension_available(NULL, ZABHA) &&
	    riscv_isa_extension_available(NULL, ZACAS)) {
		using_ext = "using Zabha";
	} else if (riscv_isa_extension_available(NULL, ZICCRSE)) {
		using_ext = "using Ziccrse";
	}
#if defined(CONFIG_RISCV_COMBO_SPINLOCKS)
	else {
		static_branch_disable(&qspinlock_key);
		pr_info("Ticket spinlock: enabled\n");
		return;
	}
#endif

	if (!using_ext)
		pr_err("Queued spinlock without Zabha or Ziccrse\n");
	else
		pr_info("Queued spinlock %s: enabled\n", using_ext);
}

extern void __init init_rt_signal_env(void);

void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

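	/*
	 * Bring up early ioremap, the SBI interface and static keys
	 * before the early parameters are parsed.
	 */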
	early_ioremap_setup();
	sbi_init();
	jump_label_init();
	parse_early_param();

	efi_init();
	paging_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

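	/* With ACPI disabled or unavailable, fall back to the device tree. */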
	if (acpi_disabled) {
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
		unflatten_and_copy_device_tree();
#else
		unflatten_device_tree();
#endif
	}

	misc_mem_init();

	init_resources();

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	if (!acpi_disabled) {
		acpi_init_rintc_map();
		acpi_map_cpus_to_nodes();
	}

	riscv_init_cbo_blocksizes();
	riscv_fill_hwcap();
	apply_boot_alternatives();
	init_rt_signal_env();

	if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
	    riscv_isa_extension_available(NULL, ZICBOM))
		riscv_noncoherent_supported();
	riscv_set_dma_cache_alignment();

	riscv_user_isa_enable();
	riscv_spinlock_init();

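	/* Without Zbb, fall back to the generic ffs()/fls() helpers. */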
	if (!IS_ENABLED(CONFIG_RISCV_ISA_ZBB) || !riscv_isa_extension_available(NULL, ZBB))
		static_branch_disable(&efficient_ffs_key);
}

bool arch_cpu_is_hotpluggable(int cpu)
{
	return cpu_has_hotplug(cpu);
}

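/*
 * With STRICT_KERNEL_RWX, make the init region writable and
 * non-executable again (on both the linear-map alias and, on 64-bit,
 * the kernel mapping) before handing it back to the page allocator.
 */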
void free_initmem(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
		if (IS_ENABLED(CONFIG_64BIT))
			set_kernel_memory(__init_begin, __init_end, set_memory_nx);
	}

	free_initmem_default(POISON_FREE_INITMEM);
}

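/*
 * On panic, report the KASLR offset so that addresses in the dump can
 * be correlated with the link-time layout.
 */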
static int dump_kernel_offset(struct notifier_block *self,
			      unsigned long v, void *p)
{
	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
		 kernel_map.virt_offset,
		 KERNEL_LINK_ADDR);

	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kernel_offset_notifier);

	return 0;
}
device_initcall(register_kernel_offset_dumper);