init.c — two versions compared:
    34069d12e239ae8f36dd96c378e4622fb1c42a76
    84fe419dc7578b03e721b9bd6eb07947db70fd0e
Divergent hunks are labeled with the short hash of the version they come from; runs marked common are identical in both versions and, where their line numbers differ, keep 34069d12e239's numbering.

--- common ---
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2012 Regents of the University of California
4 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
5 * Copyright (C) 2020 FORTH-ICS/CARV
6 * Nick Kossifidis <mick@ics.forth.gr>
7 */
8

--- 12 unchanged lines hidden ---

21#include <linux/crash_dump.h>
22#include <linux/hugetlb.h>
23#ifdef CONFIG_RELOCATABLE
24#include <linux/elf.h>
25#endif
26#include <linux/kfence.h>
27
28#include <asm/fixmap.h>
--- 34069d12e239 ---
29#include <asm/io.h>
30#include <asm/numa.h>
31#include <asm/pgtable.h>
32#include <asm/ptdump.h>
33#include <asm/sections.h>
34#include <asm/soc.h>
35#include <asm/tlbflush.h>

--- 84fe419dc757 ---
29#include <asm/tlbflush.h>
30#include <asm/sections.h>
31#include <asm/soc.h>
32#include <asm/io.h>
33#include <asm/ptdump.h>
34#include <asm/numa.h>

--- common (numbering per 34069d12e239) ---
36
37#include "../kernel/head.h"
38
39struct kernel_mapping kernel_map __ro_after_init;
40EXPORT_SYMBOL(kernel_map);
41#ifdef CONFIG_XIP_KERNEL
42#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
43#endif
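
Under CONFIG_XIP_KERNEL the kernel text executes in place from flash while writable data such as kernel_map is copied into RAM, so accesses through the symbol must be rebased into the RAM copy; that is what the XIP_FIXUP() wrapper above is for. A minimal sketch of the rebasing idea, using made-up MY_FLASH_BASE/MY_FLASH_SIZE/MY_RAM_BASE constants (the kernel's real macro in asm/xip_fixup.h differs in detail):

    #include <stdint.h>

    /* Hypothetical placeholders for this sketch only: */
    #define MY_FLASH_BASE 0x20000000UL
    #define MY_FLASH_SIZE 0x00800000UL
    #define MY_RAM_BASE   0x80000000UL

    /* Rebase a flash-resident address of a writable object into its
     * RAM copy; addresses outside the XIP flash window pass through
     * unchanged. */
    #define MY_XIP_FIXUP(addr)                                        \
            ({                                                        \
                    uintptr_t __a = (uintptr_t)(addr);                \
                    (__a >= MY_FLASH_BASE &&                          \
                     __a < MY_FLASH_BASE + MY_FLASH_SIZE)             \
                            ? __a - MY_FLASH_BASE + MY_RAM_BASE       \
                            : __a;                                    \
            })
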

--- 166 unchanged lines hidden ---

210 if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
211 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
212 /*
213 * Reserve from the start of the kernel to the end of the kernel
214 */
215 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
216
217 phys_ram_end = memblock_end_of_DRAM();
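
The PMD_SIZE round-up above widens the memblock reservation to a full boundary when STRICT_KERNEL_RWX is enabled, plausibly so the kernel image region can stay mapped with PMD-sized mappings. A worked example with an invented address, assuming the usual 64-bit value PMD_SIZE = 2 MiB:

    #include <stdint.h>

    #define MY_PMD_SIZE 0x200000UL              /* 2 MiB, assumed value */
    #define MY_PMD_MASK (~(MY_PMD_SIZE - 1))

    uintptr_t vmlinux_end = 0x80a01000UL;       /* invented, unaligned image end */
    /* (end + PMD_SIZE - 1) & PMD_MASK rounds UP:
     * 0x80a01000 -> 0x80c00000, so the reservation ends on a full
     * 2 MiB boundary. */
    uintptr_t reserved_end = (vmlinux_end + MY_PMD_SIZE - 1) & MY_PMD_MASK;
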
--- 34069d12e239 ---
218
219 /*
220 * Make sure we align the start of the memory on a PMD boundary so that
221 * at worst, we map the linear mapping with PMD mappings.
222 */
223 if (!IS_ENABLED(CONFIG_XIP_KERNEL))
224 phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;

--- 84fe419dc757 ---
217 if (!IS_ENABLED(CONFIG_XIP_KERNEL))
218 phys_ram_base = memblock_start_of_DRAM();
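
A worked example of the masking in the 34069d12e239 variant, again assuming the usual 64-bit value PMD_SIZE = 2 MiB (address invented):

    #include <stdint.h>

    #define MY_PMD_SIZE 0x200000UL              /* 2 MiB, assumed value */
    #define MY_PMD_MASK (~(MY_PMD_SIZE - 1))

    /* DRAM starting 4 KiB past a 2 MiB boundary: */
    uint64_t dram_start = 0x80201000ULL;
    /* start & PMD_MASK rounds DOWN: 0x80201000 -> 0x80200000, so the
     * linear mapping begins on a PMD boundary and even the leading
     * chunk of DRAM can be covered by a 2 MiB entry instead of
     * falling back to 4 KiB PTEs. */
    uint64_t phys_base = dram_start & MY_PMD_MASK;
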
--- common (numbering per 34069d12e239) ---
225
226 /*
227 * In 64-bit, any use of __va/__pa before this point is wrong as we
228 * did not know the start of DRAM before.
229 */
230 if (IS_ENABLED(CONFIG_64BIT))
231 kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
232
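
Once va_pa_offset is fixed, linear-map translation is a single add or subtract. A minimal sketch of the relationship being set up here, with hypothetical helper names (the kernel's real __va()/__pa() in asm/page.h handle more cases, e.g. the separate kernel-image mapping on 64-bit):

    #include <stdint.h>

    /* Stand-in for kernel_map, for this sketch only: */
    struct { unsigned long va_pa_offset; } my_kernel_map;

    /* Linear map: virtual = physical + (PAGE_OFFSET - phys_ram_base). */
    static inline void *my_va(uint64_t pa)
    {
            return (void *)(uintptr_t)(pa + my_kernel_map.va_pa_offset);
    }

    static inline uint64_t my_pa(const void *va)
    {
            return (uintptr_t)va - my_kernel_map.va_pa_offset;
    }
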

--- 780 unchanged lines hidden ---

1013 pt_ops.get_pmd_virt = get_pmd_virt_late;
1014 pt_ops.alloc_pud = alloc_pud_late;
1015 pt_ops.get_pud_virt = get_pud_virt_late;
1016 pt_ops.alloc_p4d = alloc_p4d_late;
1017 pt_ops.get_p4d_virt = get_p4d_virt_late;
1018#endif
1019}
1020
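
Both versions finish this function identically: late page-table hooks, backed by the normal page allocator, replace the early memblock/fixmap-based ones used while setup_vm() ran. A sketch of the function-pointer shape implied by the assignments above (assumed types; the file's real struct pt_ops also carries an alloc_pte/get_pte_virt pair):

    /* Each level pairs an allocator returning a physical page with a
     * helper that turns that physical address into a usable virtual
     * pointer: fixmap-based early in boot, page-allocator-based once
     * mm_init() has run. */
    struct my_pt_ops {
            unsigned long (*alloc_pmd)(unsigned long va);
            void *(*get_pmd_virt)(unsigned long pa);
            unsigned long (*alloc_pud)(unsigned long va);
            void *(*get_pud_virt)(unsigned long pa);
            unsigned long (*alloc_p4d)(unsigned long va);
            void *(*get_p4d_virt)(unsigned long pa);
    };
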
--- only in 84fe419dc757 ---
1015#ifdef CONFIG_RANDOMIZE_BASE
1016extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
1017extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
1018
1019static int __init print_nokaslr(char *p)
1020{
1021 pr_info("Disabled KASLR");
1022 return 0;
1023}
1024early_param("nokaslr", print_nokaslr);
1025
1026unsigned long kaslr_offset(void)
1027{
1028 return kernel_map.virt_offset;
1029}
1030#endif
1031
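
Note that print_nokaslr() above only logs, and its return 0 merely reports the parameter as handled; the actual opt-out happens in setup_vm() below via __pi_set_nokaslr_from_cmdline(), built as position-independent early code that reads the boot arguments out of the DTB. A hedged stand-in for what such a check amounts to (an assumption for illustration, not the kernel's implementation):

    #include <stdbool.h>
    #include <string.h>

    /* Hypothetical: report whether "nokaslr" appears in the bootargs
     * string pulled from the DTB's /chosen node. */
    static bool nokaslr_requested(const char *bootargs)
    {
            return bootargs && strstr(bootargs, "nokaslr") != NULL;
    }
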
--- 34069d12e239 ---
1021 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
1022 {
1023 pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
1024
1025 kernel_map.virt_addr = KERNEL_LINK_ADDR;

--- 84fe419dc757 ---
1032 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
1033 {
1034 pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
1035
1036#ifdef CONFIG_RANDOMIZE_BASE
1037 if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
1038 u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
1039 u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
1040 u32 nr_pos;
1041
1042 /*
1043 * Compute the number of positions available: we are limited
1044 * by the early page table that only has one PUD and we must
1045 * be aligned on PMD_SIZE.
1046 */
1047 nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
1048
1049 kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
1050 }
1051#endif
1052
1053 kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
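
To make the slot arithmetic concrete, a worked example assuming the usual rv64 constants PUD_SIZE = 1 GiB and PMD_SIZE = 2 MiB, plus an invented seed and a 32 MiB image:

    #include <stdint.h>

    #define MY_PUD_SIZE (1UL << 30)                  /* 1 GiB, assumed */
    #define MY_PMD_SIZE (1UL << 21)                  /* 2 MiB, assumed */

    uint64_t kaslr_seed = 0x1234567890abcdefULL;     /* invented seed */
    uint32_t kernel_size = 32 << 20;                 /* invented 32 MiB image */
    /* (1 GiB - 32 MiB) / 2 MiB = 496 candidate slots, so the image
     * lands on one of 496 PMD-aligned offsets inside the single PUD
     * the early page table can map. */
    uint32_t nr_pos = (MY_PUD_SIZE - kernel_size) / MY_PMD_SIZE;
    unsigned long virt_offset = (kaslr_seed % nr_pos) * MY_PMD_SIZE;
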
--- common (numbering per 34069d12e239) ---
1026 kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
1027
1028#ifdef CONFIG_XIP_KERNEL
1029 kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
1030 kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
1031
1032 phys_ram_base = CONFIG_PHYS_RAM_BASE;
1033 kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;

--- 313 unchanged lines hidden ---

--- 34069d12e239 ---
1347 * Current riscv boot protocol requires 2MB alignment for
1348 * RV64 and 4MB alignment for RV32 (hugepage size)
1349 *
1350 * Try to alloc from 32bit addressible physical memory so that
1351 * swiotlb can work on the crash kernel.
1352 */
1353 crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1354 search_start,
1355 min(search_end, (unsigned long)(SZ_4G - 1)));

--- 84fe419dc757 ---
1375 * Current riscv boot protocol requires 2MB alignment for
1376 * RV64 and 4MB alignment for RV32 (hugepage size)
1377 *
1378 * Try to alloc from 32bit addressible physical memory so that
1379 * swiotlb can work on the crash kernel.
1380 */
1381 crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1382 search_start,
1383 min(search_end, (unsigned long) SZ_4G));

--- common (numbering per 34069d12e239) ---
1356 if (crash_base == 0) {
1357 /* Try again without restricting region to 32bit addressible memory */
1358 crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1359 search_start, search_end);
1360 if (crash_base == 0) {
1361 pr_warn("crashkernel: couldn't allocate %lldKB\n",
1362 crash_size >> 10);
1363 return;

--- 94 unchanged lines hidden ---
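
The only difference between the two versions' first allocation attempt is the upper bound: (unsigned long)(SZ_4G - 1) versus (unsigned long)SZ_4G. A plausible reading, though the diff carries no changelog: on RV32, where unsigned long is 32 bits wide, casting SZ_4G (0x100000000) truncates to 0 and the min() clamp collapses the search range, while SZ_4G - 1 remains representable. A standalone illustration of that truncation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long long sz_4g = 0x100000000ULL;   /* SZ_4G */
            /* uint32_t models a 32-bit unsigned long (RV32): */
            uint32_t bound    = (uint32_t)sz_4g;         /* truncates to 0x0 */
            uint32_t bound_m1 = (uint32_t)(sz_4g - 1);   /* 0xffffffff */
            printf("%#x %#x\n", bound, bound_m1);
            return 0;
    }
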