xref: /linux/arch/riscv/mm/init.c (revision c5ab54e9945b5f3dc8e9c31b93bb334fcea126f4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];

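/*
 * Set the highest PFN of each memory zone: ZONE_DMA32 is capped at the
 * first 4 GiB (when configured) and ZONE_NORMAL covers everything the
 * kernel can linearly map, up to max_low_pfn.
 */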
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);
}

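/*
 * empty_zero_page lives in __page_aligned_bss and is therefore already
 * zeroed; clearing it again here is cheap and makes the invariant explicit.
 */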
void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

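/*
 * Late memory setup: record the top of low memory in high_memory, hand
 * all memblock-managed pages over to the buddy allocator, and print the
 * usual "Memory: ..." boot banner.
 */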
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
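/*
 * Validate the initrd window handed over by the bootloader, reserve it
 * in memblock so it is not released to the buddy allocator, and disable
 * the initrd entirely if it is empty or lies beyond addressable memory.
 */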
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

static phys_addr_t dtb_early_pa __initdata;

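/*
 * Establish the usable memory map: clamp the region holding the kernel
 * to what the linear mapping can cover, reserve the kernel image and the
 * flattened device tree, and assign all memory to node 0.
 */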
void __init setup_bootmem(void)
{
	struct memblock_region *reg;
	phys_addr_t mem_size = 0;
	phys_addr_t vmlinux_end = __pa(&_end);
	phys_addr_t vmlinux_start = __pa(&_start);

	/* Find the memory region containing the kernel */
	for_each_memblock(memory, reg) {
		phys_addr_t end = reg->base + reg->size;

		if (reg->base <= vmlinux_end && vmlinux_end <= end) {
			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);

			/*
			 * Trim the part of this region that lies beyond
			 * the end of the usable (linearly mappable) area.
			 */
			if (reg->base + mem_size < end)
				memblock_remove(reg->base + mem_size,
						end - reg->base - mem_size);
		}
	}
	BUG_ON(mem_size == 0);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	set_max_mapnr(PFN_DOWN(mem_size));
	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
}

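/*
 * va_pa_offset is the constant distance between a linear-map virtual
 * address and its physical address; pfn_base is the PFN the kernel is
 * loaded at. Both are consumed by the __va()/__pa() and PFN conversion
 * helpers in asm/page.h.
 */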
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
static bool mmu_enabled;

#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

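/*
 * Install (or, for an empty prot, tear down) the PTE backing a fixmap
 * slot. Only the PTE level is touched here; the covering PMD/PGD
 * entries are created once during setup_vm().
 */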
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	} else {
		pte_clear(&init_mm, addr, ptep);
		local_flush_tlb_page(addr);
	}
}

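/*
 * Early page-table pages have to be written through a virtual address.
 * Once the MMU is on they are windowed through the FIX_PTE fixmap slot;
 * before that, physical addresses can be dereferenced directly.
 */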
static pte_t *__init get_pte_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PTE);
		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
	} else {
		return (pte_t *)((uintptr_t)pa);
	}
}

static phys_addr_t __init alloc_pte(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings, so we should
	 * never reach here with the MMU disabled.
	 */
	BUG_ON(!mmu_enabled);

	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

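/* Install a single 4K leaf PTE; an already-present entry is left alone. */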
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_index = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_index]))
		ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

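/*
 * Statically sized pool of early PMD tables: enough to span
 * MAX_EARLY_MAPPING_SIZE, plus one extra because the kernel load
 * address need not be aligned to a PGDIR boundary.
 */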
#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PMD);
		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
	} else {
		return (pmd_t *)((uintptr_t)pa);
	}
}

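/*
 * With the MMU off, hand out one of the static early_pmd tables, indexed
 * by the PGD slot the VA falls into; afterwards, allocate from memblock.
 */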
static phys_addr_t __init alloc_pmd(uintptr_t va)
{
	uintptr_t pmd_num;

	if (mmu_enabled)
		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

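/*
 * Map at PMD granularity when asked for a PMD_SIZE mapping; otherwise
 * walk down one level (allocating and zeroing a PTE table on first use)
 * and defer to create_pte_mapping().
 */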
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_index = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_index]))
			pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_index])) {
		pte_phys = alloc_pte(va);
		pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
		ptep = get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

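/*
 * Abstract the level below the PGD so that create_pgd_mapping() works
 * both with a real PMD level and with the folded (two-level) layout,
 * where the PGD points straight at PTE tables.
 */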
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	alloc_pte(__va)
#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

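/*
 * Top-level mapping helper: install a PGDIR_SIZE leaf when the size
 * allows, otherwise descend, allocating the next-level table on first
 * use, and let the lower-level helpers finish the job.
 */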
static void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_index = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_index]) == 0)
			pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_index]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

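/*
 * Pick the largest granularity both the base and the size are aligned
 * to; only PAGE_SIZE and PMD_SIZE mappings are generated here.
 */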
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build this file with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for
 *    setup_vm(), so compiler instrumentation is disabled when FTRACE
 *    is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

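/*
 * Running with the MMU still off, build the early page tables: the
 * trampoline (a single big mapping of the kernel at PAGE_OFFSET), the
 * early PGD covering the whole kernel image plus the fixmap, and the
 * fixmap PTEs that window the FDT. head.S uses the trampoline to turn
 * the MMU on and then runs on early_pg_dir until setup_vm_final()
 * installs swapper_pg_dir.
 */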
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Set up the early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). All memory banks are mapped later,
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create fixmap PTEs for early FDT parsing */
	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
		create_pte_mapping(fixmap_pte, va,
				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
				   PAGE_SIZE, PAGE_KERNEL);

	/* Save pointer to DTB for early FDT parsing */
	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
	/* Save physical address for memblock reservation */
	dtb_early_pa = dtb_pa;
}

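/*
 * With the MMU on and memblock initialized, build the final page tables:
 * recreate the fixmap in swapper_pg_dir, map every memory bank at the
 * best granularity, drop the early fixmap windows, and switch satp over
 * to swapper_pg_dir.
 */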
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	struct memblock_region *reg;

	/* Page-table pages are reached via the fixmap from here on */
	mmu_enabled = true;

	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
}

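/*
 * Order matters here: the early page tables only cover the kernel image,
 * so the final mappings must be in place before sparse_init() starts
 * allocating and touching memory through the linear map.
 */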
void __init paging_init(void)
{
	setup_vm_final();
	memblocks_present();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
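/*
 * The struct page array for SPARSEMEM_VMEMMAP is populated with base
 * pages only; no huge-page vmemmap mappings are used here.
 */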
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#endif
464