xref: /linux/arch/sh/mm/init.c (revision e467705a9fb37f51595aa6deaca085ccb4005454)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

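/*
 * The kernel's reference page table. All kernel-mode address spaces
 * share these mappings; paging_init() clears it and repopulates the
 * fixmap portion below.
 */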
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
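/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate (p4d/pud/pmd) levels, and return a pointer to the PTE
 * slot. Returns NULL if the top-level entry is absent or an
 * allocation fails.
 */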
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		/* Don't dereference the just-failed allocation to report it. */
		pr_err("%s: p4d allocation failed\n", __func__);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pr_err("%s: pud allocation failed\n", __func__);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pr_err("%s: pmd allocation failed\n", __func__);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

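/*
 * Install a kernel mapping for @addr -> @phys and knock out any stale
 * local TLB entry for it. Entries marked _PAGE_WIRED are additionally
 * pinned into the TLB via tlb_wire_entry().
 */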
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)	/* __get_pte_phys() already complained */
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

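/* Tear down the mapping installed by set_pte_phys(), unwiring first. */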
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)	/* __get_pte_phys() already complained */
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

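/*
 * Fixmap plumbing: map/unmap a compile-time-fixed virtual address.
 * An out-of-range index is a programming error, hence BUG().
 */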
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

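/* Allocate and hook up a pmd page for @pud if one isn't already there. */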
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

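/* Likewise, allocate and hook up a pte page for @pmd if needed. */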
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

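/*
 * Hook apparently mirroring the x86 helper of the same name (which
 * sanity-checks kmap ptes); on sh it is a no-op that simply passes
 * the pte through.
 */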
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

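/*
 * Pre-populate every intermediate page-table level covering the
 * PMD-aligned range [start, end) so that later __set_fixmap() calls
 * only have to fill in leaf ptes. Both bounds are expected to be
 * PMD-aligned; the loops advance one PMD at a time until vaddr == end.
 */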
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

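/*
 * Set up the pglist_data for @nid. On NUMA the structure itself is
 * memblock-allocated; on !NUMA, NODE_DATA(0) is the statically
 * allocated contig_page_data and only its pfn span needs filling in.
 */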
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

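/* Register memblock ranges as active regions and bring node 0 online. */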
static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

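/*
 * Reserve everything that must survive boot: the kernel image itself,
 * the pages below CONFIG_ZERO_PAGE_OFFSET, and any initrd/crashkernel
 * regions, before the allocator is otherwise open for business.
 */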
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable, so we round upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this
	 * in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

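/*
 * Arch entry point for early memory setup: fix up the memblock view
 * of memory, size the zones (everything lives in ZONE_NORMAL here),
 * and prime swapper_pg_dir for fixmap use.
 */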
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
	set_max_mapnr(max_low_pfn - min_low_pfn);

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

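/*
 * Set once mem_init() has run; consumers elsewhere in arch/sh use this
 * to tell whether the core allocators are available yet.
 */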
unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
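/*
 * Memory hotplug: with only ZONE_NORMAL present, adding memory boils
 * down to __add_pages()/__remove_pages() on the pfn range, with no
 * special pgprot handling (anything but PAGE_KERNEL is rejected).
 */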
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		pr_warn("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */