xref: /linux/arch/sh/mm/init.c (revision 6aacab308a5dfd222b2d23662bbae60c11007cfb)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

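/*
 * The kernel's master page table, used by init_mm and installed as the
 * initial MMU.TTB value in paging_init().
 */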
pgd_t swapper_pg_dir[PTRS_PER_PGD];

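/*
 * Default memory registration: hand the platform's view of RAM
 * (__MEMORY_START/__MEMORY_SIZE) over to memblock.
 */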
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

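/*
 * Weak default, so that boards with no special memory setup of their
 * own need not provide an override.
 */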
void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
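/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels, and return a pointer to the PTE. Returns NULL
 * if the top-level entry is empty or an intermediate allocation fails.
 */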
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	/*
	 * On allocation failure the pointer is NULL, so it must not be
	 * handed to the *_ERROR() macros, which dereference their
	 * argument.
	 */
	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d))
		return NULL;

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud))
		return NULL;

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

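/*
 * Install a kernel mapping for a single page at @addr pointing at
 * physical address @phys with protection @prot, and flush the local
 * TLB entry. Mappings with _PAGE_WIRED set are additionally locked
 * into the TLB.
 */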
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)	/* page table allocation failed */
		return;
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

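/*
 * Tear down a mapping previously established by set_pte_phys(),
 * unwiring the TLB entry first if the mapping was wired.
 */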
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte)	/* page table allocation failed */
		return;

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

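/*
 * Map a fixmap slot: translate the fixmap index to its fixed virtual
 * address and point it at @phys.
 */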
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

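/*
 * Unmap a previously mapped fixmap slot.
 */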
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

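/*
 * A minimal usage sketch for the pair above (FIX_EXAMPLE is a
 * hypothetical slot, not one defined by this port):
 *
 *	__set_fixmap(FIX_EXAMPLE, phys, PAGE_KERNEL);
 *	ptr = (void *)__fix_to_virt(FIX_EXAMPLE);
 *	...
 *	__clear_fixmap(FIX_EXAMPLE, PAGE_KERNEL);
 */

/*
 * Return the pmd table covering @pud, allocating and hooking up a
 * fresh page of pmd entries if the pud is still empty.
 */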
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

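/*
 * Return the pte table covering @pmd, allocating and hooking up a
 * fresh page of pte entries if the pmd is still empty.
 */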
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

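/*
 * Hook mirrored from the i386 code this file is based on; sh has no
 * kmap fixups to apply here, so @pte is simply passed through.
 */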
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

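/*
 * Pre-populate the pmd and pte tables for the kernel virtual range
 * [start, end) so that PTE slots exist before the page allocator is
 * up; used for the fixmap region from paging_init().
 */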
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

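/*
 * Record the spanned pfn range for node @nid, allocating its pg_data_t
 * first in the NUMA case.
 */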
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
	alloc_node_data(nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

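/*
 * Register every memblock range as an active region, bring node 0
 * online, and give the platform its setup hook.
 */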
static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();
}

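/*
 * Reserve everything that must survive the early allocator: the kernel
 * image, the pages below CONFIG_ZERO_PAGE_OFFSET, the initrd and the
 * crashkernel region.
 */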
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

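/*
 * Only ZONE_NORMAL is used here; it extends to the top of low memory.
 */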
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
}

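/*
 * Bring up everything needed for a working kernel address space:
 * memblock, the node data, swapper_pg_dir and the fixmap page tables.
 */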
void __init paging_init(void)
{
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();
}

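/*
 * Set once mem_init() has run, for code that must distinguish
 * early boot from normal operation.
 */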
unsigned int mem_init_done = 0;

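/*
 * Late memory bring-up: initialize the caches, prepare the zero page
 * and report the virtual memory layout.
 */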
void __init mem_init(void)
{
	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}
375