// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/sizes.h>

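/*
 * Kernel master page directory; cleared and installed in the MMU's TTB
 * register by paging_init() below.
 */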
pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}
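
/*
 * Illustrative only: a board file can override the weak plat_mem_setup()
 * above to register its own memory. The base address and size below are
 * hypothetical, not taken from any real board:
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		memblock_add(0x0c000000, SZ_64M);
 *	}
 */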

#ifdef CONFIG_MMU
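/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels, and return the PTE slot (NULL on failure).
 */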
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}
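
/*
 * Sketch of typical usage (index and pgprot chosen for illustration):
 * a fixmap slot is mapped and later torn down with matching pgprot
 * flags, e.g.
 *
 *	__set_fixmap(FIX_UNCACHED, phys, PAGE_KERNEL_NOCACHE);
 *	...
 *	__clear_fixmap(FIX_UNCACHED, PAGE_KERNEL_NOCACHE);
 *
 * A wired mapping (pgprot with _PAGE_WIRED set) is additionally pinned
 * into the TLB by set_pte_phys() and released by clear_pte_phys().
 */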

static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

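/*
 * No-op on sh: this hook mirrors the helper of the same name in the x86
 * version of page_table_range_init(), which fixes up kmap PTEs there.
 */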
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
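
/*
 * Note for callers: both bounds must be PMD-aligned, since the loops
 * above advance vaddr in PMD_SIZE steps and compare with '!='. A sketch
 * of a call (with hypothetical 'start'/'stop' values):
 *
 *	vaddr = start & PMD_MASK;
 *	end = (stop + PMD_SIZE - 1) & PMD_MASK;
 *	page_table_range_init(vaddr, end, swapper_pg_dir);
 */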
#endif	/* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and what was historically the bootmem
	 * bitmap. Keeping this as a separate step catches the (definitely
	 * buggy) case of us accidentally initializing the allocator with
	 * an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}
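
/*
 * Worked example with hypothetical values: if __MEMORY_START is
 * 0x08000000, PHYSICAL_OFFSET is 0 and CONFIG_ZERO_PAGE_OFFSET is
 * 0x1000, the first memblock_reserve() above covers 0x08001000 up to
 * the page-rounded end of the kernel image, and the second one reserves
 * the 0x1000 bytes below that, starting at 0x08000000.
 */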

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
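	/*
	 * Both bounds are rounded to PMD_SIZE below so that whole page
	 * tables are instantiated even when the fixmap range does not
	 * start or end on a PMD boundary.
	 */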
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	if (unlikely(ret))
		pr_err("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
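
/*
 * Illustrative only (hypothetical nid/start/size): the generic hotplug
 * path arrives here via add_memory(), e.g.
 *
 *	ret = add_memory(0, 0x10000000ULL, SZ_128M);
 *
 * which eventually calls arch_add_memory() above, with ZONE_NORMAL as
 * the only possible target zone on sh.
 */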

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */