xref: /linux/arch/powerpc/mm/mem.c (revision 55d0969c451159cff86949b38c39171cab962069)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
#include <linux/execmem.h>
#include <linux/vmalloc.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit __initdata;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

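/*
 * Decide the protection for user mappings of a physical range (e.g. via
 * /dev/mem): defer to the platform hook if one is registered, otherwise
 * map anything that is not RAM as non-cacheable.
 */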
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(__phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
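/* Resolve the NUMA node that a hot-added physical address belongs to. */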
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

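/*
 * Weak fallbacks; MMU flavours that support memory hotplug override these
 * with real implementations.
 */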
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

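/*
 * Add a hot-plugged physical range to the kernel linear mapping, serialised
 * against concurrent hotplug operations.
 */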
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();
}

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

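/*
 * Add pages to the memory map, then update max_pfn/max_low_pfn/high_memory
 * so the new range is visible to the rest of the kernel.
 */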
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

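/*
 * Create the linear mapping first so the new range is accessible, then add
 * the pages; undo the mapping if adding the pages fails.
 */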
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

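/* Inverse of arch_add_memory(): remove the pages, then the linear mapping. */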
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif

#ifndef CONFIG_NUMA
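/*
 * Non-NUMA topology setup: all memory is assigned to node 0 and the pfn
 * limits come straight from memblock.
 */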
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit zone_dma_limit to inform
 * the generic DMA mapping code.  32-bit only devices (if not handled by an
 * IOMMU anyway) will take a first dip into ZONE_NORMAL and get otherwise
 * served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	int zone_dma_bits;

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

	zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

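/*
 * Late memory initialisation: set up swiotlb and KASAN, release all memblock
 * memory to the buddy allocator (including highmem pages on 32-bit) and print
 * the kernel virtual memory layout on PPC32.
 */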
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

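/* Mark the init sections non-executable and free them once boot is done. */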
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}

/*
 * System memory should not be in /proc/iomem, but various tools (e.g. kdump)
 * expect it to be there.
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
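/*
 * Pre-allocate the PTE tables covering the module area so that later execmem
 * allocations never have to populate page directories there; presumably
 * needed because the 8xx and 603 software TLB miss handlers walk the kernel
 * page tables directly.
 */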
static void prealloc_execmem_pgtable(void)
{
	unsigned long va;

	for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE)
		pte_alloc_kernel(pmd_off_k(va), va);
}
#else
static void prealloc_execmem_pgtable(void) { }
#endif

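/*
 * Describe where executable memory (module text, kprobes, module data) may be
 * allocated and with which protections.
 */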
struct execmem_info __init *execmem_arch_setup(void)
{
	pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
	unsigned long fallback_start = 0, fallback_end = 0;
	unsigned long start, end;

	/*
	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
	 * allow allocating data in the entire vmalloc space.
	 */
#ifdef MODULES_VADDR
	unsigned long limit = (unsigned long)_etext - SZ_32M;

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);

	/* First try within 32M limit from _etext to avoid branch trampolines */
	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
		start = limit;
		fallback_start = MODULES_VADDR;
		fallback_end = MODULES_END;
	} else {
		start = MODULES_VADDR;
	}

	end = MODULES_END;
#else
	start = VMALLOC_START;
	end = VMALLOC_END;
#endif

	prealloc_execmem_pgtable();

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= start,
				.end	= end,
				.pgprot	= prot,
				.alignment = 1,
				.fallback_start	= fallback_start,
				.fallback_end	= fallback_end,
			},
			[EXECMEM_KPROBES] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= kprobes_prot,
				.alignment = 1,
			},
			[EXECMEM_MODULE_DATA] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */