xref: /linux/arch/powerpc/mm/mem.c (revision 6aacab308a5dfd222b2d23662bbae60c11007cfb)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
#include <linux/execmem.h>
#include <linux/vmalloc.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/text-patching.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit __initdata;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(__phys_mem_access_prot);
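
/*
 * Hedged usage sketch for the helper above (hypothetical driver mmap
 * handler, not part of this file): callers typically run a physical range
 * through it before remapping to userspace, so non-RAM is mapped uncached:
 *
 *	vma->vm_page_prot = __phys_mem_access_prot(pfn, size,
 *						   vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */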

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}
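
/*
 * Worked example for the helper above (hypothetical numbers): hot-adding
 * 1 GiB at the 4 GiB boundary with 4K pages gives
 * end_pfn = PFN_UP(0x140000000) = 0x140000; if that exceeds the current
 * max_pfn, max_pfn and max_low_pfn move up and high_memory follows.
 */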

int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif

#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

/* mark pages that don't exist as nosave */
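/*
 * Illustrative layout (hypothetical PFNs): with RAM at [0x0, 0x1000) and
 * [0x2000, 0x3000), the loop below ends the first pass with prev = 0x1000
 * and then registers the hole [0x1000, 0x2000) as nosave, so hibernation
 * skips pages that do not exist.
 */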
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the
 * generic DMA mapping code. 32-bit-only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min((zone_dma_limit >> PAGE_SHIFT) + 1, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
}
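
/*
 * Worked example for the helper above: with zone_dma_limit =
 * DMA_BIT_MASK(31) = 0x7fffffff (the 64-bit case set in paging_init())
 * and 4K pages, (0x7fffffff >> 12) + 1 = 0x80000, so ZONE_DMA covers the
 * first 2 GiB of PFNs, clamped to max_low_pfn on smaller systems.
 */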

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	int zone_dma_bits;

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

	zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);

	mark_nonram_nosave();
}

void __init arch_mm_preinit(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	kasan_late_init();

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU-up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
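/*
 * For illustration, each region inserted below appears in /proc/iomem as a
 * line like (addresses hypothetical):
 *
 *	00000000-7fffffff : System RAM
 */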
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(insert_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
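
/*
 * Hedged usage sketch for the export above (hypothetical caller, not from
 * this file): a module can total up System RAM pages like so:
 *
 *	static int count_ram(unsigned long start_pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		*(u64 *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	u64 total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram);
 */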

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
static void prealloc_execmem_pgtable(void)
{
	unsigned long va;

	for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE)
		pte_alloc_kernel(pmd_off_k(va), va);
}
#else
static void prealloc_execmem_pgtable(void) { }
#endif

struct execmem_info __init *execmem_arch_setup(void)
{
	pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
	unsigned long fallback_start = 0, fallback_end = 0;
	unsigned long start, end;

	/*
	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
	 * allow allocating data in the entire vmalloc space.
	 */
#ifdef MODULES_VADDR
	unsigned long limit = (unsigned long)_etext - SZ_32M;

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);

	/* First try within 32M limit from _etext to avoid branch trampolines */
	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
		start = limit;
		fallback_start = MODULES_VADDR;
		fallback_end = MODULES_END;
	} else {
		start = MODULES_VADDR;
	}

	end = MODULES_END;
#else
	start = VMALLOC_START;
	end = VMALLOC_END;
#endif
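
	/*
	 * Illustrative numbers for the 32M window above (hypothetical
	 * BOOK3S_32 layout): with _etext at 0xc0800000, limit is
	 * 0xbe800000, so text allocations are first tried in
	 * [0xbe800000, MODULES_END) to stay within direct-branch (+/-32M)
	 * range of the kernel, keeping the full [MODULES_VADDR, MODULES_END)
	 * window as a fallback.
	 */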

	prealloc_execmem_pgtable();

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= start,
				.end	= end,
				.pgprot	= prot,
				.alignment = 1,
				.fallback_start	= fallback_start,
				.fallback_end	= fallback_end,
			},
			[EXECMEM_KPROBES] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= kprobes_prot,
				.alignment = 1,
			},
			[EXECMEM_MODULE_DATA] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */