// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

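/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to find the
 * PTE that maps a kernel virtual address.
 */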
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

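/*
 * Decide the protection for a userspace mapping of physical memory
 * (e.g. /dev/mem): let the platform override if it wants, otherwise
 * map anything that is not RAM non-cacheable.
 */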
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

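/*
 * Weak fallbacks: MMUs that support memory hotplug override these to
 * create and tear down linear-mapping entries for the affected range;
 * the defaults just report that hotplug is unsupported.
 */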
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

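/*
 * Hot-add path: grow the hash page table if needed, map the new range
 * in the kernel linear mapping, then hand the pages to the core mm.
 */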
int __ref arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

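/*
 * Hot-remove path: return the pages to the core mm, flush the dcache
 * for the range, tear down the linear mapping, then shrink the hash
 * page table if possible.
 */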
void __ref arch_remove_memory(int nid, u64 start, u64 size,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
	int ret;

	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
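/*
 * Flat memory setup: without NUMA, all of RAM belongs to a single
 * node 0.
 */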
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to cover the low 31 bits of the address space (30
 * bits on PPC32, see paging_init() below) and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back
 * to ZONE_DMA.
 *
 * By bounding ZONE_DMA this way, we can exploit zone_dma_bits to inform
 * the generic DMA mapping code.  32-bit only devices (if not handled by
 * an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
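/*
 * Worked example (assuming a 64K PAGE_SIZE, so PAGE_SHIFT == 16):
 * zone_dma_bits = 31 gives 1UL << (31 - 16) = 32768 pfns, i.e. ZONE_DMA
 * covers the first 2GB of RAM, clamped to max_low_pfn on smaller boxes.
 */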
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

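/*
 * Late init: hand all memblock-managed memory (including highmem) over
 * to the buddy allocator and print the resulting virtual memory layout.
 */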
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions. Do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

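/*
 * Free the __init sections. The progress hook is repointed to the
 * printk-based version first, as the earlier hook may live in (or
 * depend on) init memory that is about to go away.
 */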
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 * Return: true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		icbi((void *)addr);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}

/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
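	/*
	 * Operands: %0/%1 cursors for the dcbst/icbi loops, %2 number of
	 * cache blocks in the page, %3 MSR with data relocation (DR)
	 * cleared, %4 cache block size, %5 original MSR to restore.
	 */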
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst   0, %0;\n"
		"   addi    %0, %0, %4;\n"
		"   bdnz    0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi    0, %1;\n"
		"   addi    %1, %1, %4;\n"
		"   bdnz    1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

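/*
 * Write a page's data cache blocks back to memory and invalidate the
 * matching icache blocks, choosing a mapping strategy that works even
 * when the page has no permanent kernel mapping (highmem).
 */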
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* No highmem on 8xx or 64-bit, so no need to kmap */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (cpu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

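/*
 * Called after the kernel has written instructions into a user page
 * (e.g. breakpoint insertion via ptrace): map the page and flush the
 * modified range so the icache does not execute stale code.
 */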
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
637