xref: /linux/arch/powerpc/mm/mem.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
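
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * character driver's mmap handler could use phys_mem_access_prot() to
 * pick cacheability before mapping physical pages, much as the /dev/mem
 * code does.  Non-RAM pfns come back non-cached; RAM keeps the caller's
 * default.  Kept under #if 0 so it is never compiled.
 */
#if 0
static int example_phys_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif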

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes, and invoke the callback on each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
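
/*
 * Illustrative sketch (not part of the original file): a caller can sum
 * RAM pages by passing a callback that accumulates each contiguous span;
 * returning non-zero from the callback stops the walk early.  Kept under
 * #if 0 so it is never compiled.
 */
#if 0
static int example_count_span(unsigned long start_pfn,
			      unsigned long nr_pages, void *arg)
{
	*(unsigned long *)arg += nr_pages;
	return 0;	/* keep walking */
}

static unsigned long example_total_ram_pages(void)
{
	unsigned long total = 0;

	walk_system_ram_range(0, max_pfn, &total, example_count_span);
	return total;
}
#endif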

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
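
/*
 * Illustrative sketch (not part of the original file): platform setup
 * code with, say, a 31-bit DMA engine could clamp the DMA zone before
 * paging_init(); dma_pfn_limit_to_zone() can later translate a device's
 * pfn limit back to a zone index.  The 31-bit limit is a made-up example.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static void __init example_setup_dma_zone(void)
{
	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));
}
#endif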

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
	 * bring-up functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
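
/*
 * Illustrative sketch (an assumption, not the kernel's actual fault-path
 * code): the lazy scheme set up by flush_dcache_page() above pairs with
 * a consumer roughly like this -- flush when the page is first given to
 * userspace, then record it as icache-clean via PG_arch_1.  Kept under
 * #if 0 so it is never compiled.
 */
#if 0
static void example_make_icache_clean(struct page *page)
{
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}
#endif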

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
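
/*
 * Illustrative sketch (not part of the original file): a ptrace-style
 * caller that writes instructions into another process's page would
 * flush afterwards so stale icache lines are never executed.  The helper
 * name is made up for illustration; kept under #if 0.
 */
#if 0
static void example_poke_text(struct vm_area_struct *vma, struct page *page,
			      unsigned long addr, void *insns, int len)
{
	void *maddr = kmap(page);

	memcpy(maddr + (addr & ~PAGE_MASK), insns, len);
	kunmap(page);
	flush_icache_user_range(vma, page, addr, len);
}
#endif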

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or the ptl lock held.
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem, but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI MMIO resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
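
/*
 * Illustrative sketch (not part of the original file): this mirrors how
 * the /dev/mem core typically consults devmem_is_allowed() for every
 * page of a requested range before permitting access.  The helper name
 * is made up for illustration; kept under #if 0.
 */
#if 0
static int example_range_is_allowed(unsigned long pfn, unsigned long size)
{
	unsigned long end_pfn = pfn + (size >> PAGE_SHIFT);

	for (; pfn < end_pfn; pfn++)
		if (!devmem_is_allowed(pfn))
			return 0;
	return 1;
}
#endif
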
#endif /* CONFIG_STRICT_DEVMEM */