/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
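
/*
 * A minimal sketch (not built) of how the generic hotplug path reaches
 * arch_add_memory(): core mm's add_memory() allocates the memory resource
 * and then calls into the arch hook.  The node id and addresses below are
 * hypothetical, purely for illustration.
 */
#if 0
static int __init example_hot_add(void)
{
	/* hot-add 256MB at physical 0x40000000 on node 0 (made-up values) */
	return add_memory(0, 0x40000000, 0x10000000);
}
#endif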

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
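
/*
 * Example (not built): a callback suitable for walk_system_ram_range().
 * It is handed a starting pfn and a page count for each contiguous RAM
 * chunk; returning 0 keeps the walk going.  The names here are
 * hypothetical, for illustration only.
 */
#if 0
static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
			   void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;	/* non-zero would stop the walk */
}

/* walk_system_ram_range(0, max_pfn, &total, count_ram_pages); */
#endif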

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
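	/*
	 * Worked example (assuming 4KB pages): 512MB of memory is 131072
	 * pages; one bit per page gives 131072 / 8 = 16KB of bitmap,
	 * which bootmem_bootmap_pages() rounds up to four pages.
	 */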
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
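
/*
 * For example (hypothetical layout): with memory regions covering pfns
 * [0, 0x40000) and [0x80000, 0xc0000), the loop above registers the gap
 * [0x40000, 0x80000) as a nosave region so hibernation skips it.
 */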

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init(1);
#endif

	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk(KERN_DEBUG "freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (memblock_is_reserved(paddr))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
	 * bring-up functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

void free_initmem(void)
{
	unsigned long addr;

	ppc_md.progress = ppc_printk_progress;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end -
		(unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);
	pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
		__flush_dcache_icache(start);
		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap: neither 8xx nor 64-bit supports highmem,
	 * so the page is always directly addressable. */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)		/* instruction storage interrupt */
		access |= _PAGE_EXEC;
	else if (trap != 0x300)		/* not a data storage interrupt */
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
#endif
}
558