/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

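/*
 * Set once the corresponding init stage has completed; callers use these
 * to decide which allocator is safe to use.  memory_limit is the optional
 * cap on usable RAM (e.g. from the mem= command line option).
 */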
int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
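/*
 * kmap_pte points at the PTE table backing the kmap_atomic fixmap slots;
 * both it and kmap_prot are initialized in paging_init() below.
 */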
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

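/*
 * Returns 1 if the page frame is backed by RAM.  On 32-bit every pfn
 * below max_pfn is RAM; on 64-bit we scan the memblock memory regions.
 */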
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	int i;

	for (i = 0; i < memblock.memory.cnt; i++) {
		unsigned long base;

		base = memblock.memory.region[i].base;

		if ((paddr >= base) &&
			(paddr < (base + memblock.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}

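/*
 * Let the platform override the protection used for physical memory
 * mappings; by default anything that is not RAM is mapped non-cached.
 */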
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
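/* Resolve the NUMA node for a hot-added physical address range. */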
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

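/*
 * Hot-add a physical memory range: extend the kernel linear mapping to
 * cover it, then hand the new page frames to the first zone of the node.
 */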
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem; instead it is maintained in the memblock.memory
 * structures.  Walk through the memory regions, find holes, and invoke
 * the callback for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (memblock_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for (i = 0; i < memblock.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		unsigned long addr = memblock.reserved.region[i].base +
				     memblock_size_bytes(&memblock.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(memblock.reserved.region[i].base,
					memblock_size_bytes(&memblock.reserved, i),
					BOOTMEM_DEFAULT);
		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
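			/*
			 * The reserved region straddles lowmem_end_addr;
			 * only the lowmem portion is managed by bootmem,
			 * so clip the reservation to that part.
			 */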
			unsigned long adjusted_size = lowmem_end_addr -
				      memblock.reserved.region[i].base;
			reserve_bootmem(memblock.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < memblock.reserved.cnt; i++)
		reserve_bootmem(memblock.reserved.region[i].base,
				memblock_size_bytes(&memblock.reserved, i),
				BOOTMEM_DEFAULT);

#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* Mark the holes between memblock memory regions as nosave, so that
 * hibernation won't try to save or restore nonexistent pages */
static int __init mark_nonram_nosave(void)
{
	unsigned long memblock_next_region_start_pfn,
		      memblock_region_max_pfn;
	int i;

	for (i = 0; i < memblock.memory.cnt - 1; i++) {
		memblock_region_max_pfn =
			(memblock.memory.region[i].base >> PAGE_SHIFT) +
			(memblock.memory.region[i].size >> PAGE_SHIFT);
		memblock_next_region_start_pfn =
			memblock.memory.region[i+1].base >> PAGE_SHIFT;

		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
			register_nosave_region(memblock_region_max_pfn,
					       memblock_next_region_start_pfn);
	}

	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

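	/*
	 * Pre-instantiate page tables for the whole fixmap range; the
	 * physical address and flags passed here are dummies, the real
	 * mappings are installed later, one fixmap slot at a time.
	 */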
	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

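/*
 * mem_init() hands all boot memory over to the buddy allocator, counts
 * reserved pages, frees highmem by hand and prints the usual memory
 * accounting summary.
 */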
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init(1);
#endif

	num_physpages = memblock.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
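	/*
	 * Highmem pages were never given to bootmem, so release them to
	 * the buddy allocator by hand here, skipping any page that
	 * memblock has marked reserved.
	 */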
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (memblock_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

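/*
 * Flush a page out of the data cache and invalidate it in the
 * instruction cache.  How the page is reached depends on the platform:
 * on BookE the page may be in highmem and so must be kmapped first.
 */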
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
		__flush_dcache_icache(start);
		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap on 8xx or 64-bit since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

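/*
 * Make a range of a user page coherent for instruction fetch, e.g.
 * after the kernel has written instructions into another process's
 * text.  The page is kmapped because it may be in highmem.
 */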
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
530