1 /* 2 * PowerPC version 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4 * 5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) 6 * and Cort Dougan (PReP) (cort@cs.nmt.edu) 7 * Copyright (C) 1996 Paul Mackerras 8 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) 9 * 10 * Derived from "arch/i386/mm/init.c" 11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 12 * 13 * This program is free software; you can redistribute it and/or 14 * modify it under the terms of the GNU General Public License 15 * as published by the Free Software Foundation; either version 16 * 2 of the License, or (at your option) any later version. 17 * 18 */ 19 20 #include <linux/export.h> 21 #include <linux/sched.h> 22 #include <linux/kernel.h> 23 #include <linux/errno.h> 24 #include <linux/string.h> 25 #include <linux/gfp.h> 26 #include <linux/types.h> 27 #include <linux/mm.h> 28 #include <linux/stddef.h> 29 #include <linux/init.h> 30 #include <linux/bootmem.h> 31 #include <linux/highmem.h> 32 #include <linux/initrd.h> 33 #include <linux/pagemap.h> 34 #include <linux/suspend.h> 35 #include <linux/memblock.h> 36 #include <linux/hugetlb.h> 37 #include <linux/slab.h> 38 39 #include <asm/pgalloc.h> 40 #include <asm/prom.h> 41 #include <asm/io.h> 42 #include <asm/mmu_context.h> 43 #include <asm/pgtable.h> 44 #include <asm/mmu.h> 45 #include <asm/smp.h> 46 #include <asm/machdep.h> 47 #include <asm/btext.h> 48 #include <asm/tlb.h> 49 #include <asm/sections.h> 50 #include <asm/sparsemem.h> 51 #include <asm/vdso.h> 52 #include <asm/fixmap.h> 53 #include <asm/swiotlb.h> 54 55 #include "mmu_decl.h" 56 57 #ifndef CPU_FTR_COHERENT_ICACHE 58 #define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */ 59 #define CPU_FTR_NOEXECUTE 0 60 #endif 61 62 int init_bootmem_done; 63 int mem_init_done; 64 phys_addr_t memory_limit; 65 66 #ifdef CONFIG_HIGHMEM 67 pte_t *kmap_pte; 68 pgprot_t kmap_prot; 69 70 EXPORT_SYMBOL(kmap_prot); 71 EXPORT_SYMBOL(kmap_pte); 72 73 static inline pte_t *virt_to_kpte(unsigned long vaddr) 74 { 75 return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), 76 vaddr), vaddr), vaddr); 77 } 78 #endif 79 80 int page_is_ram(unsigned long pfn) 81 { 82 #ifndef CONFIG_PPC64 /* XXX for now */ 83 return pfn < max_pfn; 84 #else 85 unsigned long paddr = (pfn << PAGE_SHIFT); 86 struct memblock_region *reg; 87 88 for_each_memblock(memory, reg) 89 if (paddr >= reg->base && paddr < (reg->base + reg->size)) 90 return 1; 91 return 0; 92 #endif 93 } 94 95 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 96 unsigned long size, pgprot_t vma_prot) 97 { 98 if (ppc_md.phys_mem_access_prot) 99 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot); 100 101 if (!page_is_ram(pfn)) 102 vma_prot = pgprot_noncached(vma_prot); 103 104 return vma_prot; 105 } 106 EXPORT_SYMBOL(phys_mem_access_prot); 107 108 #ifdef CONFIG_MEMORY_HOTPLUG 109 110 #ifdef CONFIG_NUMA 111 int memory_add_physaddr_to_nid(u64 start) 112 { 113 return hot_add_scn_to_nid(start); 114 } 115 #endif 116 117 int arch_add_memory(int nid, u64 start, u64 size) 118 { 119 struct pglist_data *pgdata; 120 struct zone *zone; 121 unsigned long start_pfn = start >> PAGE_SHIFT; 122 unsigned long nr_pages = size >> PAGE_SHIFT; 123 124 pgdata = NODE_DATA(nid); 125 126 start = (unsigned long)__va(start); 127 if (create_section_mapping(start, start + size)) 128 return -EINVAL; 129 130 /* this should work for most non-highmem platforms */ 131 zone = pgdata->node_zones; 132 133 return __add_pages(nid, zone, start_pfn, nr_pages); 134 } 135 #endif /* CONFIG_MEMORY_HOTPLUG */ 136 137 /* 138 * walk_memory_resource() needs to make sure there is no holes in a given 139 * memory range. PPC64 does not maintain the memory layout in /proc/iomem. 140 * Instead it maintains it in memblock.memory structures. Walk through the 141 * memory regions, find holes and callback for contiguous regions. 142 */ 143 int 144 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, 145 void *arg, int (*func)(unsigned long, unsigned long, void *)) 146 { 147 struct memblock_region *reg; 148 unsigned long end_pfn = start_pfn + nr_pages; 149 unsigned long tstart, tend; 150 int ret = -1; 151 152 for_each_memblock(memory, reg) { 153 tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); 154 tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); 155 if (tstart >= tend) 156 continue; 157 ret = (*func)(tstart, tend - tstart, arg); 158 if (ret) 159 break; 160 } 161 return ret; 162 } 163 EXPORT_SYMBOL_GPL(walk_system_ram_range); 164 165 /* 166 * Initialize the bootmem system and give it all the memory we 167 * have available. If we are using highmem, we only put the 168 * lowmem into the bootmem system. 169 */ 170 #ifndef CONFIG_NEED_MULTIPLE_NODES 171 void __init do_init_bootmem(void) 172 { 173 unsigned long start, bootmap_pages; 174 unsigned long total_pages; 175 struct memblock_region *reg; 176 int boot_mapsize; 177 178 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 179 total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT; 180 #ifdef CONFIG_HIGHMEM 181 total_pages = total_lowmem >> PAGE_SHIFT; 182 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; 183 #endif 184 185 /* 186 * Find an area to use for the bootmem bitmap. Calculate the size of 187 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE. 188 * Add 1 additional page in case the address isn't page-aligned. 189 */ 190 bootmap_pages = bootmem_bootmap_pages(total_pages); 191 192 start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); 193 194 min_low_pfn = MEMORY_START >> PAGE_SHIFT; 195 boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); 196 197 /* Add active regions with valid PFNs */ 198 for_each_memblock(memory, reg) { 199 unsigned long start_pfn, end_pfn; 200 start_pfn = memblock_region_memory_base_pfn(reg); 201 end_pfn = memblock_region_memory_end_pfn(reg); 202 add_active_range(0, start_pfn, end_pfn); 203 } 204 205 /* Add all physical memory to the bootmem map, mark each area 206 * present. 207 */ 208 #ifdef CONFIG_HIGHMEM 209 free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); 210 211 /* reserve the sections we're already using */ 212 for_each_memblock(reserved, reg) { 213 unsigned long top = reg->base + reg->size - 1; 214 if (top < lowmem_end_addr) 215 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 216 else if (reg->base < lowmem_end_addr) { 217 unsigned long trunc_size = lowmem_end_addr - reg->base; 218 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); 219 } 220 } 221 #else 222 free_bootmem_with_active_regions(0, max_pfn); 223 224 /* reserve the sections we're already using */ 225 for_each_memblock(reserved, reg) 226 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 227 #endif 228 /* XXX need to clip this if using highmem? */ 229 sparse_memory_present_with_active_regions(0); 230 231 init_bootmem_done = 1; 232 } 233 234 /* mark pages that don't exist as nosave */ 235 static int __init mark_nonram_nosave(void) 236 { 237 struct memblock_region *reg, *prev = NULL; 238 239 for_each_memblock(memory, reg) { 240 if (prev && 241 memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) 242 register_nosave_region(memblock_region_memory_end_pfn(prev), 243 memblock_region_memory_base_pfn(reg)); 244 prev = reg; 245 } 246 return 0; 247 } 248 249 /* 250 * paging_init() sets up the page tables - in fact we've already done this. 251 */ 252 void __init paging_init(void) 253 { 254 unsigned long long total_ram = memblock_phys_mem_size(); 255 phys_addr_t top_of_ram = memblock_end_of_DRAM(); 256 unsigned long max_zone_pfns[MAX_NR_ZONES]; 257 258 #ifdef CONFIG_PPC32 259 unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1); 260 unsigned long end = __fix_to_virt(FIX_HOLE); 261 262 for (; v < end; v += PAGE_SIZE) 263 map_page(v, 0, 0); /* XXX gross */ 264 #endif 265 266 #ifdef CONFIG_HIGHMEM 267 map_page(PKMAP_BASE, 0, 0); /* XXX gross */ 268 pkmap_page_table = virt_to_kpte(PKMAP_BASE); 269 270 kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); 271 kmap_prot = PAGE_KERNEL; 272 #endif /* CONFIG_HIGHMEM */ 273 274 printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n", 275 (unsigned long long)top_of_ram, total_ram); 276 printk(KERN_DEBUG "Memory hole size: %ldMB\n", 277 (long int)((top_of_ram - total_ram) >> 20)); 278 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 279 #ifdef CONFIG_HIGHMEM 280 max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT; 281 max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT; 282 #else 283 max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; 284 #endif 285 free_area_init_nodes(max_zone_pfns); 286 287 mark_nonram_nosave(); 288 } 289 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ 290 291 void __init mem_init(void) 292 { 293 #ifdef CONFIG_NEED_MULTIPLE_NODES 294 int nid; 295 #endif 296 pg_data_t *pgdat; 297 unsigned long i; 298 struct page *page; 299 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; 300 301 #ifdef CONFIG_SWIOTLB 302 if (ppc_swiotlb_enable) 303 swiotlb_init(1); 304 #endif 305 306 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; 307 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 308 309 #ifdef CONFIG_NEED_MULTIPLE_NODES 310 for_each_online_node(nid) { 311 if (NODE_DATA(nid)->node_spanned_pages != 0) { 312 printk("freeing bootmem node %d\n", nid); 313 totalram_pages += 314 free_all_bootmem_node(NODE_DATA(nid)); 315 } 316 } 317 #else 318 max_mapnr = max_pfn; 319 totalram_pages += free_all_bootmem(); 320 #endif 321 for_each_online_pgdat(pgdat) { 322 for (i = 0; i < pgdat->node_spanned_pages; i++) { 323 if (!pfn_valid(pgdat->node_start_pfn + i)) 324 continue; 325 page = pgdat_page_nr(pgdat, i); 326 if (PageReserved(page)) 327 reservedpages++; 328 } 329 } 330 331 codesize = (unsigned long)&_sdata - (unsigned long)&_stext; 332 datasize = (unsigned long)&_edata - (unsigned long)&_sdata; 333 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; 334 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; 335 336 #ifdef CONFIG_HIGHMEM 337 { 338 unsigned long pfn, highmem_mapnr; 339 340 highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT; 341 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { 342 phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; 343 struct page *page = pfn_to_page(pfn); 344 if (memblock_is_reserved(paddr)) 345 continue; 346 ClearPageReserved(page); 347 init_page_count(page); 348 __free_page(page); 349 totalhigh_pages++; 350 reservedpages--; 351 } 352 totalram_pages += totalhigh_pages; 353 printk(KERN_DEBUG "High memory: %luk\n", 354 totalhigh_pages << (PAGE_SHIFT-10)); 355 } 356 #endif /* CONFIG_HIGHMEM */ 357 358 #if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP) 359 /* 360 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up 361 * functions.... do it here for the non-smp case. 362 */ 363 per_cpu(next_tlbcam_idx, smp_processor_id()) = 364 (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; 365 #endif 366 367 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " 368 "%luk reserved, %luk data, %luk bss, %luk init)\n", 369 nr_free_pages() << (PAGE_SHIFT-10), 370 num_physpages << (PAGE_SHIFT-10), 371 codesize >> 10, 372 reservedpages << (PAGE_SHIFT-10), 373 datasize >> 10, 374 bsssize >> 10, 375 initsize >> 10); 376 377 #ifdef CONFIG_PPC32 378 pr_info("Kernel virtual memory layout:\n"); 379 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP); 380 #ifdef CONFIG_HIGHMEM 381 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n", 382 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP)); 383 #endif /* CONFIG_HIGHMEM */ 384 #ifdef CONFIG_NOT_COHERENT_CACHE 385 pr_info(" * 0x%08lx..0x%08lx : consistent mem\n", 386 IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE); 387 #endif /* CONFIG_NOT_COHERENT_CACHE */ 388 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n", 389 ioremap_bot, IOREMAP_TOP); 390 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n", 391 VMALLOC_START, VMALLOC_END); 392 #endif /* CONFIG_PPC32 */ 393 394 mem_init_done = 1; 395 } 396 397 void free_initmem(void) 398 { 399 unsigned long addr; 400 401 ppc_md.progress = ppc_printk_progress; 402 403 addr = (unsigned long)__init_begin; 404 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { 405 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); 406 ClearPageReserved(virt_to_page(addr)); 407 init_page_count(virt_to_page(addr)); 408 free_page(addr); 409 totalram_pages++; 410 } 411 pr_info("Freeing unused kernel memory: %luk freed\n", 412 ((unsigned long)__init_end - 413 (unsigned long)__init_begin) >> 10); 414 } 415 416 #ifdef CONFIG_BLK_DEV_INITRD 417 void __init free_initrd_mem(unsigned long start, unsigned long end) 418 { 419 if (start >= end) 420 return; 421 422 start = _ALIGN_DOWN(start, PAGE_SIZE); 423 end = _ALIGN_UP(end, PAGE_SIZE); 424 pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); 425 426 for (; start < end; start += PAGE_SIZE) { 427 ClearPageReserved(virt_to_page(start)); 428 init_page_count(virt_to_page(start)); 429 free_page(start); 430 totalram_pages++; 431 } 432 } 433 #endif 434 435 /* 436 * This is called when a page has been modified by the kernel. 437 * It just marks the page as not i-cache clean. We do the i-cache 438 * flush later when the page is given to a user process, if necessary. 439 */ 440 void flush_dcache_page(struct page *page) 441 { 442 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) 443 return; 444 /* avoid an atomic op if possible */ 445 if (test_bit(PG_arch_1, &page->flags)) 446 clear_bit(PG_arch_1, &page->flags); 447 } 448 EXPORT_SYMBOL(flush_dcache_page); 449 450 void flush_dcache_icache_page(struct page *page) 451 { 452 #ifdef CONFIG_HUGETLB_PAGE 453 if (PageCompound(page)) { 454 flush_dcache_icache_hugepage(page); 455 return; 456 } 457 #endif 458 #ifdef CONFIG_BOOKE 459 { 460 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); 461 __flush_dcache_icache(start); 462 kunmap_atomic(start, KM_PPC_SYNC_ICACHE); 463 } 464 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) 465 /* On 8xx there is no need to kmap since highmem is not supported */ 466 __flush_dcache_icache(page_address(page)); 467 #else 468 __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); 469 #endif 470 } 471 472 void clear_user_page(void *page, unsigned long vaddr, struct page *pg) 473 { 474 clear_page(page); 475 476 /* 477 * We shouldn't have to do this, but some versions of glibc 478 * require it (ld.so assumes zero filled pages are icache clean) 479 * - Anton 480 */ 481 flush_dcache_page(pg); 482 } 483 EXPORT_SYMBOL(clear_user_page); 484 485 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 486 struct page *pg) 487 { 488 copy_page(vto, vfrom); 489 490 /* 491 * We should be able to use the following optimisation, however 492 * there are two problems. 493 * Firstly a bug in some versions of binutils meant PLT sections 494 * were not marked executable. 495 * Secondly the first word in the GOT section is blrl, used 496 * to establish the GOT address. Until recently the GOT was 497 * not marked executable. 498 * - Anton 499 */ 500 #if 0 501 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) 502 return; 503 #endif 504 505 flush_dcache_page(pg); 506 } 507 508 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 509 unsigned long addr, int len) 510 { 511 unsigned long maddr; 512 513 maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK); 514 flush_icache_range(maddr, maddr + len); 515 kunmap(page); 516 } 517 EXPORT_SYMBOL(flush_icache_user_range); 518 519 /* 520 * This is called at the end of handling a user page fault, when the 521 * fault has been handled by updating a PTE in the linux page tables. 522 * We use it to preload an HPTE into the hash table corresponding to 523 * the updated linux PTE. 524 * 525 * This must always be called with the pte lock held. 526 */ 527 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 528 pte_t *ptep) 529 { 530 #ifdef CONFIG_PPC_STD_MMU 531 unsigned long access = 0, trap; 532 533 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ 534 if (!pte_young(*ptep) || address >= TASK_SIZE) 535 return; 536 537 /* We try to figure out if we are coming from an instruction 538 * access fault and pass that down to __hash_page so we avoid 539 * double-faulting on execution of fresh text. We have to test 540 * for regs NULL since init will get here first thing at boot 541 * 542 * We also avoid filling the hash if not coming from a fault 543 */ 544 if (current->thread.regs == NULL) 545 return; 546 trap = TRAP(current->thread.regs); 547 if (trap == 0x400) 548 access |= _PAGE_EXEC; 549 else if (trap != 0x300) 550 return; 551 hash_preload(vma->vm_mm, address, access, trap); 552 #endif /* CONFIG_PPC_STD_MMU */ 553 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ 554 && defined(CONFIG_HUGETLB_PAGE) 555 if (is_vm_hugetlb_page(vma)) 556 book3e_hugetlb_preload(vma->vm_mm, address, *ptep); 557 #endif 558 } 559 560 /* 561 * System memory should not be in /proc/iomem but various tools expect it 562 * (eg kdump). 563 */ 564 static int add_system_ram_resources(void) 565 { 566 struct memblock_region *reg; 567 568 for_each_memblock(memory, reg) { 569 struct resource *res; 570 unsigned long base = reg->base; 571 unsigned long size = reg->size; 572 573 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 574 WARN_ON(!res); 575 576 if (res) { 577 res->name = "System RAM"; 578 res->start = base; 579 res->end = base + size - 1; 580 res->flags = IORESOURCE_MEM; 581 WARN_ON(request_resource(&iomem_resource, res) < 0); 582 } 583 } 584 585 return 0; 586 } 587 subsys_initcall(add_system_ram_resources); 588