/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
			  bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);
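
	/*
	 * The generic code below allocates the memmap and memory sections
	 * for the new range; the section mapping created above covers only
	 * the kernel's linear mapping of it.
	 */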
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int __ref arch_remove_memory(int nid, u64 start, u64 size,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code.  32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
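 * All that remains here is to map the fixmap/PKMAP areas (on 32-bit),
 * compute the zone PFN limits and hand them to free_area_init_nodes().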
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.
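 * (Clearing PG_arch_1, which powerpc uses as a per-page "i-cache is
 * clean" flag, is what records that.)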
 * We do the i-cache flush later when the page is given to a user
 * process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *  - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 *  - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.
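	 * (On Book3S, TRAP() is 0x300 for a data access fault and 0x400
	 * for an instruction access fault; see the switch below.)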
	 * We have to test for regs NULL since init will get here first
	 * thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);