// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */
	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{
}

/*
 * Serialize against find_current_mm_pte, which does a lockless
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t is different
 * from pmd_t, we want to prevent a transit from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts are
 * disabled. We clear the pmd to possibly replace it with a page table
 * pointer in different code paths. So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
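
/*
 * Illustrative sketch (not kernel code): a minimal userspace model of the
 * serialize_against_pte_lookup() pattern above, assuming pthreads. The
 * kernel waits for an empty IPI to complete on every CPU in mm_cpumask();
 * here the writer waits until every reader has left its
 * "interrupts disabled" window, modelled by a per-reader in_walk flag.
 * All names below are hypothetical.
 */
#if 0 /* standalone userspace model, kept out of the build */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_READERS 4

static atomic_int in_walk[NR_READERS];	/* models "IRQs off on this CPU" */
static atomic_long entry;		/* models the pmd being changed */

static void *reader(void *arg)
{
	int id = (int)(long)arg;

	for (int i = 0; i < 100000; i++) {
		atomic_store(&in_walk[id], 1);	/* local_irq_disable() */
		long v = atomic_load(&entry);	/* lockless walk snapshot */
		(void)v;
		atomic_store(&in_walk[id], 0);	/* local_irq_enable() */
	}
	return NULL;
}

/* Models serialize_against_pte_lookup(): wait out all current walkers. */
static void wait_for_walkers(void)
{
	for (int id = 0; id < NR_READERS; id++)
		while (atomic_load(&in_walk[id]))
			;	/* spin, like waiting for IPI completion */
}

int main(void)
{
	pthread_t t[NR_READERS];

	for (long id = 0; id < NR_READERS; id++)
		pthread_create(&t[id], NULL, reader, (void *)id);

	atomic_store(&entry, 0);	/* clear the pmd */
	wait_for_walkers();		/* no reader still sees the old format */
	atomic_store(&entry, 42);	/* safe to install new-format entry */

	for (int id = 0; id < NR_READERS; id++)
		pthread_join(t[id], NULL);
	printf("entry = %ld\n", atomic_load(&entry));
	return 0;
}
#endif
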
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 *
	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
	 * a special case check in pmd_access_permitted.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register with the physical
	 * base and the encoded table size (PATB_SIZE_SHIFT - 12), 64K.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
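
/*
 * Illustrative sketch (not kernel code): models how pfn_pmd() and
 * pmd_modify() above compose a huge-page entry -- place the pfn in the
 * RPN field, OR in protection bits, and on a protection change keep only
 * the bits in the CHG mask. The mask and bit values below are made up
 * for the demo, not the real book3s64 layout.
 */
#if 0 /* standalone userspace model, kept out of the build */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_RPN_MASK	0x0000fffffffff000UL	/* hypothetical RPN field */
#define DEMO_CHG_MASK	(DEMO_RPN_MASK | 0x3UL)	/* bits kept on modify */
#define DEMO_PROT_RW	0x0000000000000700UL	/* hypothetical prot bits */
#define DEMO_PROT_RO	0x0000000000000500UL

static uint64_t demo_pfn_pmd(uint64_t pfn, uint64_t prot)
{
	return ((pfn << DEMO_PAGE_SHIFT) & DEMO_RPN_MASK) | prot;
}

static uint64_t demo_pmd_modify(uint64_t pmd, uint64_t newprot)
{
	return (pmd & DEMO_CHG_MASK) | newprot;	/* drop old prot, keep pfn */
}

int main(void)
{
	uint64_t pmd = demo_pfn_pmd(0x12345, DEMO_PROT_RW);

	printf("rw entry: 0x%016llx\n", (unsigned long long)pmd);
	pmd = demo_pmd_modify(pmd, DEMO_PROT_RO);
	printf("ro entry: 0x%016llx\n", (unsigned long long)pmd);
	return 0;
}
#endif
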
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* Do we need a fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the
		 * PTE page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}
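
/*
 * Illustrative sketch (not kernel code): a userspace model of the
 * fragment scheme used by get_pmd_from_cache() above and
 * __alloc_for_pmdcache() below. A page is carved into FRAG_NR equal
 * fragments; a cursor hands out successive fragments and a refcount
 * frees the page once every fragment is returned. The names and the
 * single-threaded, lock-free structure are simplifications.
 */
#if 0 /* standalone userspace model, kept out of the build */
#include <stdlib.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096
#define DEMO_FRAG_NR	4
#define DEMO_FRAG_SIZE	(DEMO_PAGE_SIZE / DEMO_FRAG_NR)

struct demo_page {
	int refcount;			/* models page->pt_frag_refcount */
	char mem[DEMO_PAGE_SIZE];
};

static struct demo_page *cur_page;	/* models the backing page */
static char *frag_cursor;		/* models mm->context.pmd_frag */

static void *frag_alloc(void)
{
	void *ret;

	if (frag_cursor) {		/* get_pmd_from_cache() path */
		ret = frag_cursor;
		frag_cursor += DEMO_FRAG_SIZE;
		if (frag_cursor == cur_page->mem + DEMO_PAGE_SIZE)
			frag_cursor = NULL;	/* page fully handed out */
		return ret;
	}
	/* __alloc_for_pmdcache() path: fresh page, claim fragment 0 */
	cur_page = calloc(1, sizeof(*cur_page));
	if (!cur_page)
		return NULL;
	cur_page->refcount = DEMO_FRAG_NR;
	frag_cursor = cur_page->mem + DEMO_FRAG_SIZE;
	return cur_page->mem;
}

static void frag_free(struct demo_page *page)
{
	if (--page->refcount == 0) {	/* last fragment back: free page */
		free(page);
		printf("page freed\n");
	}
}

int main(void)
{
	void *f[DEMO_FRAG_NR];

	for (int i = 0; i < DEMO_FRAG_NR; i++)
		f[i] = frag_alloc();	/* all from one backing page */
	for (int i = 0; i < DEMO_FRAG_NR; i++)
		frag_free(cur_page);
	return 0;
}
#endif
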
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pmd_frag already set, another thread
	 * raced with us; return the allocated page with a single
	 * fragment count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free pgd tables via an RCU callback */
	default:
		BUG();
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps all of memory with a single mmu_linear_psize,
	 * so don't bother printing these on hash.
	 */
	if (!radix_enabled())
		return;
	/* The shifts convert a page count to kB: e.g. 4k pages x 4. */
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k:   %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
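
/*
 * Illustrative sketch (not kernel code): the pointer-tagging trick used
 * by pgtable_free_tlb()/__tlb_remove_table() above. Because page tables
 * are aligned well beyond MAX_PGTABLE_INDEX_SIZE, the table-type index
 * can ride in the low bits of the pointer and be peeled off later.
 * DEMO_INDEX_MASK and the aligned_alloc() sizing are demo choices.
 */
#if 0 /* standalone userspace model, kept out of the build */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define DEMO_INDEX_MASK	0xfUL	/* models MAX_PGTABLE_INDEX_SIZE */

static void *pack(void *table, unsigned int index)
{
	/* The pointer must be aligned so the low bits are free. */
	return (void *)((uintptr_t)table | index);
}

static void unpack(void *packed, void **table, unsigned int *index)
{
	*table = (void *)((uintptr_t)packed & ~DEMO_INDEX_MASK);
	*index = (uintptr_t)packed & DEMO_INDEX_MASK;
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* page-aligned */
	void *packed = pack(table, 3);			/* e.g. PMD_INDEX */
	void *t;
	unsigned int idx;

	unpack(packed, &t, &idx);
	printf("table %p -> %p, index %u\n", table, t, idx);
	free(t);
	return 0;
}
#endif
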
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep the pte_present true so that we don't
	 * take a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at a PTRS_PER_PMD offset from the related
 * pmd location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page, we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous or not. With radix we use the deposited table only
 * for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
{
	unsigned long i;

	if (radix_enabled())
		return radix__ioremap_range(ea, pa, size, prot, nid);

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);
		if (err) {
			if (slab_is_available())
				unmap_kernel_range(ea, size);
			else
				WARN_ON_ONCE(1); /* Should clean up */
			return err;
		}
	}

	return 0;
}
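
/*
 * Illustrative sketch (not kernel code): the page-at-a-time mapping loop
 * in ioremap_range() above, with its unwind-on-failure shape, modelled in
 * userspace. demo_map_page()/demo_unmap_range() stand in for
 * map_kernel_page()/unmap_kernel_range() and are hypothetical.
 */
#if 0 /* standalone userspace model, kept out of the build */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

/* Hypothetical per-page mapper; fails on one page to exercise unwind. */
static int demo_map_page(unsigned long ea, unsigned long pa)
{
	if (ea == 3 * DEMO_PAGE_SIZE)
		return -1;
	printf("mapped ea=0x%lx -> pa=0x%lx\n", ea, pa);
	return 0;
}

static void demo_unmap_range(unsigned long ea, unsigned long size)
{
	printf("unmapped [0x%lx, 0x%lx)\n", ea, ea + size);
}

static int demo_ioremap_range(unsigned long ea, unsigned long pa,
			      unsigned long size)
{
	for (unsigned long i = 0; i < size; i += DEMO_PAGE_SIZE) {
		int err = demo_map_page(ea + i, pa + i);
		if (err) {
			/* Tear down the whole range, as ioremap_range() does */
			demo_unmap_range(ea, size);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	return demo_ioremap_range(0, 0x100000, 8 * DEMO_PAGE_SIZE) ? 1 : 0;
}
#endif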