// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/stop_machine.h>

#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
#include <asm/firmware.h>

#include <mm/mmu_decl.h>

#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot, or during a memory hotplug operation when a new memory
 * section is added, physical memory allocation (including hash table
 * bolting) is performed for the set of struct pages which are part of
 * the memory section. This saves memory by not allocating struct pages
 * for PFNs which are not valid.
 *
 *	----------------------------------------------
 *	| PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *	----------------------------------------------
 *
 *	   f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |       |        +--------------+
 *  |      |  page struct | |       |   +--> |  page struct |
 *  |      +--------------+ |       |   |    +--------------+
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | |       |   |
 *  |      +--------------+ |       |   |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *	-----------------------------------------
 *	| RELATION BETWEEN STRUCT PAGES AND PFNS|
 *	-----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc;

	if ((start + page_size) >= H_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	rc = htab_bolt_mapping(start, start + page_size, phys,
			       pgprot_val(PAGE_KERNEL),
			       mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * It adds an entry to the ioremap page table and adds an
 * entry to the HPT, possibly bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping. Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

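	/*
	 * Atomically update the PMD: spin while H_PAGE_BUSY is set (a
	 * parallel hash fault is still working on this entry), then clear
	 * the bits in 'clr', set the bits in 'set' and publish the result
	 * with stdcx., returning the old PMD value.
	 */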
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update_pmd(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);

	page_table_check_pmd_clear(vma->vm_mm, address, pmd);

	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_lock. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_lock and
	 * hence wait for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * We store the pgtable in the second half of PMD.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at pmd level. The hash fault code looks
	 * at the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
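
/*
 * Illustrative only, not part of this file's interface: the callers of the
 * deposit/withdraw helpers live in the generic THP code. The mm code
 * deposits a page table when it installs a huge PMD and withdraws it when
 * the PMD is split or zapped, roughly:
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	set_pmd_at(mm, haddr, pmdp, entry);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *
 * On hash, the deposited fragment doubles as the array tracking hash slot
 * information for the base pages, which is why withdraw zeroes it before
 * handing it back.
 */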

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid bit and hash index details so that a
	 * parallel hash fault does not look at stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return old_pmd;
}

int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok, we only have 4K HPTE.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX

struct change_memory_parms {
	unsigned long start, end, newpp;
	unsigned int step, nr_cpus;
	atomic_t master_cpu;
	atomic_t cpu_counter;
};

// We'd rather this was on the stack but it has to be in the RMO
static struct change_memory_parms chmem_parms;

// And therefore we need a lock to protect it from concurrent use
static DEFINE_MUTEX(chmem_lock);

static void change_memory_range(unsigned long start, unsigned long end,
				unsigned int step, unsigned long newpp)
{
	unsigned long idx;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);
}

static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
{
	unsigned long msr, tmp, flags;
	int *p;

	p = &parms->cpu_counter.counter;

	local_irq_save(flags);
	hard_irq_disable();

	asm volatile (
	// Switch to real mode and leave interrupts off
	"mfmsr	%[msr]			;"
	"li	%[tmp], %[MSR_IR_DR]	;"
	"andc	%[tmp], %[msr], %[tmp]	;"
	"mtmsrd	%[tmp]			;"

	// Tell the master we are in real mode
	"1:				"
	"lwarx	%[tmp], 0, %[p]		;"
	"addic	%[tmp], %[tmp], -1	;"
	"stwcx.	%[tmp], 0, %[p]		;"
	"bne-	1b			;"

	// Spin until the counter goes to zero
	"2:				;"
	"lwz	%[tmp], 0(%[p])		;"
	"cmpwi	%[tmp], 0		;"
	"bne-	2b			;"

	// Switch back to virtual mode
	"mtmsrd	%[msr]			;"

	: // outputs
	  [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
	: // inputs
	  [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
	: // clobbers
	  "cc", "xer"
	);

	local_irq_restore(flags);

	return 0;
}

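/*
 * Entry point run on every online CPU under stop_machine(). The CPU that
 * wins the atomic_xchg() on master_cpu performs the actual HPTE update;
 * every other CPU drops to real mode in chmem_secondary_loop() and spins
 * until cpu_counter reaches zero, so no CPU takes hash faults through the
 * bolted entries while they are being updated.
 */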
static int change_memory_range_fn(void *data)
{
	struct change_memory_parms *parms = data;

	// First CPU goes through, all others wait.
	if (atomic_xchg(&parms->master_cpu, 1) == 1)
		return chmem_secondary_loop(parms);

	// Wait for all but one CPU (this one) to call-in
	while (atomic_read(&parms->cpu_counter) > 1)
		barrier();

	change_memory_range(parms->start, parms->end, parms->step, parms->newpp);

	mb();

	// Signal the other CPUs that we're done
	atomic_dec(&parms->cpu_counter);

	return 0;
}

static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		mutex_lock(&chmem_lock);

		chmem_parms.start = start;
		chmem_parms.end = end;
		chmem_parms.step = step;
		chmem_parms.newpp = newpp;
		atomic_set(&chmem_parms.master_cpu, 0);

		cpus_read_lock();

		atomic_set(&chmem_parms.cpu_counter, num_online_cpus());

		// Ensure state is consistent before we call the other CPUs
		mb();

		stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
					cpu_online_mask);

		cpus_read_unlock();
		mutex_unlock(&chmem_lock);
	} else
		change_memory_range(start, end, step, newpp);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)_stext;
	end = (unsigned long)__end_rodata;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);

	WARN_ON(!hash__change_memory_range(start, end, pp));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif
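
/*
 * Note (not part of this file): the generic book3s64 mark_rodata_ro() and
 * mark_initmem_nx() entry points dispatch to these hash__ variants, or to
 * their radix__ counterparts, depending on radix_enabled().
 */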