// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |    64K   |   2M  |    32M   |   1G  |
 * |    16K    |     2M   |  32M  |     1G   |       |
 * |    64K    |     2M   |  512M |    16G   |       |
 * ---------------------------------------------------
 */

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

        if (pud_sect_supported())
                order = PUD_SHIFT - PAGE_SHIFT;
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;

        /*
         * HugeTLB CMA reservation is required for gigantic
         * huge pages which could not be allocated via the
         * page allocator. Just warn if there is any change
         * breaking this assumption.
         */
        WARN_ON(order <= MAX_ORDER);
        hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

static bool __hugetlb_valid_size(unsigned long size)
{
        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return pud_sect_supported();
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }

        return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
        size_t pagesize = huge_page_size(h);

        if (!__hugetlb_valid_size(pagesize)) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
                return false;
        }
        return true;
}
#endif

int pmd_huge(pmd_t pmd)
{
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
        return 0;
#endif
}

static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
        return CONT_PTES;
}

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
        int contig_ptes = 0;

        *pgsize = size;

        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                if (pud_sect_supported())
                        contig_ptes = 1;
                break;
#endif
        case PMD_SIZE:
                contig_ptes = 1;
                break;
        case CONT_PMD_SIZE:
                *pgsize = PMD_SIZE;
                contig_ptes = CONT_PMDS;
                break;
        case CONT_PTE_SIZE:
                *pgsize = PAGE_SIZE;
                contig_ptes = CONT_PTES;
                break;
        }

        return contig_ptes;
}

pte_t huge_ptep_get(pte_t *ptep)
{
        int ncontig, i;
        size_t pgsize;
        pte_t orig_pte = ptep_get(ptep);

        if (!pte_present(orig_pte) || !pte_cont(orig_pte))
                return orig_pte;

        ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
        for (i = 0; i < ncontig; i++, ptep++) {
                pte_t pte = ptep_get(ptep);

                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }
        return orig_pte;
}
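/*
 * Worked example (added commentary, figures follow from the support
 * matrix above): with a 4K granule, CONT_PTES is 16, so a CONT_PTE
 * hugepage spans 16 * 4K = 64K and is described by 16 adjacent PTEs
 * carrying the contiguous hint. If hardware access/dirty management
 * (HW_AFDBM) sets the dirty or young bit on any one of those 16
 * entries, huge_ptep_get() above folds it into the returned pte, so
 * the hugepage is reported dirty/young as a whole.
 */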
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
                              unsigned long addr,
                              pte_t *ptep,
                              unsigned long pgsize,
                              unsigned long ncontig)
{
        pte_t orig_pte = ptep_get(ptep);
        unsigned long i;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
                pte_t pte = ptep_get_and_clear(mm, addr, ptep);

                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
                 * the dirty or accessed bit for any page in the set,
                 * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }
        return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
                                    unsigned long addr,
                                    pte_t *ptep,
                                    unsigned long pgsize,
                                    unsigned long ncontig)
{
        pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

        flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
        return orig_pte;
}
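/*
 * Illustrative sketch of how the break helpers above are used by the
 * callers later in this file (a sketch, not additional kernel API):
 *
 *	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 *	// ... adjust permission/state bits in orig_pte ...
 *	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
 *		set_pte_at(mm, addr, ptep, ...);	// "make" step
 *
 * Writing the new entries without the intervening clear-and-flush could
 * leave the TLB holding two translations with conflicting contiguous
 * hints for the same address, which is exactly the misprogramming the
 * ARM ARM warns about.
 */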
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
                        unsigned long addr,
                        pte_t *ptep,
                        unsigned long pgsize,
                        unsigned long ncontig)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);

        flush_tlb_range(&vma, saddr, addr);
}

static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
{
        VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));

        return page_folio(pfn_to_page(swp_offset(entry)));
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        size_t pgsize;
        int i;
        int ncontig;
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;

        if (!pte_present(pte)) {
                struct folio *folio;

                folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
                ncontig = num_contig_ptes(folio_size(folio), &pgsize);

                for (i = 0; i < ncontig; i++, ptep++)
                        set_pte_at(mm, addr, ptep, pte);
                return;
        }

        if (!pte_cont(pte)) {
                set_pte_at(mm, addr, ptep, pte);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        pfn = pte_pfn(pte);
        dpfn = pgsize >> PAGE_SHIFT;
        hugeprot = pte_pgprot(pte);

        clear_flush(mm, addr, ptep, pgsize, ncontig);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

        if (sz == PUD_SIZE) {
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                if (!pmdp)
                        return NULL;

                WARN_ON(addr & (sz - 1));
                /*
                 * Note that if this code were ever ported to the
                 * 32-bit arm platform then it will cause trouble in
                 * the case where CONFIG_HIGHPTE is set, since there
                 * will be no pte_unmap() to correspond with this
                 * pte_alloc_map().
                 */
                ptep = pte_alloc_map(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                else
                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == (CONT_PMD_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
                return (pte_t *)pmdp;
        }

        return ptep;
}
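/*
 * Usage sketch (assumed shape of the generic hugetlb callers, for
 * illustration only): the core mm code drives the two lookup helpers
 * in this file roughly as
 *
 *	ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *	if (!ptep)
 *		ptep = huge_pte_alloc(mm, vma, addr, huge_page_size(h));
 *
 * so huge_pte_alloc() above must hand back a pointer at the page-table
 * level matching sz (pud, pmd or pte), and huge_pte_offset() below must
 * do the same without allocating anything.
 */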
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

        pgdp = pgd_offset(mm, addr);
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
        if (pud_huge(pud) || !pud_present(pud))
                return (pte_t *)pudp;
        /* table; check the next level */

        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
            pmd_none(pmd))
                return NULL;
        if (pmd_huge(pmd) || !pmd_present(pmd))
                return (pte_t *)pmdp;

        if (sz == CONT_PTE_SIZE)
                return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

        return NULL;
}

unsigned long hugetlb_mask_last_page(struct hstate *h)
{
        unsigned long hp_size = huge_page_size(h);

        switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return PGDIR_SIZE - PUD_SIZE;
#endif
        case CONT_PMD_SIZE:
                return PUD_SIZE - CONT_PMD_SIZE;
        case PMD_SIZE:
                return PUD_SIZE - PMD_SIZE;
        case CONT_PTE_SIZE:
                return PMD_SIZE - CONT_PTE_SIZE;
        default:
                break;
        }

        return 0UL;
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
        size_t pagesize = 1UL << shift;

        entry = pte_mkhuge(entry);
        if (pagesize == CONT_PTE_SIZE) {
                entry = pte_mkcont(entry);
        } else if (pagesize == CONT_PMD_SIZE) {
                entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
        } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
        }
        return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        int ncontig;
        size_t pgsize;
        pte_t orig_pte = ptep_get(ptep);

        if (!pte_cont(orig_pte))
                return ptep_get_and_clear(mm, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);

        return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
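/*
 * Design note (added commentary): huge_ptep_get_and_clear() above
 * deliberately uses get_clear_contig() rather than the flushing
 * variant; its callers in generic mm code are expected to invalidate
 * the TLB themselves after the clear. Contrast huge_ptep_clear_flush()
 * further down, which must not leave stale TLB entries behind and
 * therefore uses get_clear_contig_flush().
 */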
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
        int i;

        if (pte_write(pte) != pte_write(ptep_get(ptep)))
                return 1;

        for (i = 0; i < ncontig; i++) {
                pte_t orig_pte = ptep_get(ptep + i);

                if (pte_dirty(pte) != pte_dirty(orig_pte))
                        return 1;

                if (pte_young(pte) != pte_young(orig_pte))
                        return 1;
        }

        return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
        int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        struct mm_struct *mm = vma->vm_mm;
        pgprot_t hugeprot;
        pte_t orig_pte;

        if (!pte_cont(pte))
                return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;

        orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);

        if (pte_young(orig_pte))
                pte = pte_mkyoung(pte);

        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

        return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
                             unsigned long addr, pte_t *ptep)
{
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;
        int ncontig, i;
        size_t pgsize;
        pte_t pte;

        if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_set_wrprotect(mm, addr, ptep);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);

        hugeprot = pte_pgprot(pte);
        pfn = pte_pfn(pte);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                            unsigned long addr, pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        size_t pgsize;
        int ncontig;

        if (!pte_cont(READ_ONCE(*ptep)))
                return ptep_clear_flush(vma, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

        return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
        return __hugetlb_valid_size(size);
}