// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |    64K   |   2M  |    32M   |   1G  |
 * |    16K    |     2M   |  32M  |     1G   |       |
 * |    64K    |     2M   | 512M  |    16G   |       |
 * ---------------------------------------------------
 */

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

        if (pud_sect_supported())
                order = PUD_SHIFT - PAGE_SHIFT;
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;

        hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

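/*
 * Check whether @size is one of the huge page sizes this kernel
 * configuration can map: PUD (when section mappings are supported),
 * contiguous PMD, PMD, or contiguous PTE.
 */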
static bool __hugetlb_valid_size(unsigned long size)
{
        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return pud_sect_supported();
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }

        return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
        size_t pagesize = huge_page_size(h);

        if (!__hugetlb_valid_size(pagesize)) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
                return false;
        }
        return true;
}
#endif

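/*
 * find_num_contig() is only called for contiguous huge mappings
 * (cont-PTE or cont-PMD), so walking the page table down to the PMD
 * level is enough to tell the two cases apart: if @ptep points at the
 * PMD entry itself this is a cont-PMD mapping, otherwise it must be a
 * cont-PTE mapping. Returns the number of entries in the set and the
 * size covered by each via @pgsize.
 */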
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
        return CONT_PTES;
}

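/*
 * Map a huge page size to the number of hardware entries backing it
 * and the size each entry covers. With a 4K base page size, for
 * example (see the support matrix above), CONT_PTE_SIZE is 64K,
 * backed by 16 PTEs of 4K each, and CONT_PMD_SIZE is 32M, backed by
 * 16 PMDs of 2M each. Non-contiguous sizes map to a single entry.
 */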
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
        int contig_ptes = 1;

        *pgsize = size;

        switch (size) {
        case CONT_PMD_SIZE:
                *pgsize = PMD_SIZE;
                contig_ptes = CONT_PMDS;
                break;
        case CONT_PTE_SIZE:
                *pgsize = PAGE_SIZE;
                contig_ptes = CONT_PTES;
                break;
        default:
                WARN_ON(!__hugetlb_valid_size(size));
        }

        return contig_ptes;
}

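/*
 * For a contiguous huge mapping the hardware may set the dirty and
 * young bits in any entry of the set, so fold those bits from every
 * constituent PTE into the value returned for the first one.
 */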
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        int ncontig, i;
        size_t pgsize;
        pte_t orig_pte = __ptep_get(ptep);

        if (!pte_present(orig_pte) || !pte_cont(orig_pte))
                return orig_pte;

        ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
        for (i = 0; i < ncontig; i++, ptep++) {
                pte_t pte = __ptep_get(ptep);

                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }
        return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
                              unsigned long addr,
                              pte_t *ptep,
                              unsigned long pgsize,
                              unsigned long ncontig)
{
        pte_t pte, tmp_pte;
        bool present;

        pte = __ptep_get_and_clear(mm, addr, ptep);
        present = pte_present(pte);
        while (--ncontig) {
                ptep++;
                addr += pgsize;
                tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
                if (present) {
                        if (pte_dirty(tmp_pte))
                                pte = pte_mkdirty(pte);
                        if (pte_young(tmp_pte))
                                pte = pte_mkyoung(pte);
                }
        }
        return pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
                                    unsigned long addr,
                                    pte_t *ptep,
                                    unsigned long pgsize,
                                    unsigned long ncontig)
{
        pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

        flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
        return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
                        unsigned long addr,
                        pte_t *ptep,
                        unsigned long pgsize,
                        unsigned long ncontig)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                __ptep_get_and_clear(mm, addr, ptep);

        flush_tlb_range(&vma, saddr, addr);
}

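/*
 * Install a huge mapping of size @sz at @addr. Non-present entries
 * (swap/migration) and non-contiguous huge ptes can be written
 * directly; a contiguous set must first be broken (cleared and
 * flushed via clear_flush()) before the new entries are written, per
 * the Break-Before-Make requirement described above.
 */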
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte, unsigned long sz)
{
        size_t pgsize;
        int i;
        int ncontig;
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;

        ncontig = num_contig_ptes(sz, &pgsize);

        if (!pte_present(pte)) {
                for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
                        __set_ptes(mm, addr, ptep, pte, 1);
                return;
        }

        if (!pte_cont(pte)) {
                __set_ptes(mm, addr, ptep, pte, 1);
                return;
        }

        pfn = pte_pfn(pte);
        dpfn = pgsize >> PAGE_SHIFT;
        hugeprot = pte_pgprot(pte);

        clear_flush(mm, addr, ptep, pgsize, ncontig);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

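/*
 * Allocate the page table levels needed to map a huge page of size
 * @sz at @addr and return a pointer to the entry that will hold it:
 * the PUD itself for PUD-sized pages, the (possibly shared) PMD for
 * PMD-sized pages, the first PMD of the set for cont-PMD pages, or
 * the first PTE of the set for cont-PTE pages.
 */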
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_alloc(mm, pgdp, addr);
        if (!p4dp)
                return NULL;

        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

        if (sz == PUD_SIZE) {
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                if (!pmdp)
                        return NULL;

                WARN_ON(addr & (sz - 1));
                ptep = pte_alloc_huge(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                else
                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == (CONT_PMD_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
                return (pte_t *)pmdp;
        }

        return ptep;
}

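/*
 * Walk the page table looking for the entry mapping a huge page of
 * size @sz at @addr, without allocating. Leaf and swap entries are
 * returned at whatever level they are found; a NULL return means no
 * mapping of that size exists at @addr.
 */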
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

        pgdp = pgd_offset(mm, addr);
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
        if (pud_leaf(pud) || !pud_present(pud))
                return (pte_t *)pudp;
        /* table; check the next level */

        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
            pmd_none(pmd))
                return NULL;
        if (pmd_leaf(pmd) || !pmd_present(pmd))
                return (pte_t *)pmdp;

        if (sz == CONT_PTE_SIZE)
                return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

        return NULL;
}

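/*
 * Return a mask that, applied to an address, yields the last huge
 * page covered by the containing page table page, so that the core
 * hugetlb code can skip non-present page table entries when scanning
 * an address range linearly.
 */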
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
        unsigned long hp_size = huge_page_size(h);

        switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return PGDIR_SIZE - PUD_SIZE;
#endif
        case CONT_PMD_SIZE:
                return PUD_SIZE - CONT_PMD_SIZE;
        case PMD_SIZE:
                return PUD_SIZE - PMD_SIZE;
        case CONT_PTE_SIZE:
                return PMD_SIZE - CONT_PTE_SIZE;
        default:
                break;
        }

        return 0UL;
}

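/*
 * Rewrite a freshly built pte with the block/contiguous attributes
 * appropriate for the huge page size implied by @shift: section
 * (block) mappings for PUD and PMD sizes, plus the contiguous bit
 * for cont-PMD and cont-PTE sizes.
 */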
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
        size_t pagesize = 1UL << shift;

        switch (pagesize) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                entry = pud_pte(pud_mkhuge(pte_pud(entry)));
                break;
#endif
        case CONT_PMD_SIZE:
                entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
                fallthrough;
        case PMD_SIZE:
                entry = pmd_pte(pmd_mkhuge(pte_pmd(entry)));
                break;
        case CONT_PTE_SIZE:
                entry = pte_mkcont(entry);
                break;
        default:
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
                break;
        }
        return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                __pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, unsigned long sz)
{
        int ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);
        return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission need only be
 * checked against the first pte in the set, since permissions are
 * identical across the range. Dirty and young, however, may have been
 * updated on any entry, so each contiguous pte must be checked for a
 * discrepancy.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
        int i;

        if (pte_write(pte) != pte_write(__ptep_get(ptep)))
                return 1;

        for (i = 0; i < ncontig; i++) {
                pte_t orig_pte = __ptep_get(ptep + i);

                if (pte_dirty(pte) != pte_dirty(orig_pte))
                        return 1;

                if (pte_young(pte) != pte_young(orig_pte))
                        return 1;
        }

        return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
        int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        struct mm_struct *mm = vma->vm_mm;
        pgprot_t hugeprot;
        pte_t orig_pte;

        if (!pte_cont(pte))
                return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;

        orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);

        if (pte_young(orig_pte))
                pte = pte_mkyoung(pte);

        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);

        return 1;
}

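/*
 * Write-protect a huge mapping. A contiguous range must go through a
 * full break-make cycle: clear and flush the whole set (accumulating
 * dirty/young into the returned pte), drop write permission, then
 * rewrite every entry.
 */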
void huge_ptep_set_wrprotect(struct mm_struct *mm,
                             unsigned long addr, pte_t *ptep)
{
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;
        int ncontig, i;
        size_t pgsize;
        pte_t pte;

        if (!pte_cont(__ptep_get(ptep))) {
                __ptep_set_wrprotect(mm, addr, ptep);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);

        hugeprot = pte_pgprot(pte);
        pfn = pte_pfn(pte);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

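/*
 * Clear a huge mapping and flush the corresponding TLB entries,
 * returning the original pte. Contiguous ranges are torn down as a
 * set, as required by Break-Before-Make.
 */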
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                            unsigned long addr, pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        size_t pgsize;
        int ncontig;

        if (!pte_cont(__ptep_get(ptep)))
                return ptep_clear_flush(vma, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
        /*
         * HugeTLB pages are supported at up to four page table
         * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base
         * page size, corresponding to the hugetlb_add_hstate()
         * calls here.
         *
         * HUGE_MAX_HSTATE must cover the maximum number of HugeTLB
         * page sizes the platform supports; any addition to the
         * supported sizes also requires raising HUGE_MAX_HSTATE.
         */
        BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

        return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
        return __hugetlb_valid_size(size);
}

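/*
 * huge_ptep_modify_prot_start()/huge_ptep_modify_prot_commit()
 * bracket a protection change: start tears down the old mapping and
 * returns the old pte, commit installs the modified one. On CPUs
 * affected by erratum #2645198 the teardown must also flush the TLB
 * when an executable user mapping may become non-executable.
 */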
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        unsigned long psize = huge_page_size(hstate_vma(vma));

        if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
                /*
                 * Break-before-make (BBM) is required for all user space
                 * mappings when the permission changes from executable to
                 * non-executable on CPUs affected by erratum #2645198.
                 */
                if (pte_user_exec(__ptep_get(ptep)))
                        return huge_ptep_clear_flush(vma, addr, ptep);
        }
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
                                  pte_t old_pte, pte_t pte)
{
        unsigned long psize = huge_page_size(hstate_vma(vma));

        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}