// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |    64K   |   2M  |    32M   |   1G  |
 * |    16K    |     2M   |  32M  |     1G   |       |
 * |    64K    |     2M   |  512M |    16G   |       |
 * ---------------------------------------------------
 */

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

        if (pud_sect_supported())
                order = PUD_SHIFT - PAGE_SHIFT;
        else
                order = CONT_PMD_SHIFT - PAGE_SHIFT;

        hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

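/*
 * Check whether a given huge page size is one this configuration can
 * actually map with a single block or contiguous set of entries.
 */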
static bool __hugetlb_valid_size(unsigned long size)
{
        switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                return pud_sect_supported();
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }

        return false;
}

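/*
 * Migration is only supported for huge page sizes we can map; warn on
 * anything else so a misconfigured hstate shows up in the log.
 */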
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
        size_t pagesize = huge_page_size(h);

        if (!__hugetlb_valid_size(pagesize)) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
                return false;
        }
        return true;
}
#endif

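/*
 * Work out how many contiguous entries make up the mapping at @addr by
 * walking the page table: if @ptep turns out to be the PMD entry
 * itself, this is a contiguous PMD range, otherwise a contiguous PTE
 * range. The size covered by each entry is reported via @pgsize.
 */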
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
        return CONT_PTES;
}

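/*
 * Translate a huge page size into the number of page table entries
 * used to map it and the size covered by each entry. Non-contiguous
 * sizes (PMD_SIZE, PUD_SIZE) map with a single entry.
 */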
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
        int contig_ptes = 1;

        *pgsize = size;

        switch (size) {
        case CONT_PMD_SIZE:
                *pgsize = PMD_SIZE;
                contig_ptes = CONT_PMDS;
                break;
        case CONT_PTE_SIZE:
                *pgsize = PAGE_SIZE;
                contig_ptes = CONT_PTES;
                break;
        default:
                WARN_ON(!__hugetlb_valid_size(size));
        }

        return contig_ptes;
}

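/*
 * For a contiguous mapping the hardware may have set the dirty/young
 * bits on any entry in the set, so fold those bits from every entry
 * into the value returned for the first one.
 */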
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        int ncontig, i;
        size_t pgsize;
        pte_t orig_pte = __ptep_get(ptep);

        if (!pte_present(orig_pte) || !pte_cont(orig_pte))
                return orig_pte;

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        for (i = 0; i < ncontig; i++, ptep++) {
                pte_t pte = __ptep_get(ptep);

                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }
        return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
                              unsigned long addr,
                              pte_t *ptep,
                              unsigned long pgsize,
                              unsigned long ncontig)
{
        pte_t pte, tmp_pte;
        bool present;

        pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
        present = pte_present(pte);
        while (--ncontig) {
                ptep++;
                tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
                if (present) {
                        if (pte_dirty(tmp_pte))
                                pte = pte_mkdirty(pte);
                        if (pte_young(tmp_pte))
                                pte = pte_mkyoung(pte);
                }
        }
        return pte;
}

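/*
 * Break the contiguous set as above, then invalidate the TLB for the
 * whole range so no stale (possibly contiguous) entries remain.
 */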
static pte_t get_clear_contig_flush(struct mm_struct *mm,
                                    unsigned long addr,
                                    pte_t *ptep,
                                    unsigned long pgsize,
                                    unsigned long ncontig)
{
        pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long end = addr + (pgsize * ncontig);

        __flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true);
        return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
                        unsigned long addr,
                        pte_t *ptep,
                        unsigned long pgsize,
                        unsigned long ncontig)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                __ptep_get_and_clear_anysz(mm, ptep, pgsize);

        if (mm == &init_mm)
                flush_tlb_kernel_range(saddr, addr);
        else
                __flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
}

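/*
 * Install a huge pte, splitting it into the right number of entries
 * for contiguous sizes. Overwriting a valid entry with a contiguous
 * set requires a break-before-make cycle first.
 */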
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte, unsigned long sz)
{
        size_t pgsize;
        int i;
        int ncontig;

        ncontig = num_contig_ptes(sz, &pgsize);

        if (!pte_present(pte)) {
                for (i = 0; i < ncontig; i++, ptep++)
                        __set_ptes_anysz(mm, ptep, pte, 1, pgsize);
                return;
        }

        /* Only need to "break" if transitioning valid -> valid. */
        if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
                clear_flush(mm, addr, ptep, pgsize, ncontig);

        __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
}

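/*
 * Allocate page tables down to the level that will hold a huge pte of
 * size @sz and return a pointer to the (first) entry. PMD-sized
 * mappings may share the PMD table with other processes where the VMA
 * allows it.
 */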
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_alloc(mm, pgdp, addr);
        if (!p4dp)
                return NULL;

        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

        if (sz == PUD_SIZE) {
                ptep = (pte_t *)pudp;
        } else if (sz == CONT_PTE_SIZE) {
                pmdp = pmd_alloc(mm, pudp, addr);
                if (!pmdp)
                        return NULL;

                WARN_ON(addr & (sz - 1));
                ptep = pte_alloc_huge(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                else
                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == CONT_PMD_SIZE) {
                pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
                return (pte_t *)pmdp;
        }

        return ptep;
}

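/*
 * Look up the (first) entry mapping @addr for a huge page of size @sz
 * without allocating. Returns NULL if no mapping of that size can
 * exist at this address, and may return a non-present entry (e.g. a
 * swap entry) for the caller to inspect.
 */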
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

        pgdp = pgd_offset(mm, addr);
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
        if (pud_leaf(pud) || !pud_present(pud))
                return (pte_t *)pudp;
        /* table; check the next level */

        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
            pmd_none(pmd))
                return NULL;
        if (pmd_leaf(pmd) || !pmd_present(pmd))
                return (pte_t *)pmdp;

        if (sz == CONT_PTE_SIZE)
                return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

        return NULL;
}

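/*
 * Return the mask from a huge page to the last huge page slot in its
 * containing page table, which lets generic hugetlb code skip a whole
 * table's worth of address space at once when a range walk finds no
 * entry at this level.
 */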
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
        unsigned long hp_size = huge_page_size(h);

        switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                if (pud_sect_supported())
                        return PGDIR_SIZE - PUD_SIZE;
                break;
#endif
        case CONT_PMD_SIZE:
                return PUD_SIZE - CONT_PMD_SIZE;
        case PMD_SIZE:
                return PUD_SIZE - PMD_SIZE;
        case CONT_PTE_SIZE:
                return PMD_SIZE - CONT_PTE_SIZE;
        default:
                break;
        }

        return 0UL;
}

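/*
 * Turn a regular pte value into its huge (block or contiguous)
 * equivalent for the given shift, setting the contiguous bit where the
 * size demands it.
 */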
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
        size_t pagesize = 1UL << shift;

        switch (pagesize) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                if (pud_sect_supported())
                        return pud_pte(pud_mkhuge(pte_pud(entry)));
                break;
#endif
        case CONT_PMD_SIZE:
                return pmd_pte(pmd_mkhuge(pmd_mkcont(pte_pmd(entry))));
        case PMD_SIZE:
                return pmd_pte(pmd_mkhuge(pte_pmd(entry)));
        case CONT_PTE_SIZE:
                return pte_mkcont(entry);
        default:
                break;
        }
        pr_warn("%s: unrecognized huge page size 0x%lx\n",
                __func__, pagesize);
        return entry;
}

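/*
 * Clear every entry backing a huge page of size @sz. No TLB
 * maintenance is done here; callers flush as required.
 */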
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                __pte_clear(mm, addr, ptep);
}

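/*
 * Clear all entries of the mapping and return the original pte with
 * the dirty/young bits accumulated from the whole contiguous set.
 */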
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, unsigned long sz)
{
        int ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);
        return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, only the first pte in the set needs
 * to be checked to decide whether write permission has to change. Then,
 * for each of the contiguous ptes, we need to check whether its dirty
 * or young bits disagree with the new pte.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
        int i;

        if (pte_write(pte) != pte_write(__ptep_get(ptep)))
                return 1;

        for (i = 0; i < ncontig; i++) {
                pte_t orig_pte = __ptep_get(ptep + i);

                if (pte_dirty(pte) != pte_dirty(orig_pte))
                        return 1;

                if (pte_young(pte) != pte_young(orig_pte))
                        return 1;
        }

        return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
        int ncontig;
        size_t pgsize = 0;
        struct mm_struct *mm = vma->vm_mm;
        pte_t orig_pte;

        VM_WARN_ON(!pte_present(pte));

        if (!pte_cont(pte))
                return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

        ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize);

        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;

        orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
        VM_WARN_ON(!pte_present(orig_pte));

        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);

        if (pte_young(orig_pte))
                pte = pte_mkyoung(pte);

        __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
        return 1;
}

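/*
 * Write-protect a huge mapping. Contiguous ranges need the full
 * break-before-make sequence: clear and flush the set, then write it
 * back with write permission removed.
 */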
void huge_ptep_set_wrprotect(struct mm_struct *mm,
                             unsigned long addr, pte_t *ptep)
{
        int ncontig;
        size_t pgsize;
        pte_t pte;

        pte = __ptep_get(ptep);
        VM_WARN_ON(!pte_present(pte));

        if (!pte_cont(pte)) {
                __ptep_set_wrprotect(mm, addr, ptep);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);

        pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);

        __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
}

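/*
 * Clear a huge mapping and flush the TLB for its whole range,
 * returning the original pte.
 */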
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                            unsigned long addr, pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        size_t pgsize;
        int ncontig;

        ncontig = num_contig_ptes(huge_page_size(hstate_vma(vma)), &pgsize);
        return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
        /*
         * HugeTLB pages are supported on a maximum of four page table
         * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base page
         * size, corresponding to the hugetlb_add_hstate() calls here.
         *
         * HUGE_MAX_HSTATE should at least match the maximum number of
         * supported HugeTLB page sizes on the platform. Any new
         * addition to the supported HugeTLB page sizes will also
         * require raising HUGE_MAX_HSTATE.
         */
        BUILD_BUG_ON(HUGE_MAX_HSTATE < 4);
        if (pud_sect_supported())
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

        return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
        return __hugetlb_valid_size(size);
}

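/*
 * Start/commit pair for changing the protections of a huge mapping.
 * The start step clears the entries; CPUs affected by erratum #2645198
 * additionally need a TLB flush when an executable user mapping may
 * become non-executable.
 */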
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        unsigned long psize = huge_page_size(hstate_vma(vma));

        if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
                /*
                 * Break-before-make (BBM) is required for all user space
                 * mappings when the permission changes from executable to
                 * non-executable in cases where the CPU is affected by
                 * erratum #2645198.
                 */
                if (pte_user_exec(__ptep_get(ptep)))
                        return huge_ptep_clear_flush(vma, addr, ptep);
        }
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
                                  pte_t old_pte, pte_t pte)
{
        unsigned long psize = huge_page_size(hstate_vma(vma));

        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}