// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
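
/*
 * The contiguous-hint sizes above follow from how many entries the
 * contiguous bit spans at each level: with 4K pages it spans 16
 * entries at both levels (16 * 4K = 64K, 16 * 2M = 32M); with 16K
 * pages, 128 PTEs (2M) and 32 PMDs (1G); with 64K pages, 32 entries
 * at both levels (2M and 16G).
 */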

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif

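/*
 * In the arm64 descriptor format bit 1 distinguishes a table entry
 * (bit set) from a block mapping (bit clear), so a non-empty entry
 * with the table bit clear is huge. This also covers non-present
 * swap/migration entries, whose encoding leaves the table bit clear.
 */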
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

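/*
 * Work out how many page-table entries make up the contiguous
 * mapping that ptep belongs to. If ptep turns out to be the PMD
 * entry itself, the set is CONT_PMDS PMD-sized entries; otherwise it
 * is assumed to be CONT_PTES last-level PTEs. The size of a single
 * entry is returned through pgsize.
 */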
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

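/*
 * Map a huge page size onto the number of entries needed to install
 * it and the size each entry covers: e.g. CONT_PMD_SIZE becomes
 * CONT_PMDS entries of PMD_SIZE each, while PUD_SIZE and PMD_SIZE are
 * a single entry. A size outside the support matrix yields zero.
 */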
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

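/*
 * With HW_AFDBM the hardware may have set the access or dirty bit on
 * any entry of a contiguous set, so fold the dirty and young state of
 * every constituent entry into the returned pte.
 */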
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = __ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = __ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__ptep_get_and_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

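/*
 * Install a huge mapping at addr. Non-present (e.g. swap/migration)
 * and non-contiguous entries are written directly; a contiguous set
 * is first broken and flushed, then repopulated entry by entry with
 * the pfn advanced by one entry's worth of pages each step.
 */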
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	ncontig = num_contig_ptes(sz, &pgsize);

	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
			__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	if (!pte_cont(pte)) {
		__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

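/*
 * Allocate the page-table path for a huge mapping at addr and return
 * a pointer to the entry at the level implied by sz: the PUD itself
 * for PUD_SIZE, a PMD for PMD_SIZE or CONT_PMD_SIZE (reusing a shared
 * PMD table for PMD_SIZE where possible), or a last-level PTE for
 * CONT_PTE_SIZE.
 */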
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

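/*
 * Look up the entry that maps addr for the given huge page size, or
 * return NULL if nothing suitable is mapped. Block mappings and
 * non-present (swap/migration) entries are returned at the level at
 * which they are found.
 */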
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

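/*
 * Tell generic hugetlb code how far it may skip when it finds an
 * empty entry above the huge page level: the returned mask, ORed
 * into an address, yields the last huge page covered by the current
 * higher-level entry (e.g. PUD_SIZE - PMD_SIZE for PMD-sized pages).
 */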
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}

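/*
 * Promote a pte to the huge variant for the given shift, setting the
 * contiguous bit at PTE or PMD level when the size requires it.
 */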
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__pte_clear(mm, addr, ptep);
}

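/*
 * Clear a huge mapping and return the old entry, folding the
 * dirty/young state of every constituent entry into the result when
 * the mapping is contiguous.
 */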
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return __ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked against the first pte in the set, whereas dirty and young
 * must be compared against every constituent pte, since the hardware
 * may have updated any of them.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(__ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = __ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);

	return 1;
}

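/*
 * Remove write permission from a huge mapping. A contiguous set needs
 * break-before-make: clear and flush the whole set (keeping any
 * dirty/young state the hardware recorded), then rewrite it with the
 * write-protected attributes.
 */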
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(__ptep_get(ptep))) {
		__ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(__ptep_get(ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

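/*
 * Register an hstate for every size in the support matrix; with 4K
 * pages, for example, this adds 1G (when PUD sections are supported),
 * 32M, 2M and 64K hstates.
 */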
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(__ptep_get(ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
559