// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */

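/*
 * The CONT_* column values follow directly from how many entries share
 * a contiguous hint at each level. For example, with 4K base pages:
 *
 *   CONT_PTE_SIZE = CONT_PTES * PAGE_SIZE = 16 * 4K = 64K
 *   CONT_PMD_SIZE = CONT_PMDS * PMD_SIZE = 16 * 2M  = 32M
 */
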
/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

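/*
 * A size is valid if it appears in the support matrix above. PUD_SIZE
 * is additionally gated on pud_sect_supported(), as only some granule
 * configurations can map a section at the PUD level.
 */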
static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif

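/*
 * Work out how many page-table entries make up the contiguous range
 * that ptep belongs to. If ptep points at the PMD entry itself, the
 * range is CONT_PMDS entries of PMD_SIZE each; otherwise it is taken
 * to be CONT_PTES entries of PAGE_SIZE each.
 */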
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

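/*
 * Map a hugepage size onto the number of page-table entries needed to
 * map it and, via *pgsize, the size each of those entries covers.
 * Unsupported sizes return 0.
 */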
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

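/*
 * For a contiguous set, hardware may have set the dirty or young bit
 * on any entry in the set (see HW_AFDBM), so the pte returned for the
 * hugepage has to accumulate those bits from every entry.
 */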
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = __ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = __ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__ptep_get_and_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

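/*
 * Writing a present contiguous pte is a break-before-make operation:
 * the old entries are cleared and their TLB entries invalidated, then
 * the new set is written one entry per pgsize step with the pfn
 * advancing accordingly. Non-present and non-contiguous ptes are
 * written directly, without the intermediate break.
 */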
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	ncontig = num_contig_ptes(sz, &pgsize);

	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
			__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	if (!pte_cont(pte)) {
		__set_ptes(mm, addr, ptep, pte, 1);
		return;
	}

	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

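/*
 * Allocate page-table levels down to the one that maps sz: the pud
 * entry itself for PUD_SIZE, a pmd for PMD_SIZE and CONT_PMD_SIZE
 * (sharing the pmd page with other processes where possible for
 * PMD_SIZE), and a pte for CONT_PTE_SIZE. Returns NULL on allocation
 * failure or if sz is not a supported hugepage size.
 */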
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		return NULL;

	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

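/*
 * Walk the page table without allocating and return a pointer to the
 * entry mapping addr at the level implied by sz, or NULL if nothing
 * is mapped there. Leaf and non-present (e.g. swap) entries are
 * returned as-is for the caller to inspect.
 */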
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_leaf(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_leaf(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

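/*
 * Return a mask of the address bits that index a hugepage of this size
 * within the region mapped by the next-higher page-table level. The
 * generic hugetlb walker ORs this into an address to skip straight to
 * the last hugepage under an upper-level entry that turned out empty.
 */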
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}

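/*
 * Mark a pte huge and, where the size calls for it, set the contiguous
 * hint as well. Sizes outside the support matrix are left as plain
 * huge ptes, with a warning.
 */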
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		__pte_clear(mm, addr, ptep);
}

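/*
 * Clear a hugepage mapping and return the original pte. Contiguous
 * sets are cleared entry by entry, accumulating the dirty and young
 * bits so that nothing set by hardware on a later entry is lost.
 */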
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = __ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return __ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked on the first pte in the set; dirty and young then need to be
 * checked on every contiguous pte for a discrepancy with the new value.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(__ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = __ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return __ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);

	return 1;
}

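/*
 * Write-protecting a contiguous set is another break-before-make
 * operation: clear and flush the whole set (gathering any dirty or
 * young bits hardware may have set in the meantime), drop write
 * permission, then rewrite every entry.
 */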
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(__ptep_get(ptep))) {
		__ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		__set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1);
}

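/*
 * Clear a mapping and flush its TLB entries, returning the original
 * pte. Contiguous sets take the break path above; single entries go
 * through the regular ptep_clear_flush().
 */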
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(__ptep_get(ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

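/*
 * Register an hstate for every hugepage size the configuration
 * supports, matching the support matrix at the top of this file.
 */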
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}

pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable on CPUs affected by erratum #2645198.
		 */
		if (pte_user_exec(__ptep_get(ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}