// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
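
/*
 * A worked example of one row (illustrative, assuming a 4K granule where
 * PAGE_SHIFT = 12): each size is just 1UL << shift for the relevant
 * page-table shift,
 *
 *	CONT_PTE_SIZE == 1UL << 16	-> 64K (16 contiguous ptes)
 *	PMD_SIZE      == 1UL << 21	-> 2M
 *	CONT_PMD_SIZE == 1UL << 25	-> 32M (16 contiguous pmds)
 *	PUD_SIZE      == 1UL << 30	-> 1G
 *
 * The 16K and 64K rows follow from those granules' own shifts.
 */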

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
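
/*
 * A worked example (illustrative, assuming a 4K granule): order is
 * PUD_SHIFT - PAGE_SHIFT = 30 - 12 = 18, so the CMA area is sized for
 * order-18 (1G) gigantic pages. On 16K/64K granules, which lack PUD-level
 * block mappings, CONT_PMD_SHIFT - PAGE_SHIFT is used instead, e.g.
 * 34 - 16 = 18 (16G) on a 64K granule. Either way the order exceeds
 * MAX_ORDER, which is exactly why the buddy allocator cannot satisfy
 * such allocations and CMA is needed.
 */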

static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}
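
/*
 * Usage sketch (illustrative): arch_hugetlb_valid_size() at the bottom
 * of this file and arch_hugetlb_migration_supported() below both funnel
 * through this helper, e.g.
 *
 *	if (!__hugetlb_valid_size(SZ_2M))	// SZ_2M is PMD_SIZE on 4K
 *		return -EINVAL;			// not taken on a 4K granule
 *
 * Any size not backed by one of the four mapping variants is rejected.
 */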

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif

int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}
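
/*
 * Background sketch (illustrative, not from the original source): in the
 * arm64 translation table format, bits[1:0] of a valid descriptor are
 * 0b11 for a table entry and 0b01 for a block mapping. "Non-zero with
 * the table bit clear" therefore matches block (huge) mappings, and also
 * non-present swap/migration entries, whose encoding leaves the table
 * bit clear as well.
 */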

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
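
/*
 * Worked example of the XOR trick (illustrative): pfn_pte(pfn, __pgprot(0))
 * constructs a pte whose only set bits are the pfn bits of @pte, so the
 * XOR cancels the pfn and keeps every attribute bit:
 *
 *	pte_val(pte)    = | attrs | pfn | attrs |
 *	pfn_pte(pfn, 0) = |   0   | pfn |   0   |
 *	XOR             = | attrs |  0  | attrs |
 */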

static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
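
/*
 * Illustrative note: callers only invoke this once @ptep is known to map
 * a contiguous range, so the walk merely disambiguates the level. On a
 * 4K granule, for example:
 *
 *	size_t pgsize;
 *	int n = find_num_contig(mm, addr, ptep, &pgsize);
 *	// pte level: n == CONT_PTES (16), pgsize == 4K -> 64K range
 *	// pmd level: n == CONT_PMDS (16), pgsize == 2M -> 32M range
 */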

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
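
/*
 * Example outputs (illustrative, assuming a 4K granule):
 *
 *	size_t pgsize;
 *	num_contig_ptes(SZ_1G,  &pgsize);	// 1,  pgsize = 1G (PUD)
 *	num_contig_ptes(SZ_32M, &pgsize);	// 16, pgsize = 2M (CONT PMD)
 *	num_contig_ptes(SZ_2M,  &pgsize);	// 1,  pgsize = 2M (PMD)
 *	num_contig_ptes(SZ_64K, &pgsize);	// 16, pgsize = 4K (CONT PTE)
 *
 * An unsupported size yields 0, which makes callers' loops no-ops.
 */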

pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}

static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}
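
/*
 * Sketch of the full Break-Before-Make sequence built from these helpers
 * (illustrative; this is how the setters below use them):
 *
 *	orig = get_clear_contig(mm, addr, ptep, pgsize, ncontig); // break
 *	flush_tlb_range(&vma, addr, addr + pgsize * ncontig);	  // flush
 *	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)	  // make
 *		set_pte_at(mm, addr, ptep, ...);
 */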

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
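
/*
 * Note (illustrative): the "make" loop above rebuilds each pte from the
 * pfn plus a shared pgprot instead of stamping @pte ncontig times, and
 * advances the pfn by dpfn = pgsize >> PAGE_SHIFT per entry. For a 64K
 * contiguous range on a 4K granule, dpfn == 1, so the sixteen entries
 * map pfn, pfn + 1, ..., pfn + 15.
 */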

void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it would cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		ptep = pte_alloc_map(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
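
/*
 * Walk summary (illustrative, assuming a 4K granule): the level of the
 * returned "pte" is chosen purely by @sz,
 *
 *	huge_pte_alloc(mm, vma, addr, SZ_1G);	// the pud entry itself
 *	huge_pte_alloc(mm, vma, addr, SZ_32M);	// first pmd of a cont set
 *	huge_pte_alloc(mm, vma, addr, SZ_2M);	// a pmd, possibly shared
 *	huge_pte_alloc(mm, vma, addr, SZ_64K);	// first pte of a cont set
 *
 * Every result is cast to pte_t * so the generic hugetlb code can treat
 * all levels uniformly.
 */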

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}
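
/*
 * Usage sketch (illustrative): this is the lookup-only counterpart of
 * huge_pte_alloc(). NULL means no mapping of the requested size; a
 * non-present (swap/migration) entry is still returned so callers can
 * inspect it:
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *	if (ptep && !pte_present(huge_ptep_get(ptep)))
 *		;	// migration entry or poisoned page
 */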

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}
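
/*
 * Example (illustrative): a fault on a 32M hugepage with a 4K granule
 * arrives here with shift == CONT_PMD_SHIFT, so the entry picks up both
 * the huge attribute and the contiguous hint:
 *
 *	entry = arch_make_huge_pte(entry, CONT_PMD_SHIFT, vma->vm_flags);
 *	// pte_mkhuge() + pmd_mkcont(): one of CONT_PMDS identical pmds
 */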

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, a change in write permission only
 * needs to be checked on the first pte in the set. Dirty and young,
 * however, may have been hardware-updated on any pte, so every pte in
 * the contiguous range is checked for a discrepancy.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}

static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);
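
/*
 * Net effect (illustrative, assuming a 4K granule): the hstates
 * registered at boot are 1G (PUD), 32M (CONT PMD), 2M (PMD) and 64K
 * (CONT PTE), i.e. one row of the support matrix at the top of this
 * file. On granules without pud_sect_supported() the PUD-level hstate
 * is simply skipped.
 */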

bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}