xref: /linux/arch/arm64/include/asm/hugetlb.h (revision 3a64d5b82eccc0dc629d43cde791a2c19bd67dfc)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * arch/arm64/include/asm/hugetlb.h
4  *
5  * Copyright (C) 2013 Linaro Ltd.
6  *
7  * Based on arch/x86/include/asm/hugetlb.h
8  */
9 
10 #ifndef __ASM_HUGETLB_H
11 #define __ASM_HUGETLB_H
12 
13 #include <asm/cacheflush.h>
14 #include <asm/mte.h>
15 #include <asm/page.h>
16 
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
/*
 * Override the generic arch_hugetlb_migration_supported() hook so arm64
 * can decide, per hstate, whether hugetlb pages of that size are
 * migratable.
 */
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
extern bool arch_hugetlb_migration_supported(struct hstate *h);
#endif
21 
/*
 * Clear the arch-specific folio flags arm64 tracks for hugetlb folios:
 * the D-cache clean hint and, on MTE-capable systems, the MTE tagging
 * state bits.
 */
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
	clear_bit(PG_dcache_clean, &folio->flags.f);

#ifdef CONFIG_ARM64_MTE
	/* PG_mte_* bits are only meaningful when the CPU supports MTE. */
	if (system_supports_mte()) {
		clear_bit(PG_mte_tagged, &folio->flags.f);
		clear_bit(PG_mte_lock, &folio->flags.f);
	}
#endif
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
34 
/*
 * arm64 implementations of the hugetlb page-table primitives.  Each
 * __HAVE_ARCH_HUGE_* (or self-referencing #define) below suppresses the
 * corresponding generic fallback provided by <asm-generic/hugetlb.h>,
 * which is included after these declarations.
 */
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTE_CLEAR
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET
extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

/* Prepare/commit pair for modifying the protections of a huge PTE. */
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep);

#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t new_pte);
67 
68 #include <asm-generic/hugetlb.h>
69 
/*
 * Flush the TLB for [start, end) using a stride and page-table level
 * hint derived from the hugepage size.  Contiguous-bit mappings
 * (CONT_PMD/CONT_PTE) are built from individual PMD/PTE entries, so
 * they are invalidated at the base entry size rather than the
 * contiguous block size.
 */
static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end,
					     unsigned long stride,
					     bool last_level)
{
	switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		/* PUD-level entry: level-1 hint for the TLBI. */
		__flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
		break;
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
		/* PMD (or contiguous PMD) entries: level-2 hint. */
		__flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
		break;
	case CONT_PTE_SIZE:
		/* Contiguous PTE entries: level-3 hint. */
		__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
		break;
	default:
		/* Unrecognized stride: no usable level hint for the TLBI. */
		__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
	}
}
93 
94 #define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
95 static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
96 					   unsigned long start,
97 					   unsigned long end)
98 {
99 	unsigned long stride = huge_page_size(hstate_vma(vma));
100 
101 	__flush_hugetlb_tlb_range(vma, start, end, stride, false);
102 }
103 
104 #endif /* __ASM_HUGETLB_H */
105