/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/hugetlb.h
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/include/asm/hugetlb.h
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/cacheflush.h>
#include <asm/mte.h>
#include <asm/page.h>

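/*
 * arm64 overrides most of the generic hugetlb helpers, mainly so that
 * the contiguous-bit page sizes (CONT_PTE_SIZE/CONT_PMD_SIZE) are
 * handled as a single huge mapping.  Each override is advertised either
 * via a __HAVE_ARCH_HUGE_* macro or by defining the function name to
 * itself, which keeps the generic headers from installing a default.
 */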
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
extern bool arch_hugetlb_migration_supported(struct hstate *h);
#endif

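/*
 * Reset the per-folio state the architecture caches in the folio flags:
 * the data cache is no longer known to be clean and, when MTE is
 * supported, any record of the folio's tags having been initialised is
 * dropped.
 */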
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
	clear_bit(PG_dcache_clean, &folio->flags);

#ifdef CONFIG_ARM64_MTE
	if (system_supports_mte()) {
		clear_bit(PG_mte_tagged, &folio->flags);
		clear_bit(PG_mte_lock, &folio->flags);
	}
#endif
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags

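/*
 * arch_make_huge_pte() marks the entry as a block mapping and sets the
 * contiguous bit for the CONT_PTE/CONT_PMD sizes.  The accessors below
 * then operate on the whole run of contiguous entries as one unit.
 */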
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *ptep,
				      pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTE_CLEAR
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET
extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

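/*
 * Reserve a CMA area early in boot so gigantic hugepages can be
 * allocated from it at runtime.
 */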
void __init arm64_hugetlb_cma_reserve(void);

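/*
 * Start/commit pair used when changing the protection of a huge
 * mapping: _start tears down the existing entry and returns it,
 * _commit installs the entry with its new permissions.
 */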
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep);

#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t new_pte);

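/*
 * The generic header supplies fallbacks for everything not overridden
 * above, so it must be included after the __HAVE_ARCH_HUGE_* definitions.
 */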
#include <asm-generic/hugetlb.h>

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
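/*
 * Invalidate the TLB for a hugetlb range using the stride of the
 * underlying translation table entry.  Contiguous hugepages consist of
 * several PMD or PTE entries, so their stride stays PMD_SIZE or
 * PAGE_SIZE, and the last argument to __flush_tlb_range() hints at the
 * level of those entries (1 = PUD, 2 = PMD, 3 = PTE).
 */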
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	unsigned long stride = huge_page_size(hstate_vma(vma));

	switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
		break;
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
		__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
		break;
	case CONT_PTE_SIZE:
		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
		break;
	default:
		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
	}
}

#endif /* __ASM_HUGETLB_H */