/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <linux/pgtable.h>
#include <asm/page.h>

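/*
 * Huge pages are mapped by segment (1 MB) or region-third (2 GB) table
 * entries within the normal page table hierarchy, so the generic
 * free_pgd_range() can be used unchanged. Huge page support requires
 * the EDAT1 facility.
 */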
#define hugetlb_free_pgd_range			free_pgd_range
#define hugepages_supported()			(MACHINE_HAS_EDAT1)

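/*
 * Out-of-line helpers (arch/s390/mm/hugetlbpage.c) that convert between
 * the pte_t format expected by common code and the segment/region-third
 * table entry format used for huge pages.
 */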
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz);
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

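/*
 * PG_arch_1 tracks whether the storage keys of the page have been
 * initialized; it has to be cleared along with the other hugetlb flags.
 */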
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
	clear_bit(PG_arch_1, &folio->flags);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags

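/*
 * A huge "pte" is really a region-third table entry (2 GB pages) or a
 * segment table entry (1 MB pages), so it must be cleared to the
 * matching empty value.
 */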
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
{
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY));
	else
		set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
}

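/*
 * huge_ptep_get_and_clear() already flushes the TLB for the cleared
 * entry, so no additional flush is needed here.
 */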
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long address, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}

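/*
 * Only rewrite the entry if it actually changed: the old entry is
 * cleared (and flushed) before the new one is installed.
 */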
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
	if (changed) {
		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	return changed;
}

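/* Fetch and clear the entry, then re-install it write protected. */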
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

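/*
 * The helpers below operate on pte_t values already converted from the
 * segment/region-third format by huge_ptep_get(), so the normal pte
 * accessors can be reused directly.
 */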
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline int huge_pte_none_mostly(pte_t pte)
{
	return huge_pte_none(pte);
}

static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite_novma(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

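/*
 * userfaultfd write-protect bits are not implemented for s390 huge
 * entries, so the uffd-wp helpers are no-ops.
 */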
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
	return pte;
}

static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
{
	return pte;
}

static inline int huge_pte_uffd_wp(pte_t pte)
{
	return 0;
}

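/* Gigantic (2 GB) pages can be allocated and freed at runtime. */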
static inline bool gigantic_page_runtime_supported(void)
{
	return true;
}

#endif /* _ASM_S390_HUGETLB_H */