// SPDX-License-Identifier: GPL-2.0
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2007,2020
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
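
/*
 * For example, move_set_bit(0x100, 0x100, 0x002) evaluates to
 * ((0x100 & 0x100) >> ilog2(0x100)) << ilog2(0x002), i.e.
 * (0x100 >> 8) << 1 == 0x002: the set bit moves from position 8 to
 * position 1. If the bit selected by "a" is not set in "x", the
 * result is 0.
 */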

static inline unsigned long __pte_to_rste(pte_t pte)
{
	swp_entry_t arch_entry;
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
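	/*
	 * Example from the table above: a present, read-write, dirty,
	 * young pte ("100.001111.1") has _PAGE_READ, _PAGE_WRITE,
	 * _PAGE_DIRTY and _PAGE_YOUNG set while _PAGE_INVALID and
	 * _PAGE_PROTECT are clear; the move_set_bit() calls below copy
	 * each of these bits to its _SEGMENT_ENTRY_* counterpart,
	 * yielding the "11..0...0...11" rste.
	 */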
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= _SEGMENT_ENTRY_PRESENT;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else if (!pte_none(pte)) {
		/* swap pte */
		arch_entry = __pte_to_swp_entry(pte);
		rste = mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry));
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	swp_entry_t arch_entry;
	unsigned long pteval;
	int present, none;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		present = pud_present(__pud(rste));
		none = pud_none(__pud(rste));
	} else {
		present = pmd_present(__pmd(rste));
		none = pmd_none(__pmd(rste));
	}

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
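	/*
	 * This is the inverse of the table in __pte_to_rste(): each
	 * move_set_bit() call below copies one _SEGMENT_ENTRY_* bit back
	 * to its _PAGE_* counterpart, and _PAGE_LARGE marks the result
	 * as a huge pte.
	 */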
	if (present) {
		pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pteval |= _PAGE_LARGE | _PAGE_PRESENT;
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
	} else if (!none) {
		/* swap rste */
		arch_entry = __rste_to_swp_entry(rste);
		pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
		pteval = pte_val(pte);
	} else
		pteval = _PAGE_INVALID;
	return __pte(pteval);
}
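
/*
 * Initialize the storage keys for the huge page backing this rste.
 * PG_arch_1 on the folio acts as a "storage keys initialized" marker,
 * so the (expensive) key initialization is done at most once per folio.
 * Nothing to do if the mm does not use storage keys or the entry is
 * invalid.
 */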
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct folio *folio;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		folio = page_folio(pud_page(__pud(rste)));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		folio = page_folio(pmd_page(__pmd(rste)));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

	if (!test_and_set_bit(PG_arch_1, &folio->flags))
		__storage_key_init_range(paddr, paddr + size);
}
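
/*
 * Huge ptes are stored directly in the segment or region-third table
 * entry, so convert the pte to rste format and mark the entry as a
 * large one before storing it.
 */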
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	set_pte(ptep, __pte(rste));
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, pte);
}
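
/*
 * The raw table entry is in rste format; translate it back to the
 * generic pte encoding expected by common code.
 */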
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}
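
/*
 * Read the old pte and clear the entry, using the exchange helper that
 * matches the table type: pud for 2G entries, pmd for 1M entries.
 */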
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(mm, addr, ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}
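
/*
 * Allocate page tables down to the level matching the huge page size:
 * the returned "pte" pointer is really a pud entry for 2G pages or a
 * pmd entry for 1M pages.
 */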
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}
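
/*
 * Walk the page table without allocating: return a pointer to the pud
 * or pmd entry for addr, or NULL if the upper-level tables are not
 * present.
 */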
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (sz == PUD_SIZE)
				return (pte_t *)pudp;
			if (pud_present(*pudp))
				pmdp = pmd_offset(pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}
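
/*
 * 1M (PMD_SIZE) huge pages require the EDAT1 facility and 2G
 * (PUD_SIZE) huge pages require EDAT2; only sizes backed by an
 * available facility are valid.
 */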
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (cpu_has_edat1() && size == PMD_SIZE)
		return true;
	else if (cpu_has_edat2() && size == PUD_SIZE)
		return true;
	else
		return false;
}