// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/pgalloc.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
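
/*
 * Worked example: with a = 0x04 and b = 0x40, move_set_bit(x, 0x04, 0x40)
 * expands to ((x) & 0x04) >> ilog2(0x04) << ilog2(0x40), i.e.
 * ((x) & 0x04) >> 2 << 6, which yields 0x40 if bit 2 is set in x and
 * 0 otherwise.
 */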

static inline unsigned long __pte_to_rste(pte_t pte)
{
	swp_entry_t arch_entry;
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
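	/*
	 * Each move_set_bit() below transfers one pte bit into its
	 * segment/region entry position, e.g. a set _PAGE_READ bit in
	 * the pte becomes a set _SEGMENT_ENTRY_READ bit in the rste.
	 */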
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= _SEGMENT_ENTRY_PRESENT;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else if (!pte_none(pte)) {
		/* swap pte */
		arch_entry = __pte_to_swp_entry(pte);
		rste = mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry));
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	swp_entry_t arch_entry;
	unsigned long pteval;
	int present, none;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		present = pud_present(__pud(rste));
		none = pud_none(__pud(rste));
	} else {
		present = pmd_present(__pmd(rste));
		none = pmd_none(__pmd(rste));
	}

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pteval |= _PAGE_LARGE | _PAGE_PRESENT;
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
	} else if (!none) {
		/* swap rste */
		arch_entry = __rste_to_swp_entry(rste);
		pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
		pteval = pte_val(pte);
	} else
		pteval = _PAGE_INVALID;
	return __pte(pteval);
}

static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct folio *folio;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		folio = page_folio(pud_page(__pud(rste)));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		folio = page_folio(pmd_page(__pmd(rste)));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

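	/*
	 * PG_arch_1 marks folios whose storage keys have already been
	 * initialized, so the key initialization is done at most once
	 * per folio.
	 */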
	if (!test_and_set_bit(PG_arch_1, &folio->flags))
		__storage_key_init_range(paddr, paddr + size);
}

void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	set_pte(ptep, __pte(rste));
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, pte);
}

pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(mm, addr, ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

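	/* 2G hugepages live in region-3 entries, 1M hugepages in segment entries */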
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}
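
/*
 * A rough usage sketch (generic hugetlb fault path, simplified):
 *
 *	ptep = huge_pte_alloc(mm, vma, addr, huge_page_size(h));
 *	...
 *	set_huge_pte_at(mm, addr, ptep, new_pte, huge_page_size(h));
 *
 * The returned "pte" pointer actually refers to a pud entry for 2G
 * hugepages or a pmd entry for 1M hugepages.
 */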

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
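			/*
			 * For a 2G mapping return the pud entry itself,
			 * present or not; callers distinguish empty
			 * entries via huge_pte_none().
			 */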
			if (sz == PUD_SIZE)
				return (pte_t *)pudp;
			if (pud_present(*pudp))
				pmdp = pmd_offset(pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

bool __init arch_hugetlb_valid_size(unsigned long size)
{
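	/* EDAT1 provides 1M (segment) hugepages, EDAT2 adds 2G (region-3) */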
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
		return true;
	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
		return true;
	else
		return false;
}