/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_64_H
#define _ASM_RISCV_PGTABLE_64_H

#include <linux/bits.h>
#include <linux/const.h>
#include <asm/errata_list.h>

extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;

#define PGDIR_SHIFT_L3 30
#define PGDIR_SHIFT_L4 39
#define PGDIR_SHIFT_L5 48
#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
		(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
/* Size of region mapped by a page global directory */
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
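/*
 * For reference: PGDIR_SIZE is 1 GiB with a 3-level page table (Sv39,
 * shift 30), 512 GiB with 4 levels (Sv48, shift 39) and 256 TiB with
 * 5 levels (Sv57, shift 48).
 */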

/* p4d is folded into pgd in case of 4-level page table */
#define P4D_SHIFT_L3 30
#define P4D_SHIFT_L4 39
#define P4D_SHIFT_L5 39
#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
		(pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))

/* pud is folded into pgd in case of 3-level page table */
#define PUD_SHIFT 30
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))

#define PMD_SHIFT 21
/* Size of region mapped by a page middle directory */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))

/* Page 4th Directory entry */
typedef struct {
	unsigned long p4d;
} p4d_t;

#define p4d_val(x) ((x).p4d)
#define __p4d(x) ((p4d_t) { (x) })
#define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t))

/* Page Upper Directory entry */
typedef struct {
	unsigned long pud;
} pud_t;

#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))

/* Page Middle Directory entry */
typedef struct {
	unsigned long pmd;
} pmd_t;

#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })

#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))

/*
 * rv64 PTE format:
 * | 63 | 62 61 | 60 54 | 53  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
 *   N     MT      RSV     PFN     reserved for SW   D   A   G   U   X   W   R   V
 */
#define _PAGE_PFN_MASK GENMASK(53, 10)

/*
 * [63] Svnapot definitions:
 * 0 Svnapot disabled
 * 1 Svnapot enabled
 */
#define _PAGE_NAPOT_SHIFT 63
#define _PAGE_NAPOT BIT(_PAGE_NAPOT_SHIFT)
/*
 * Only 64KB (order 4) napot ptes supported.
 */
#define NAPOT_CONT_ORDER_BASE 4
enum napot_cont_order {
	NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE,
	NAPOT_ORDER_MAX,
};

#define for_each_napot_order(order) \
	for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
#define for_each_napot_order_rev(order) \
	for (order = NAPOT_ORDER_MAX - 1; \
	     order >= NAPOT_CONT_ORDER_BASE; order--)
#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1))

#define napot_cont_shift(order) ((order) + PAGE_SHIFT)
#define napot_cont_size(order) BIT(napot_cont_shift(order))
#define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL))
#define napot_pte_num(order) BIT(order)
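/*
 * Worked example, assuming 4 KiB base pages (PAGE_SHIFT == 12): for the
 * 64 KiB NAPOT size the order is 4, so napot_cont_shift(4) == 16,
 * napot_cont_size(4) == 64 KiB and napot_pte_num(4) == 16, i.e. the
 * mapping is described by 16 identical contiguous PTEs.
 */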

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define HUGE_MAX_HSTATE (2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE))
#else
#define HUGE_MAX_HSTATE 2
#endif
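/*
 * With Svnapot this currently works out to 2 + (5 - 4) = 3 hugepage
 * sizes: the PMD- and PUD-level huge pages plus the 64 KiB NAPOT size.
 */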

/*
 * [62:61] Svpbmt Memory Type definitions:
 *
 *  00 - PMA    Normal Cacheable, No change to implied PMA memory type
 *  01 - NC     Non-cacheable, idempotent, weakly-ordered Main Memory
 *  10 - IO     Non-cacheable, non-idempotent, strongly-ordered I/O memory
 *  11 - Rsvd   Reserved for future standard use
 */
#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
#define _PAGE_IO_SVPBMT (1UL << 62)
#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)

/*
 * [63:59] T-Head Memory Type definitions:
 * bit[63] SO - Strong Order
 * bit[62] C - Cacheable
 * bit[61] B - Bufferable
 * bit[60] SH - Shareable
 * bit[59] Sec - Trustable
 * 00110 - NC   Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
 * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
 * 10010 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
 */
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60))
#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
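/*
 * In terms of the table above: _PAGE_PMA_THEAD sets C|B|SH (01110),
 * _PAGE_NOCACHE_THEAD sets B|SH (00110) and _PAGE_IO_THEAD sets SO|SH
 * (10010); _PAGE_MTMASK_THEAD covers all five attribute bits.
 */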

static inline u64 riscv_page_mtmask(void)
{
	u64 val;

	ALT_SVPBMT(val, _PAGE_MTMASK);
	return val;
}

static inline u64 riscv_page_nocache(void)
{
	u64 val;

	ALT_SVPBMT(val, _PAGE_NOCACHE);
	return val;
}

static inline u64 riscv_page_io(void)
{
	u64 val;

	ALT_SVPBMT(val, _PAGE_IO);
	return val;
}

#define _PAGE_NOCACHE riscv_page_nocache()
#define _PAGE_IO riscv_page_io()
#define _PAGE_MTMASK riscv_page_mtmask()
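/*
 * Illustrative sketch only, not an interface defined by this header: a
 * hypothetical helper showing how the runtime-selected memory-type bits
 * above could be applied to a pgprot_t, in the same spirit as the
 * pgprot_noncached()/pgprot_writecombine() helpers in <asm/pgtable.h>.
 */
static inline pgprot_t __example_pgprot_nocache(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	val &= ~_PAGE_MTMASK;	/* clear any previously requested memory type */
	val |= _PAGE_NOCACHE;	/* request the non-cacheable (NC) type */

	return __pgprot(val);
}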

/* Set of bits to preserve across pte_modify() */
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
					  _PAGE_WRITE | _PAGE_EXEC | \
					  _PAGE_USER | _PAGE_GLOBAL | \
					  _PAGE_MTMASK))
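/*
 * pte_modify() in <asm/pgtable.h> relies on this roughly as
 * __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)): the PFN,
 * software and dirty/accessed bits survive, while the presence/protection
 * and memory-type bits come from the new pgprot.
 */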

static inline int pud_present(pud_t pud)
{
	return (pud_val(pud) & _PAGE_PRESENT);
}

static inline int pud_none(pud_t pud)
{
	return (pud_val(pud) == 0);
}

static inline int pud_bad(pud_t pud)
{
	return !pud_present(pud);
}

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
	return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}

static inline int pud_user(pud_t pud)
{
	return pud_val(pud) & _PAGE_USER;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
{
	return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pud_pfn(pud_t pud)
{
	return __page_val_to_pfn(pud_val(pud));
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
}

static inline struct page *pud_page(pud_t pud)
{
	return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
}

#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	if (pgtable_l5_enabled)
		return false;

	return true;
}

#define mm_pud_folded mm_pud_folded
static inline bool mm_pud_folded(struct mm_struct *mm)
{
	if (pgtable_l4_enabled)
		return false;

	return true;
}
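/*
 * In other words: with Sv39 (neither flag set) both the P4D and PUD levels
 * are folded into the PGD and a walk is PGD -> PMD -> PTE; Sv48 adds a real
 * PUD level and Sv57 additionally adds a real P4D level.
 */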

#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

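/*
 * ALT_THEAD_PMA() (from <asm/errata_list.h>) is a no-op on hardware that
 * follows the standard PMA/Svpbmt rules; on T-Head cores with the PBMT
 * errata it fills in the vendor's default cacheable memory type when no
 * explicit type was requested, since T-Head's "normal" encoding is not
 * all-zeroes (see _PAGE_PMA_THEAD above).
 */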
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pmd_pfn(pmd_t pmd)
{
	return __page_val_to_pfn(pmd_val(pmd));
}

#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)

#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (pgtable_l4_enabled)
		WRITE_ONCE(*p4dp, p4d);
	else
		set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
}

static inline int p4d_none(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return (p4d_val(p4d) == 0);

	return 0;
}

static inline int p4d_present(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return (p4d_val(p4d) & _PAGE_PRESENT);

	return 1;
}

static inline int p4d_bad(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return !p4d_present(p4d);

	return 0;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if (pgtable_l4_enabled)
		set_p4d(p4d, __p4d(0));
}

static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
{
	return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _p4d_pfn(p4d_t p4d)
{
	return __page_val_to_pfn(p4d_val(p4d));
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));

	return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
}
#define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d))

static inline struct page *p4d_page(p4d_t p4d)
{
	return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
}

#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset pud_offset
pud_t *pud_offset(p4d_t *p4d, unsigned long address);

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (pgtable_l5_enabled)
		WRITE_ONCE(*pgdp, pgd);
	else
		set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return (pgd_val(pgd) == 0);

	return 0;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return (pgd_val(pgd) & _PAGE_PRESENT);

	return 1;
}

static inline int pgd_bad(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return !pgd_present(pgd);

	return 0;
}

static inline void pgd_clear(pgd_t *pgd)
{
	if (pgtable_l5_enabled)
		set_pgd(pgd, __pgd(0));
}

static inline p4d_t *pgd_pgtable(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));

	return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
}
#define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd))

static inline struct page *pgd_page(pgd_t pgd)
{
	return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
}
#define pgd_page(pgd) pgd_page(pgd)

#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

#define p4d_offset p4d_offset
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pte_devmap(pte_t pte);
static inline pte_t pmd_pte(pmd_t pmd);

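/*
 * Leaf entries share the same bit layout at every level, so a leaf PMD can
 * simply be reinterpreted as a PTE here; pmd_pte() (defined in
 * <asm/pgtable.h>) just forwards the raw value.
 */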
static inline int pmd_devmap(pmd_t pmd)
{
	return pte_devmap(pmd_pte(pmd));
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#endif /* _ASM_RISCV_PGTABLE_64_H */