/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/pgtable-nopud.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>
#include <linux/page_table_check.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))
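
/*
 * Worked example (illustrative, assuming the 8 KB page size noted
 * above, i.e. PAGE_SHIFT == 13):
 *
 *   PMD_SHIFT   = 13 + (13 - 3)     = 23  -> each PMD entry maps 8 MB
 *   PGDIR_SHIFT = 13 + 2 * (13 - 3) = 33  -> each PGD entry maps 8 GB
 *   PTRS_PER_*  = 1 << (13 - 3)     = 1024 entries per one-page level
 *
 * Three such levels over 8 KB pages cover 1024 * 1024 * 1024 * 8 KB
 * = 2^43 bytes of virtual address space.
 */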

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)
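
/*
 * Illustrative arithmetic (assuming 8 KB pages, so PGDIR_SIZE == 2^33):
 * in the !CONFIG_ALPHA_LARGE_VMALLOC case the vmalloc area is the single
 * top-level entry below the last one:
 *   VMALLOC_START = -2 * 2^33 = 0xfffffffc00000000
 *   VMALLOC_END   =     -2^33 = 0xfffffffe00000000
 * i.e. one PGDIR's worth (8 GB) of space.
 */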

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/* We borrow bit 39 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x8000000000UL

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW)
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c).
 */
	/* xwr */

/*
 * pgprot_noncached() is only for InfiniBand PCI support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * All caching attribute macros are identity on Alpha, so the generic
 * pgprot_modify() degenerates to tautological self-comparisons.
 * Override it to just return newprot directly.
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
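
/*
 * Worked example (illustrative): a pfn whose two KSEG bits are 01 --
 * i.e. one derived from an address in the 0x40000000000 region -- has
 * both bits flipped by the XOR with KSEG_PFN (0xc0000000000 >>
 * PAGE_SHIFT), moving it to the 0x80000000000 region; any other pfn
 * passes through PHYS_TWIDDLE() unchanged.
 */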

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define page_to_pa(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define PFN_PTE_SHIFT		32
#define pte_pfn(pte)		(pte_val(pte) >> PFN_PTE_SHIFT)

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
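
/*
 * Note: _PAGE_CHG_MASK above makes pte_modify() keep the pfn
 * (bits 63..32) and the software dirty/accessed state, while the old
 * protection bits (FOR/FOW/FOE etc.) are dropped and replaced by
 * whatever newprot supplies.
 */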

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

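/*
 * Note on the encoding: (ptep - PAGE_OFFSET) is the physical address of
 * the page table page, so shifting it left by (32 - PAGE_SHIFT) is
 * equivalent to placing its pfn at bit 32, matching _PFN_MASK above.
 */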

extern void migrate_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long addr);

extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#define pmd_pfn(pmd)	(pmd_val(pmd) >> 32)
#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> 32))
#define pud_page(pud)	(pfn_to_page(pud_val(pud) >> 32))

extern inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)(PAGE_OFFSET + ((pud_val(pud) & _PFN_MASK) >> (32-PAGE_SHIFT)));
}

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	WRITE_ONCE(pte_val(*ptep), 0);
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pud_none(pud_t pud)		{ return !pud_val(pud); }
extern inline int pud_bad(pud_t pud)		{ return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pud_present(pud_t pud)	{ return pud_val(pud) & _PAGE_VALID; }
extern inline void pud_clear(pud_t * pudp)	{ pud_val(*pudp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite_novma(pte_t pte){ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
 * The smp_rmb() in the following functions is required to order the load
 * of *dir (the pointer in the top level page table) with any subsequent
 * load of the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
	pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pmd_offset pmd_offset

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pte_offset_kernel pte_offset_kernel
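
/*
 * A minimal lookup sketch (illustrative only, assuming 'addr' is mapped
 * in 'mm'; the p4d/pud steps come from the generic folding headers):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With <asm-generic/pgtable-nopud.h> included above, the p4d and pud
 * levels are folded, so those two steps are effectively no-ops.
 */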

extern pgd_t swapper_pg_dir[1024];

#ifdef CONFIG_COMPACTION
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
					unsigned long address,
					pte_t *ptep)
{
	pte_t pte = READ_ONCE(*ptep);

	pte_clear(mm, address, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH

static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);

	page_table_check_pte_clear(mm, addr, pte);
	migrate_flush_tlb_page(vma, addr);
	return pte;
}

#endif

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
}

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <------------------- offset ------------------> E <--- type -->
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------------------- zeroes -------------------------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0x7f)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
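
/*
 * For illustration, with hypothetical values: __swp_entry(3, 0x1234)
 * yields pte_val == (3UL << 32) | (0x1234UL << 40); decoding it gives
 * __swp_type() == 3 (bits 38..32) and __swp_offset() == 0x1234
 * (bits 63..40), with bit 39 left free for the exclusive marker.
 */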

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */