/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/page.h>
#include <linux/mm_types.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x400

#if CONFIG_PGTABLE_LEVELS == 4
#include <asm/pgtable-4level.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
#else
#error "Unsupported number of page table levels"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

#ifndef COMPILE_OFFSETS
#include <as-layout.h> /* for high_physmem */
#endif

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	(TASK_SIZE-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
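
/*
 * Worked example with made-up numbers: if high_physmem were 0x604fe000 and
 * __va_space 0x800000 (8MB), then
 *   VMALLOC_START = (0x604fe000 + 0x800000) & ~0x7fffff = 0x60800000,
 * i.e. the smallest VMALLOC_OFFSET-aligned address strictly above the top
 * of physical memory.
 */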

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers execute
 * permission the same as read permission.
 * Also, write permissions imply read permissions. This is the closest we can
 * get..
 */
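
/*
 * Note that PAGE_COPY and PAGE_READONLY above therefore expand to identical
 * bits: a copy-on-write mapping is simply a write-protected one, and the
 * COW decision is made from the VMA flags at fault time.
 */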

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)

#define pmd_needsync(x)   (pmd_val(x) & _PAGE_NEEDSYNC)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)

#define pud_needsync(x)   (pud_val(x) & _PAGE_NEEDSYNC)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)

#define p4d_needsync(x)   (p4d_val(x) & _PAGE_NEEDSYNC)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_RW) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_needsync(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEEDSYNC);
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEEDSYNC);
	return pte;
}

static inline pte_t pte_mkneedsync(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEEDSYNC);
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
	 * update_pte_range knows to unmap it.
	 */

	*pteptr = pte_mkneedsync(*pteptr);
}
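
/*
 * UML has no page-table hardware of its own: the real mappings live in the
 * host process's address space, so every PTE update is recorded via
 * _PAGE_NEEDSYNC and replayed to the host when the affected range is
 * flushed.
 */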

#define PFN_PTE_SHIFT		PAGE_SHIFT

static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
{
	guard(spinlock_irqsave)(&mm->context.sync_tlb_lock);

	if (!mm->context.sync_tlb_range_to) {
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}
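
/*
 * Note the pending range is a single bounding interval, not a list: marking
 * [0x1000, 0x3000) and then [0x8000, 0x9000) leaves the pending range as
 * [0x1000, 0x9000), trading some over-flushing for O(1) bookkeeping.
 */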

#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, int nr)
{
	/* Basically the default implementation */
	size_t length = nr * PAGE_SIZE;

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
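		/* Step the PFN by one page; the low flag bits are unchanged. */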
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}

	um_tlb_mark_sync(mm, addr, addr + length);
}

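/*
 * _PAGE_NEEDSYNC is purely software bookkeeping, so two PTEs that differ
 * only in that bit still map the same page with the same permissions and
 * must compare equal here.
 */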
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}

#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_set_val(pte, pfn_to_phys(pfn), pgprot);

	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * referenced by the given pmd entry.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)			(((x).val >> 5) & 0x1f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
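
/*
 * Worked example with made-up numbers: __swp_entry(3, 0x1234) gives
 * val = (3 << 5) | (0x1234 << 11) = 0x91a060; decoding yields
 * __swp_type() = (0x91a060 >> 5) & 0x1f = 3 and
 * __swp_offset() = 0x91a060 >> 11 = 0x1234, with bit 10 left free for the
 * E (exclusive) marker and the low flag bits all clear.
 */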

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

#endif