/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x400

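/*
 * Illustrative sketch only, not part of this header: a PROT_NONE
 * mapping keeps _PAGE_PRESENT clear but sets _PAGE_PROTNONE, so the
 * pte still counts as present while failing every permission check:
 *
 *	pte_t pte = __pte(_PAGE_PROTNONE | _PAGE_ACCESSED);	// PAGE_NONE
 *	pte_present(pte);	// true, via _PAGE_PROTNONE
 *	pte_read(pte);		// false, _PAGE_USER is clear
 *	pte_write(pte);		// false, _PAGE_RW is clear
 */
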
#if CONFIG_PGTABLE_LEVELS == 4
#include <asm/pgtable-4level.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
#else
#error "Unsupported number of page table levels"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Zero page used for uninitialized memory areas. */
extern unsigned long *empty_zero_page;

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	(TASK_SIZE-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END

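/*
 * Worked example with hypothetical values: if __va_space is 8MB
 * (0x800000) and end_iomem is 0x12345000, then
 *
 *	VMALLOC_START = (0x12345000 + 0x800000) & ~0x7fffff
 *		      = 0x12b45000 & 0xff800000
 *		      = 0x12800000
 *
 * i.e. vmalloc space begins at the next __va_space boundary past
 * end_iomem, and the unmapped gap in between catches stray accesses
 * off the end of physical memory.
 */
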
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and treats it the
 * same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get.
 */

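/*
 * For illustration (hypothetical pte, not kernel code): because
 * execute cannot be tracked separately from read, a read-only user
 * pte still reports itself as executable:
 *
 *	pte_t pte = mk_pte(page, PAGE_READONLY);
 *	pte_read(pte);	// true: _PAGE_USER set, _PAGE_PROTNONE clear
 *	pte_exec(pte);	// also true, same check as pte_read()
 *	pte_write(pte);	// false: _PAGE_RW is clear
 */
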
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)

#define pmd_needsync(x)   (pmd_val(x) & _PAGE_NEEDSYNC)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)

#define pud_needsync(x)   (pud_val(x) & _PAGE_NEEDSYNC)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)

#define p4d_needsync(x)   (p4d_val(x) & _PAGE_NEEDSYNC)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_needsync(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEEDSYNC);
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEEDSYNC);
	return pte;
}

static inline pte_t pte_mkneedsync(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEEDSYNC);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
	 * update_pte_range knows to unmap it.
	 */

	*pteptr = pte_mkneedsync(*pteptr);
}

#define PFN_PTE_SHIFT		PAGE_SHIFT

static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
{
	if (!mm->context.sync_tlb_range_to) {
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}

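/*
 * Sketch of how the pending-sync window grows (hypothetical values):
 * the context tracks a single range, so disjoint marks are merged
 * into one span covering both:
 *
 *	um_tlb_mark_sync(mm, 0x1000, 0x2000);	// range = [0x1000, 0x2000)
 *	um_tlb_mark_sync(mm, 0x5000, 0x6000);	// range = [0x1000, 0x6000)
 *
 * The pages in between are re-examined on the next sync; precision is
 * traded for a constant-size bookkeeping cost.
 */
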
#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, int nr)
{
	/* Basically the default implementation */
	size_t length = nr * PAGE_SIZE;

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* Advance to the pte for the next page (pfn + 1). */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}

	um_tlb_mark_sync(mm, addr, addr + length);
}

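/*
 * Usage sketch (hypothetical values): mapping nr consecutive pages
 * writes one pte per page, advancing the pfn by one each step, and
 * records a single sync range covering them all:
 *
 *	set_ptes(mm, addr, ptep, pte, 3);
 *	// ptep[0] -> pfn, ptep[1] -> pfn + 1, ptep[2] -> pfn + 2
 *	// marks [addr, addr + 3 * PAGE_SIZE) for the next TLB sync
 */
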
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}

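/*
 * Note that _PAGE_NEEDSYNC is masked out above: two ptes differing
 * only in a pending host-sync mark still compare equal, so an entry
 * that has not been synced yet does not look like a changed mapping.
 */
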
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	pte;})

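/*
 * Usage sketch (hypothetical page): mk_pte() combines a page's
 * physical address with the given protection bits:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	pte_pfn(pte);	// == page_to_pfn(page)
 *	pte_write(pte);	// true: PAGE_KERNEL includes _PAGE_RW
 */
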
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * The pmd page can be thought of as an array like this:
 * pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table
 * page that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)			(((x).val >> 5) & 0x1f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

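/*
 * Worked example (hypothetical values): __swp_entry(3, 0x1234) yields
 *
 *	(3 << 5) | (0x1234 << 11) == 0x60 | 0x91a000 == 0x91a060
 *
 * Bits 0 and 4 (_PAGE_PRESENT, _PAGE_PROTNONE) remain clear, so the
 * resulting pte is !pte_present(); set_pte() later adds the
 * _PAGE_NEEDSYNC mark described in the layout comment above.
 */
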
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

#endif /* __UM_PGTABLE_H */