/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * Based on asm/pgtable-32.h from mips which is:
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_PGTABLE_H
#define _ASM_NIOS2_PGTABLE_H

#include <linux/io.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M - 1)

#define MODULES_VADDR		(CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M)
#define MODULES_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
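
/*
 * Layout, as implied by the constants above: the vmalloc area runs from
 * the kernel MMU region base up to 32 MiB below the kernel region, and
 * that last 32 MiB window is reserved for loadable modules, presumably
 * to keep module code within relative-call reach of the kernel.
 */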

struct mm_struct;

/* Helper macro: build a pgprot value from execute/write/read flags */
#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED |		\
				((x) ? _PAGE_EXEC : 0) |		\
				((r) ? _PAGE_READ : 0) |		\
				((w) ? _PAGE_WRITE : 0))

/*
 * MKP() is what populates protection_map[] for generic kernel code.
 * Private pages have the W bit removed so that the first write faults,
 * as COW support requires; shared pages can use the exact HW mapping.
 */

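/*
 * Illustrative sketch only: the real protection_map[] lives in
 * arch/nios2/mm/init.c and is built from MKP(). Entries behave roughly
 * like this (values here are examples, not the authoritative table):
 *
 *	[VM_READ]			 = MKP(0, 0, 1),
 *	[VM_WRITE | VM_READ]		 = MKP(0, 0, 1),  // W dropped for COW
 *	[VM_SHARED | VM_WRITE | VM_READ] = MKP(0, 1, 1),  // exact HW mapping
 */
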
/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)

#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_ACCESSED)

#define PAGE_COPY MKP(0, 0, 1)

#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

#define USER_PTRS_PER_PGD	\
	(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
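
/*
 * Two-level table geometry: with nios2's 4 KiB pages and 4-byte table
 * entries, both levels hold 1024 entries and each PGD entry maps
 * 1 << 22 = 4 MiB. USER_PTRS_PER_PGD is thus the number of PGD slots
 * covering user space below the kernel MMU region.
 */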

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
	*pmdptr = pmdval;
}

static inline int pte_write(pte_t pte)
	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)
	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)
	{ return pte_val(pte) & _PAGE_ACCESSED; }

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHED;

	return __pgprot(prot);
}
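
/*
 * The #define above advertises this override; without it, generic code
 * falls back to a no-op pgprot_noncached(). Here an uncached mapping is
 * simply one with _PAGE_CACHED cleared.
 */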

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
}
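
/*
 * Everything outside _PAGE_GLOBAL and the low nibble counts: pte_clear()
 * below parks address bits 15:12 in the low nibble of an empty PTE, so
 * those bits must not make the PTE look live.
 */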

static inline int pte_present(pte_t pte)
	{ return pte_val(pte) & _PAGE_PRESENT; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
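
/*
 * Note that pte_modify() above swaps only the R/W/X protection bits;
 * the PFN and the present/accessed/dirty/cached state are preserved.
 */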

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
			&& (pmd_val(pmd) != 0UL);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
}

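/*
 * An empty PMD points at invalid_pte_table, a shared page of
 * always-invalid PTEs, rather than at NULL; presumably this lets the
 * page table walk dereference a PMD unconditionally and simply find a
 * none PTE at the next level.
 */
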
#define pte_pfn(pte)		(pte_val(pte) & 0xfffff)
#define pfn_pte(pfn, prot)	(__pte((pfn) | pgprot_val(prot)))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * Store a Linux PTE into the Linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

#define PFN_PTE_SHIFT		0

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pte));

	flush_dcache_range(paddr, paddr + nr * PAGE_SIZE);
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += 1;
	}
}
#define set_ptes set_ptes
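
/*
 * Because PFN_PTE_SHIFT is 0, the PFN occupies the low bits of a PTE,
 * so the "pte_val(pte) += 1" above steps to the next page of the folio;
 * the folio's kernel mapping is dcache-flushed once up front.
 */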

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) ==
		(unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t null;

	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;

	set_pte(ptep, null);
}
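
/*
 * Cleared PTEs are tagged with address bits 15:12 rather than being
 * plain zero. This appears to keep invalid entries loaded into the
 * software-managed TLB distinct per address; pte_none() masks the
 * nibble off again.
 */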

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_pfn(pmd)		(pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return pmd_val(pmd);
}

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   E < type -> 0 0 0 0 0 0 <-------------- offset --------------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *
 * The zeroed bits include _PAGE_PRESENT, so a swap PTE is never
 * mistaken for a present mapping.
 */
#define __swp_type(swp)		(((swp).val >> 26) & 0x1f)
#define __swp_offset(swp)	((swp).val & 0xfffff)
#define __swp_entry(type, off)	((swp_entry_t) { (((type) & 0x1f) << 26) \
						 | ((off) & 0xfffff) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })

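/*
 * Worked example with illustrative values: __swp_entry(2, 0x123) packs
 * type 2 into bits 30..26 and offset 0x123 into bits 19..0, giving a
 * PTE value of 0x08000123. The E bit (bit 31) is managed separately by
 * the pte_swp_*exclusive() helpers below.
 */
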
static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void __init paging_init(void);
extern void __init mmu_init(void);

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr);

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

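/*
 * Forward declaration only: the body comes from the generic
 * <linux/pgtable.h>, which supplies pte_same() when the architecture
 * does not override it; ptep_set_access_flags() below needs the name
 * in scope already.
 */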
static inline int pte_same(pte_t pte_a, pte_t pte_b);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_ptes(vma->vm_mm, address, ptep, entry, 1);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#endif /* _ASM_NIOS2_PGTABLE_H */