/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * Based on asm/pgtable-32.h from mips which is:
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_PGTABLE_H
#define _ASM_NIOS2_PGTABLE_H

#include <linux/io.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)

struct mm_struct;

/* Helper macro */
#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED |		\
				((x) ? _PAGE_EXEC : 0) |		\
				((r) ? _PAGE_READ : 0) |		\
				((w) ? _PAGE_WRITE : 0))
/*
 * These are the macros that generic kernel code needs
 * (to populate protection_map[])
 */

/* Remove W bit on private pages for COW support */
#define __P000	MKP(0, 0, 0)
#define __P001	MKP(0, 0, 1)
#define __P010	MKP(0, 0, 0)	/* COW */
#define __P011	MKP(0, 0, 1)	/* COW */
#define __P100	MKP(1, 0, 0)
#define __P101	MKP(1, 0, 1)
#define __P110	MKP(1, 0, 0)	/* COW */
#define __P111	MKP(1, 0, 1)	/* COW */

/* Shared pages can have exact HW mapping */
#define __S000	MKP(0, 0, 0)
#define __S001	MKP(0, 0, 1)
#define __S010	MKP(0, 1, 0)
#define __S011	MKP(0, 1, 1)
#define __S100	MKP(1, 0, 0)
#define __S101	MKP(1, 0, 1)
#define __S110	MKP(1, 1, 0)
#define __S111	MKP(1, 1, 1)
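
/*
 * For illustration only: generic mm code copies __P000..__S111 into
 * protection_map[16] and indexes it with the low vm_flags bits,
 * roughly as sketched below.  The example_* helper is hypothetical;
 * protection_map and the VM_* flags live in <linux/mm.h>, which this
 * header does not include, hence the #if 0.
 */
#if 0
static inline pgprot_t example_vm_get_page_prot(unsigned long vm_flags)
{
	/* VM_READ, VM_WRITE, VM_EXEC, VM_SHARED are bits 0..3 */
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
#endif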

/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)

#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_ACCESSED)

#define PAGE_COPY MKP(0, 0, 1)

#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	\
	(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
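
/*
 * Worked out for the 4 KiB pages used here: sizeof(pgd_t) and
 * sizeof(pte_t) are 4 bytes, so PTRS_PER_PGD = PTRS_PER_PTE =
 * 4096 / 4 = 1024, each PGD entry spans PGDIR_SIZE = 1 << 22 = 4 MiB,
 * and 1024 entries cover the full 32-bit address space (4 GiB).
 */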

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
	*pmdptr = pmdval;
}

static inline int pte_write(pte_t pte)
	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)
	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)
	{ return pte_val(pte) & _PAGE_ACCESSED; }

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHED;

	return __pgprot(prot);
}
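
/*
 * A minimal usage sketch: drop the cache bit from an otherwise normal
 * kernel mapping, e.g. when a driver needs an uncached view of memory.
 * The example_* helper is hypothetical, not part of this header's API.
 */
static inline pgprot_t example_uncached_kernel_prot(void)
{
	return pgprot_noncached(PAGE_KERNEL);
}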

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
}

static inline int pte_present(pte_t pte)
	{ return pte_val(pte) & _PAGE_PRESENT; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
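
/*
 * Sketch of the intended use: swap in new protection bits while
 * keeping everything outside the READ/WRITE/EXEC mask.  Here the
 * hypothetical helper applies PAGE_COPY, clearing _PAGE_WRITE and
 * _PAGE_EXEC, setting _PAGE_READ, and preserving the pfn and the
 * dirty/accessed state bits.
 */
static inline pte_t example_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_COPY);
}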

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
			&& (pmd_val(pmd) != 0UL);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
}

#define pte_pfn(pte)		(pte_val(pte) & 0xfffff)
#define pfn_pte(pfn, prot)	(__pte((pfn) | pgprot_val(prot)))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * Store a Linux PTE into the Linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

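/*
 * The flush below writes the page's kernel-space view back out of the
 * data cache before the new translation is installed; note that
 * page_to_virt() hands flush_dcache_range() a kernel virtual address,
 * not a physical one.
 */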
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long vaddr = (unsigned long)page_to_virt(pte_page(pteval));

	flush_dcache_range(vaddr, vaddr + PAGE_SIZE);
	set_pte(ptep, pteval);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) ==
		(unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t null;

	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;

	set_pte_at(mm, addr, ptep, null);
}
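
/*
 * Note the pairing with pte_none() above: the "cleared" entry keeps
 * the low four bits of the address's page number, and pte_none()
 * masks exactly those bits out (the 0xf in its check), so a cleared
 * entry still reads back as none.
 */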

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	(pfn_pte(page_to_pfn(page), prot))

#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return pmd_val(pmd);
}

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Encode and decode a swap entry (must be !pte_none(pte) && !pte_present(pte)):
 *
 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 ...  1  0
 *  0  0  0  0 <type>  0  0  0  0  0  0 <---- offset ---->
 *
 * This gives us up to 2**2 = 4 swap files and 2**20 * 4K = 4G per swap file.
 *
 * Note that the offset field is always non-zero, thus !pte_none(pte) is always
 * true.
 */
#define __swp_type(swp)		(((swp).val >> 26) & 0x3)
#define __swp_offset(swp)	((swp).val & 0xfffff)
#define __swp_entry(type, off)	((swp_entry_t) { (((type) & 0x3) << 26) \
						 | ((off) & 0xfffff) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
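
/*
 * A hypothetical round trip through the encoding above, with type 2
 * and offset 0x123 landing in bits 27..26 and 19..0 respectively.
 * swp_entry_t is defined in <linux/mm_types.h>, which is not included
 * here, hence the #if 0.
 */
#if 0
swp_entry_t e = __swp_entry(2, 0x123);	/* .val == (2 << 26) | 0x123 */
unsigned int t = __swp_type(e);		/* t == 2 */
unsigned long o = __swp_offset(e);	/* o == 0x123 */
#endif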

#define kern_addr_valid(addr)		(1)

extern void __init paging_init(void);
extern void __init mmu_init(void);

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t *pte);

#endif /* _ASM_NIOS2_PGTABLE_H */