xref: /linux/arch/arm/include/asm/pgtable.h (revision 17f57211969bddca2e922299a2530b1c65ccabfa)
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

#include <asm/pgtable-2level.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
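
/*
 * Worked example (illustration only, not part of the original header;
 * the memory size is assumed): with PAGE_OFFSET at 0xc0000000 and 512MB
 * of lowmem, high_memory ends up at 0xe0000000, so
 *
 *	VMALLOC_START = (0xe0000000 + 0x00800000) & ~0x007fffff
 *	              = 0xe0800000
 *
 * i.e. the vmalloc area begins one VMALLOC_OFFSET beyond the end of
 * lowmem, rounded to an 8MB boundary.
 */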

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed.  This is particularly important for CPUs that
 * do not have their vectors mapped high.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
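
/*
 * Illustration (not part of the original header): with _L_PTE_DEFAULT
 * above, e.g.
 *
 *	__PAGE_COPY
 *
 * expands to
 *
 *	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_USER |
 *		 L_PTE_RDONLY | L_PTE_XN)
 *
 * The PAGE_* variants build on pgprot_user/pgprot_kernel instead, so
 * they additionally carry whatever cacheable/bufferable bits the
 * boot-time memory policy fix-up has folded into those variables.
 */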

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
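
/*
 * Usage sketch (illustration only; the driver, its mmap handler and the
 * pfn are hypothetical): a driver wanting a write-combined user mapping
 * of device memory would typically do
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, foo_phys_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * pgprot_noncached() would be used instead where fully uncached
 * semantics are required.
 */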

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permission
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
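
/*
 * Note (illustration only): __Pxxx is the protection map used for
 * private (MAP_PRIVATE) mappings and __Sxxx for shared (MAP_SHARED)
 * ones, indexed by the read/write/execute bits.  For example, a
 * PROT_READ|PROT_WRITE private mapping uses __P011 == __PAGE_COPY,
 * whose PTE is read-only; the first write then faults and the generic
 * mm code breaks copy-on-write by giving the task its own writable
 * copy of the page.
 */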

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
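
/*
 * Worked example (illustration only; PGDIR_SHIFT comes from the 2-level
 * definitions included above and is assumed to be 21 here): each pgd
 * entry then covers 2MB of virtual space, so
 *
 *	pgd_index(0xc0000000) == 0xc0000000 >> 21 == 0x600
 *
 * and pgd_offset_k(0xc0000000) is &swapper_pg_dir[0x600].
 */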

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)


/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)
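
/*
 * Note (illustration only): on the classic ARM 2-level layout a Linux
 * pmd entry covers 2MB and is backed by a pair of 1MB hardware
 * first-level entries, which is why copy_pmd() and pmd_clear() above
 * always write entries [0] and [1] together before flushing/cleaning
 * the entry.
 */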

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)	(end)


#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
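
/*
 * Usage sketch (illustration only; mm and addr are assumed to be a valid
 * mm_struct pointer and a user virtual address): a typical walk down to
 * the pte level with these helpers looks like
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset_map(pmd, addr);
 *		... examine or modify *pte ...
 *		pte_unmap(pte);
 *	}
 *
 * (The pud level is folded away by 4level-fixup.h, so the pgd pointer is
 * handed straight to pmd_offset().)  pte_offset_map()/pte_unmap() must
 * be paired because the pte page may be mapped with kmap_atomic() when
 * CONFIG_HIGHPTE is enabled.
 */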

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif
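
/*
 * Note (illustration only): set_pte_at() below distinguishes kernel and
 * user addresses.  Kernel mappings (addr >= TASK_SIZE) are installed as
 * global entries, while user mappings are first pushed through
 * __sync_icache_dcache() (to keep the I-cache coherent with newly
 * written user pages on ARMv6 and later) and are marked PTE_EXT_NG so
 * the hardware treats them as non-global, per-ASID translations.
 */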

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
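
/*
 * Note (illustration only): pte_modify() only swaps the permission
 * related bits (XN/RDONLY/USER) in from the new protection; the page
 * frame, memory type and young/dirty state of the original pte are
 * preserved.  E.g. pte_modify(pte, PAGE_READONLY) leaves the pfn alone
 * but sets L_PTE_RDONLY, L_PTE_USER and L_PTE_XN.
 */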

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
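
/*
 * Worked example (illustration only): with the 3-bit type shift and
 * 9-bit offset shift above,
 *
 *	__swp_entry(2, 0x100).val == (0x100 << 9) | (2 << 3) == 0x20010
 *
 * and __swp_type()/__swp_offset() recover 2 and 0x100 from that value.
 * The low three bits stay zero, matching the layout diagram, so such a
 * pte is neither present nor a file pte.
 */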

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
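
/*
 * Note (illustration only): the file offset occupies bits 31..3, hence
 * the 29-bit limit above.  With 4kB pages that lets a non-linear file
 * mapping reference offsets of up to 2^29 pages, i.e. 2TB into a file.
 * For example, pgoff_to_pte(5) encodes as __pte((5 << 3) | L_PTE_FILE)
 * and pte_to_pgoff() recovers 5 by shifting back down.
 */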

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap `size' bytes of physical memory, starting at page frame `pfn',
 * into the virtual address range beginning at `from', using page
 * protection `prot'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */