xref: /linux/arch/arm64/include/asm/pgtable.h (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)	/* pte_present() check */
#define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
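/*
 * Note: bits 55 and 56 fall in the range the ARMv8 translation table format
 * leaves ignored by hardware and reserved for software use, so PTE_DIRTY and
 * PTE_SPECIAL never clash with hardware-interpreted fields. PTE_FILE reuses
 * bit 2 and, as noted above, is only meaningful when the entry is not
 * present.
 */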

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 */
#define VMALLOC_START		UL(0xffffff8000000000)
#define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)

#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
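/*
 * With the definitions above, vmemmap works out to PAGE_OFFSET - 0x400000000,
 * i.e. the struct page array occupies (at most) the 16GB immediately below
 * the linear mapping, with a 64KB guard gap separating it from VMALLOC_END.
 */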

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#endif
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime to
 * include the cacheable and bufferable bits based on memory policy, as well
 * as any architecture-dependent bits such as global/ASID and SMP shared
 * mapping bits.
 */
#define _PAGE_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)

extern pgprot_t pgprot_default;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)

#define __PAGE_NONE		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)

#endif /* __ASSEMBLY__ */

#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
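/*
 * The __P<xwr>/__S<xwr> entries above populate the generic protection_map:
 * the three digits correspond to the VM_EXEC, VM_WRITE and VM_READ bits of a
 * mapping, and the P/S prefix distinguishes private from shared mappings.
 * Note that a writable private mapping still gets a read-only PTE
 * (__PAGE_COPY); the write fault path breaks copy-on-write and installs a
 * writable entry.
 */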

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
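/*
 * arm64 has no highmem, so page tables are always reachable through the
 * linear mapping: pte_offset_map() degenerates to pte_offset_kernel() and
 * pte_unmap() is a no-op.
 */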

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_val(pte) & PTE_VALID)
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_AF)
#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#define pte_present_exec_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
	 (PTE_VALID | PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~PTE_AF);
PTE_BIT_FUNC(mkyoung,   |= PTE_AF);
PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
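/*
 * Each PTE_BIT_FUNC() invocation above expands to a trivial accessor; for
 * example, PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY) becomes:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) |= PTE_RDONLY;
 *		return pte;
 *	}
 */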

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
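/*
 * set_pte_at() below does two things beyond a plain set_pte():
 *
 *  - if the new entry maps a valid, user-executable page, the I-cache and
 *    D-cache are synchronised so stale instructions are not executed;
 *
 *  - a clean entry (no PTE_DIRTY) is installed read-only even when the
 *    mapping is nominally writable. The first write then faults and the
 *    fault path marks the page dirty in software, since the hardware here
 *    does not maintain a dirty bit itself.
 */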

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present_exec_user(pte))
		__sync_icache_dcache(pte, addr);
	if (!pte_dirty(pte))
		pte = pte_wrprotect(pte);
	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
#define pte_mkhuge(pte)		(__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define __HAVE_ARCH_PTE_SPECIAL

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb();
}
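/*
 * The dsb() in set_pmd() above (and set_pud() below) ensures the table
 * update is visible to the MMU's table walker before the mapping is used;
 * any TLB invalidation that may be needed remains the caller's
 * responsibility.
 */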

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#ifndef CONFIG_ARM64_64K_PAGES

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

#endif	/* CONFIG_ARM64_64K_PAGES */

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#ifndef CONFIG_ARM64_64K_PAGES
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
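/*
 * Taken together, the lookup helpers above implement the usual Linux
 * software page-table walk. An illustrative sketch, assuming an address
 * mapped at page granularity (folded levels collapse to simple casts via
 * the asm-generic headers):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */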

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
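/*
 * pte_modify() only lets mprotect()-style callers change the access
 * permission bits listed in `mask'; the output address, memory attribute
 * index and the software dirty/special bits of the original entry are all
 * preserved.
 */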

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		PTE_FILE
 *	bits 3-8:	swap type
 *	bits 9-63:	swap offset
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
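/*
 * Worked example of the encoding above: __swp_entry(2, 0x10) gives
 * (2 << 3) | (0x10 << 9) = 0x2010, from which __swp_type() recovers
 * (0x2010 >> 3) & 0x3f = 2 and __swp_offset() recovers 0x2010 >> 9 = 0x10.
 * Bits 0-2 stay clear, so the resulting PTE is neither present nor a file
 * entry.
 */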

/*
 * Ensure that there are not more swap files than can be encoded in
 * the kernel PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		PTE_FILE
 *	bits 3-63:	file offset / PAGE_SIZE
 */
#define pte_file(pte)		(pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | PTE_FILE)

#define PTE_FILE_MAX_BITS	61
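/*
 * Worked example of the file encoding: pgoff_to_pte(5) gives
 * (5 << 3) | PTE_FILE = 0x2c, and pte_to_pgoff() recovers 0x2c >> 3 = 5.
 * The 61 usable offset bits (bits 3-63) are what PTE_FILE_MAX_BITS records.
 */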

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

/*
 * Remap a range of physical pages starting at `pfn', of length `size' and
 * with page protection `prot', into the virtual address `from'.
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */