/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *     They are semantically the same although used in different contexts:
 *     VALID marks that a TLB entry exists, which can only be the case if
 *     PRESENT is set
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *     This is a must for 4K pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *     to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it
 *   has the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become
 *   simpler
 *
 * vineetg: April 2010
 *  -Switched from an 8:11:13 split for page table lookup to 11:8:13
 *  -This speeds up page table allocation itself, as we now have to memset
 *   1K instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry)
 *      while from the PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif
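
/*
 * Illustrative summary of the v3/v4 PTE bit layout defined above (derived
 * directly from the #defines; bits 7 and 10 exist only for MMU v4):
 *
 *  12:UNUSED 11:SHARED_CODE 10:HW_SZ 9:PRESENT 8:GLOBAL 7:WTHRU
 *   6:SPECIAL 5:DIRTY 4:ACCESSED 3:READ 2:WRITE 1:EXECUTE 0:CACHEABLE
 */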

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/* More abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* As the kernel runs out of untranslated address space, vmalloc/modules use
 * a chunk of user vaddr space - visible in all addr spaces, but kernel mode
 * only. Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
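
/*
 * As the mask names suggest (illustrative note, not a definition): on a TLB
 * refill the PTE contents are split across the MMU's two Page Descriptor
 * regs - PD0 gets the virtual page number plus GLOBAL/PRESENT/HW_SZ, while
 * PD1 gets the physical page number, cacheability and the RWX perms (the
 * actual split is done in arch/arc/mm/tlbex.S).
 */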

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have a 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to PAGE_U_X_R
 *
 * Other rules cause divergence from the 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
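
/*
 * Worked example (illustrative): a MAP_PRIVATE mmap() with
 * PROT_READ|PROT_WRITE selects __P011, i.e. PAGE_U_R: the page starts out
 * read-only despite being writable, so the first store faults and the COW
 * handler copies the page before granting _PAGE_WRITE via pte_mkwrite() /
 * pte_mkdirty() (defined further below).
 */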

/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT

/* Optimal sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8		/* 11:8:13 */
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8		/* 10:8:14 */
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9		/* 11:9:12 */
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
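
/*
 * Worked example (illustrative): with CONFIG_ARC_PAGE_SIZE_8K, i.e. the
 * 11:8:13 split above, PAGE_SHIFT = 13 and BITS_FOR_PTE = 8, hence
 * BITS_FOR_PGD = 11 and PGDIR_SHIFT = 21: each PGD entry spans 2M of
 * vaddr, with 2048 PGD entries and 256 PTEs per page table.
 */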

#ifdef __ASSEMBLY__
#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
#else
#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
#endif
/*
 * Number of PGD entries a userland program can use.
 * TASK_SIZE is the maximum vaddr usable by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirement for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS      0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, set up the PGD entry with the PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
				PAGE_SHIFT)))

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
	pte;								\
})

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset takes a @ptr to a PMD entry (the PGD itself in our 2-tier
 * paging system) and returns a ptr to the PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
#define pte_offset_map(dir, addr)		pte_offset(dir, addr)

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
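
/*
 * Illustrative use: the generic mprotect() path changes protections
 * roughly as
 *	pte = pte_modify(*ptep, newprot);
 * with the pfn and A/D bits preserved per _PAGE_CHG_MASK above.
 */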

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))
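
/*
 * Putting the lookup together - an illustrative (non-authoritative) sketch
 * of a full software walk for user address @addr, with pgtable-nopmd.h
 * folding making pud_offset()/pmd_offset() effectively no-ops here:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	(folded: same entry as pgd)
 *	pmd_t *pmd = pmd_offset(pud, addr);	(folded: same entry as pud)
 *	pte_t *pte = pte_offset(pmd, addr);
 */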

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
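
/*
 * Illustrative (hypothetical call site): only valid when @mm really is
 * current's, e.g. in signal frame setup:
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, addr);
 */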

extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for the 5-bit @type, keeping bits 12..5 zero, ensuring
 * that _PAGE_PRESENT is zero in a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into its constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
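
/*
 * Worked example (illustrative): __swp_entry(2, 0x123) yields the value
 * 0x246002 - type in bits 4..0, offset from bit 13 up - which
 * __swp_type()/__swp_offset() decode back to 2 and 0x123, while bits
 * 12..5, including _PAGE_PRESENT, stay zero.
 */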

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif