#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
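/*
 * Worked example (illustrative only, assuming 4KB pages, i.e. PAGE_SHIFT = 12,
 * and a full-page PTE table, i.e. PTE_SHIFT = PAGE_SHIFT - log2(sizeof(pte_t))):
 * with 32-bit PTEs, PTE_INDEX_SIZE is 10, so PGDIR_SHIFT = 12 + 10 = 22,
 * PGDIR_SIZE is 4MB and the pgdir has 2^(32 - 22) = 1024 entries (one page).
 * With 64-bit PTEs (CONFIG_PTE_64BIT), PTE_INDEX_SIZE is 9, so
 * PGDIR_SHIFT = 12 + 9 = 21, PGDIR_SIZE is 2MB and the pgdir has
 * 2^(32 - 21) = 2048 four-byte entries (8KB) -- the layout described above.
 */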

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about our early calls to ioremap(), which grow down from IOREMAP_TOP,
 * running into the VM area allocations (growing upwards from VMALLOC_START).
 * For this reason we have ioremap_bot to check when we actually run into
 * our mappings set up in early boot with the VM system.  This really does
 * become a problem for machines with good amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
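/*
 * Worked example (illustrative only): with 128MB of RAM and the usual
 * 0xc0000000 PAGE_OFFSET, high_memory is about 0xc8000000, so without
 * PPC_PIN_SIZE:
 *
 *	VMALLOC_START = (0xc8000000 + 0x1000000) & ~(0x1000000 - 1)
 *		      = 0xc9000000
 *
 * i.e. vmalloc space starts 16MB above the end of lowmem, rounded up to a
 * 16MB boundary, and grows upwards towards VMALLOC_END (ioremap_bot),
 * which only ever moves down from IOREMAP_TOP for ioremaps done before
 * mem_init().
 */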

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
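/*
 * Typical use (illustrative only): callers pass a mask of bits to clear
 * and a mask of bits to set, and get back the old PTE value, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);	clears the "young" bit
 *	pte_update(ptep, 0, _PAGE_DIRTY);		marks the page dirty
 *
 * The helpers in this file (pte_clear above, ptep_test_and_clear_young,
 * ptep_get_and_clear, ptep_set_wrprotect and __ptep_set_access_flags below)
 * are all thin wrappers around this primitive.
 */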

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
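/*
 * Minimal walk sketch (illustrative only, not part of the kernel API): with
 * the pud and pmd levels folded onto the pgd by the asm-generic headers
 * included above, going from a top-level entry to the Linux PTE is just a
 * couple of no-op folds plus pte_offset_kernel().  The helper name below is
 * hypothetical; real callers start from pgd_offset()/pgd_offset_k() and must
 * hold the appropriate page table lock.
 */
static inline pte_t *example_va_to_pte(pgd_t *pgd, unsigned long addr)
{
	pud_t *pud = pud_offset(pgd, addr);	/* folded: same entry */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded: same entry */

	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* NULL if the pmd is bad */
}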

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
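/*
 * Worked example (illustrative only): a swp_entry_t packs a 5-bit swap
 * type in bits 0-4 and the swap offset in bits 5 and up, and the whole
 * value is shifted left by 3 when stored in a PTE so the encoding stays
 * clear of the low PTE bits.  Swap type 2 at offset 0x100 is therefore
 * swp_entry_t.val = 2 | (0x100 << 5) = 0x2002, stored as a PTE value of
 * 0x2002 << 3 = 0x10010.
 */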

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */