#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * which is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space.
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
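/*
 * Worked example, assuming the hash MMU constants from hash.h at this
 * point (KERN_VIRT_START = 0xD000000000000000, KERN_VIRT_SIZE =
 * 0x0000100000000000); treat the exact numbers as illustrative only:
 *
 *   KERN_IO_START / ISA_IO_BASE = 0xD000080000000000
 *   ISA_IO_END / PHB_IO_BASE    = 0xD000080000010000  (ISA_IO_BASE + 64K)
 *   PHB_IO_END / IOREMAP_BASE   = 0xD000080080000000  (KERN_IO_START + 2G)
 *   IOREMAP_END                 = 0xD000100000000000
 */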

#define vmemmap			((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors; it's
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages.
 */
#ifndef __real_pte

#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
	do {							         \
		index = 0;					         \
		shift = mmu_psize_defs[psize].shift;		         \

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
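
/*
 * Illustrative sketch (not the definitive user) of how the sub-page
 * iterator above is consumed by the hash MMU fault/flush paths, roughly
 * along the lines of flush_hash_page():
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... locate and invalidate the HPTE for this sub-page ...
 *	} pte_iterate_hashed_end();
 *
 * With the default implementation above there is a single sub-page, so the
 * body runs exactly once; the Book3S 64K variant loops over each 4K
 * sub-page of the 64K linux page.
 */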

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

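/*
 * Sketch of a full software walk using the accessors above (assumptions:
 * pud_offset() and the *_index() helpers come from the other page table
 * headers; pgd_none()/pud_none()/pmd_none() checks and huge page handling
 * are omitted). Real lookups normally go through dedicated helpers such as
 * find_linux_pte_or_hugepte() instead:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
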
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * The swap type bits must not overlap _PAGE_HPTEFLAGS;	\
	 * we filter HPTEFLAGS out on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	} while (0)
/*
 * On pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap, and convert that to a pte to
 * find a matching pte in the linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
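
/*
 * Round-trip sketch for the encoding above (illustrative; assumes type and
 * offset fit in SWP_TYPE_BITS and in the bits above PTE_RPN_SHIFT):
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(ent);	(ORs in _PAGE_PTE)
 *	ent = __pte_to_swp_entry(pte);		(masks _PAGE_PTE back out)
 *
 * after which __swp_type(ent) == type and __swp_offset(ent) == offset.
 */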

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

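/*
 * A transparent huge page PMD is stored in the same format as a PTE: see
 * pmd_mkhuge() below, which sets _PAGE_PTE | _PAGE_THP_HUGE. That is why
 * most of the PMD accessors that follow simply convert to a pte_t and
 * reuse the PTE accessors.
 */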
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the deposited pgtable to store per-pmd
	 * specific information. So when we move the pmd, we should also
	 * withdraw and re-deposit the pgtable.
	 */
	return true;
}
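
/*
 * For reference, a rough sketch of how the generic THP code is expected to
 * consume this together with the deposit/withdraw hooks above (along the
 * lines of move_huge_pmd() in mm/huge_memory.c):
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
 *		pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 *		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 *	}
 */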
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */