/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLER__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLER__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLER__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte);
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

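/*
 * Editor's sketch (illustrative, not part of this header's API):
 * set_ptes() installs @nr PTEs for @nr physically contiguous pages in
 * one call, advancing the PFN encoded in @pte for each successive
 * entry; PFN_PTE_SHIFT above tells generic code which PTE bits hold
 * that PFN.  A caller mapping a contiguous folio would look roughly
 * like:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
 */
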
#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep this as a macro to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx

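/*
 * Editor's note (illustrative): pte_pfn() and pte_pgprot() split a PTE
 * into its two components, and pfn_pte() reassembles them, so for a
 * valid PTE the round trip is conceptually lossless:
 *
 *	pfn_pte(pte_pfn(pte), pte_pgprot(pte)) == pte
 *
 * pgprot_nx() builds on this to strip execute permission from an
 * existing protection value, e.g. (hypothetical use):
 *
 *	pgprot_t no_exec = pgprot_nx(PAGE_KERNEL_X);
 */
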
#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/* Translate a vmalloc address to its physical address (usable e.g. from KVM) */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

#ifdef CONFIG_PPC32
void __init *early_alloc_pgtable(unsigned long size);
#endif
pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT

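/*
 * Editor's sketch (hypothetical driver code): /dev/mem-style mmap
 * implementations use phys_mem_access_prot() to pick cacheability
 * attributes for a physical range before remapping it, roughly:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 */
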
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}

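/*
 * Editor's note (illustrative): the generic fault path effectively does
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
 *	update_mmu_cache_range(vmf, vma, addr, ptep, nr);
 *
 * so on hash-MMU machines the HPTE is preloaded before userspace
 * retries the faulting access, avoiding a second (hash-miss) fault.
 */
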
/*
 * When used, PTE_FRAG_NR is defined in the subarch pgtable.h,
 * so it is guaranteed to be visible by the time we arrive here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

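/*
 * Editor's sketch (hypothetical flow; the real allocator lives in
 * arch/powerpc/mm/pgtable-frag.c): a page is carved into PTE_FRAG_NR
 * fragments of PTE_FRAG_SIZE bytes each, and the context caches the
 * next free fragment so several small page tables can share one page:
 *
 *	void *frag = pte_frag_get(&mm->context);
 *
 *	if (frag)
 *		pte_frag_set(&mm->context, next fragment, or NULL);
 *	else
 *		allocate a fresh page and seed the cache from it;
 */
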
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory() goes into the
 * details of some of the restrictions. We don't check for PMD_SIZE
 * because our vmemmap allocation code can fall back correctly. The
 * pageblock alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, memory block sizes
	 * starting from 128MB let the vmemmap line up with PMD_SIZE,
	 * so require PMD_SIZE alignment in that case.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}

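/*
 * Editor's worked example (assuming the typical 64-byte struct page):
 * a 128MB memory block with 4K pages covers 128M / 4K = 32768 pages,
 * so its vmemmap needs 32768 * 64 bytes = 2MB, which is exactly
 * PMD_SIZE-aligned and therefore passes the check above.
 */
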
#endif /* CONFIG_PPC64 */

#ifndef pmd_user_accessible_page
#define pmd_user_accessible_page(pmd, addr)	false
#endif

#ifndef pud_user_accessible_page
#define pud_user_accessible_page(pud, addr)	false
#endif

#endif /* __ASSEMBLER__ */

#endif /* _ASM_POWERPC_PGTABLE_H */