xref: /linux/arch/powerpc/include/asm/mmu.h (revision 98838d95075a5295f3478ceba18bcccf472e30f4)
#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__

#include <linux/types.h>

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/*
 * MMU features bit definitions
 */

/*
 * MMU families
 */
#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)

/* Radix page table supported and enabled */
#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)

/*
 * Individual features below.
 */

/*
 * We need to clear the top 16 bits of the VA (from the remaining
 * 64 bits) in tlbie* instructions.
 */
#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)

/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)

/* Enable >32-bit physical addresses on a 32-bit processor. Currently only
 * used by CONFIG_6xx, as BookE has supported this from day 1.
 */
#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)

/* Enable use of broadcast TLB invalidations. We don't always set it
 * on processors that support it due to other constraints with the
 * use of such invalidations
 */
#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)

/* Enable use of tlbilx invalidate instructions.
 */
#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)

/* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
 * around such invalidate forms.
 */
#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)

/* This indicates that the processor doesn't handle way selection
 * properly and needs SW to track and update the LRU state.  This
 * is specific to an erratum on e300c2/c3/c4 class parts.
 */
#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)

/* Enable use of TLB reservation.  The processor should support the
 * tlbsrx. instruction and MAS0[WQ].
 */
#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)

/* Use paired MAS registers (MAS7||MAS3, etc.)
 */
#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)

/* Doesn't support the B bit (1T segment) in SLBIE
 */
#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)

/* Support 16M large pages
 */
#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)

/* Supports TLBIEL variant
 */
#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)

/* Supports tlbies w/o locking
 */
#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)

/* Large pages can be marked CI
 */
#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)

/* 1T segments available
 */
#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)

/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER4		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970		MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER7		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER8		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER9		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
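
/*
 * Illustrative sketch: these aggregate masks are meant to populate the
 * per-CPU MMU feature word consulted below via cur_cpu_spec->mmu_features.
 * A cputable-style CPU entry could, for example, carry
 *
 *	.mmu_features	= MMU_FTRS_POWER8,
 *
 * so that mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE) reports true on that CPU.
 * Treat the exact entry layout as an assumption; only the field name is
 * taken from the helpers below.
 */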
#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif

enum {
	MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx |
		MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E |
		MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS |
		MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX |
		MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU |
		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
#ifdef CONFIG_PPC_RADIX_MMU
		MMU_FTR_TYPE_RADIX |
#endif
		0,
};

static inline bool early_mmu_has_feature(unsigned long feature)
{
	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
}

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>

#define NUM_MMU_FTR_KEYS	32

extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];

extern void mmu_feature_keys_init(void);

static __always_inline bool mmu_has_feature(unsigned long feature)
{
	int i;

	BUILD_BUG_ON(!__builtin_constant_p(feature));

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
	if (!static_key_initialized) {
		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
		dump_stack();
		return early_mmu_has_feature(feature);
	}
#endif

	if (!(MMU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&mmu_feature_keys[i]);
}

static inline void mmu_clear_feature(unsigned long feature)
{
	int i;

	i = __builtin_ctzl(feature);
	cur_cpu_spec->mmu_features &= ~feature;
	static_branch_disable(&mmu_feature_keys[i]);
}
#else

static inline void mmu_feature_keys_init(void)
{

}

static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}

static inline void mmu_clear_feature(unsigned long feature)
{
	cur_cpu_spec->mmu_features &= ~feature;
}
#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
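
/*
 * Illustrative usage sketch (the helper names below are hypothetical and
 * not declared in this header): callers pass a single compile-time-constant
 * feature bit, so with CONFIG_JUMP_LABEL_FEATURE_CHECKS the check resolves
 * to a patched branch rather than a load and mask of mmu_features:
 *
 *	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
 *		slb_setup_1t_segments();	// hypothetical helper
 *	else
 *		slb_setup_256m_segments();	// hypothetical helper
 */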

extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;

#ifdef CONFIG_PPC64
/* This is our real memory area size on ppc64 server; on embedded, we
 * make it match the size of our bolted TLB area.
 */
extern u64 ppc64_rma_size;

/* Cleanup functions used by kexec */
extern void mmu_cleanup_all(void);
extern void radix__mmu_cleanup_all(void);
#endif /* CONFIG_PPC64 */

struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}

static inline bool early_radix_enabled(void)
{
	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
#else
static inline bool radix_enabled(void)
{
	return false;
}

static inline bool early_radix_enabled(void)
{
	return false;
}
#endif
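
/*
 * Illustrative sketch (the flush helpers named here are hypothetical, not
 * declared in this header): MMU-type-specific code typically branches on
 * radix_enabled(), while code that runs before the feature jump labels are
 * initialized uses early_radix_enabled() instead:
 *
 *	if (radix_enabled())
 *		radix__flush_something(mm);	// hypothetical
 *	else
 *		hash__flush_something(mm);	// hypothetical
 */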

#endif /* !__ASSEMBLY__ */

/* The kernel uses the constants below to index into the page sizes array.
 * The use of fixed constants for this purpose is better for the performance
 * of the low-level hash refill handlers.
 *
 * An unsupported page size has its "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 *
 * Note: This array ended up being a false good idea as it's growing to the
 * point where I wonder if we should replace it with something different;
 * to think about, feedback welcome. --BenH.
 */

/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16K	1
#define MMU_PAGE_64K	2
#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K	4
#define MMU_PAGE_1M	5
#define MMU_PAGE_2M	6
#define MMU_PAGE_4M	7
#define MMU_PAGE_8M	8
#define MMU_PAGE_16M	9
#define MMU_PAGE_64M	10
#define MMU_PAGE_256M	11
#define MMU_PAGE_1G	12
#define MMU_PAGE_16G	13
#define MMU_PAGE_64G	14

/* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
#define MMU_PAGE_COUNT	15
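
/*
 * Illustrative sketch (mmu_psize_defs lives in the MMU-specific headers
 * included below; treat its exact declaration as an assumption): the
 * constants above serve as direct indices into the page size array, and an
 * unsupported size is recognized by a zero "shift":
 *
 *	if (mmu_psize_defs[MMU_PAGE_16M].shift)
 *		// 16M pages are supported by this MMU
 */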

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */

#ifndef __ASSEMBLY__
/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size);
static inline void mmu_early_init_devtree(void) { }
#endif /* __ASSEMBLY__ */
#endif

#if defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_40x)
/* 40x-style software loaded TLB */
#  include <asm/mmu-40x.h>
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
#  include <asm/mmu-44x.h>
#elif defined(CONFIG_PPC_BOOK3E_MMU)
/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
#  include <asm/mmu-book3e.h>
#elif defined (CONFIG_PPC_8xx)
/* Motorola/Freescale 8xx software loaded TLB */
#  include <asm/mmu-8xx.h>
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */