/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_MMU_8XX_H_
#define _ASM_POWERPC_MMU_8XX_H_
/*
 * PPC8xx support
 */

/* Control/status registers for the MPC8xx.
 * A write operation to these registers causes serialized access.
 * During a software tablewalk, the registers perform mask/shift-add
 * operations when written/read.  A TLB entry is created when the Mx_RPN
 * register is written, and the contents of several registers are used to
 * create the entry.
 */
#define SPRN_MI_CTR	784	/* Instruction TLB control register */
#define MI_GPM		0x80000000	/* Set domain manager mode */
#define MI_PPM		0x40000000	/* Set subpage protection */
#define MI_CIDEF	0x20000000	/* Set cache inhibit when MMU disabled */
#define MI_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MI_PPCS		0x02000000	/* Use MI_RPN problem/privilege state */
#define MI_IDXMASK	0x00001f00	/* TLB index to be loaded */

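/*
 * Hedged usage sketch (not from the kernel sources): reserving the
 * first four ITLB slots protects pinned kernel translations from
 * round-robin replacement, and MI_IDXMASK selects which entry the
 * next Mx_RPN write loads.  'slot' below is a hypothetical index:
 *
 *	mtspr(SPRN_MI_CTR, MI_RSV4I | (slot << 8));
 *
 * (the shift by 8 places 'slot' in the MI_IDXMASK field, 0x00001f00)
 */
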
/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MI_AP	786
#define MI_Ks		0x80000000	/* Should not be set */
#define MI_Kp		0x40000000	/* Should always be set */

/*
 * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
 * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
 * respectively NA for All or X for Supervisor and no access for User.
 * Then we use the APG to say whether accesses are according to Page rules or
 * "all Supervisor" rules (Access to all).
 * _PAGE_ACCESSED is also managed via the APG.  When _PAGE_ACCESSED is not set,
 * we use "all User" rules, which lead to NA for all.
 * Therefore, we define 4 APG groups; the lsb is _PAGE_ACCESSED:
 * 0 => Kernel => 11 (all accesses performed as user, in accordance with the page definition)
 * 1 => Kernel+Accessed => 01 (all accesses performed according to the page definition)
 * 2 => User => 11 (all accesses performed as user, in accordance with the page definition)
 * 3 => User+Accessed => 10 (all accesses performed according to the swapped page definition) for KUEP
 * 4-15 => Not Used
 */
#define MI_APG_INIT	0xde000000

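/*
 * Sketch of how MI_APG_INIT packs the four 2-bit groups above, group
 * 0 in bits 31-30 down to group 3 in bits 25-24; APG_ENTRY() is an
 * illustrative helper, not a kernel macro:
 *
 *	#define APG_ENTRY(grp, val)	((val) << (30 - 2 * (grp)))
 *
 *	APG_ENTRY(0, 3) | APG_ENTRY(1, 1) |
 *	APG_ENTRY(2, 3) | APG_ENTRY(3, 2) == 0xde000000
 */
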
/* The effective page number register.  When read, contains the information
 * about the last instruction TLB miss.  When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MI_EPN	787
#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MI_EVALID	0x00000200	/* Entry is valid */
#define MI_ASIDMASK	0x0000000f	/* ASID match value */
					/* Reset value is undefined */

57 /* A "level 1" or "segment" or whatever you want to call it register.
58  * For the instruction TLB, it contains bits that get loaded into the
59  * TLB entry when the MI_RPN is written.
60  */
61 #define SPRN_MI_TWC	789
62 #define MI_APG		0x000001e0	/* Access protection group (0) */
63 #define MI_GUARDED	0x00000010	/* Guarded storage */
64 #define MI_PSMASK	0x0000000c	/* Mask of page size bits */
65 #define MI_PS8MEG	0x0000000c	/* 8M page size */
66 #define MI_PS512K	0x00000004	/* 512K page size */
67 #define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
68 #define MI_SVALID	0x00000001	/* Segment entry is valid */
69 					/* Reset value is undefined */
70 
/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the instruction TLB, using
 * additional information from the MI_EPN and MI_TWC registers.
 */
#define SPRN_MI_RPN	790
#define MI_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

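/*
 * Hedged sketch of how an ITLB entry comes together; the real code
 * is the ITLB miss handler in head_8xx.S, this only illustrates the
 * register protocol.  'ea', 'asid' and 'pte' are hypothetical locals
 * for the faulting address, context ID and level-2 PTE value:
 *
 *	mtspr(SPRN_MI_EPN, (ea & MI_EPNMASK) | MI_EVALID | asid);
 *	mtspr(SPRN_MI_TWC, MI_PS8MEG | MI_SVALID);
 *	mtspr(SPRN_MI_RPN, pte);	<- entry is committed here
 */
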
/* Define an RPN value for mapping kernel memory to large virtual
 * pages for boot initialization.  This has a real page number of 0,
 * large page size, shared page, cache enabled, and valid.
 * It also marks all subpages valid and writable.
 */
#define MI_BOOTINIT	0x000001fd

#define SPRN_MD_CTR	792	/* Data TLB control register */
#define MD_GPM		0x80000000	/* Set domain manager mode */
#define MD_PPM		0x40000000	/* Set subpage protection */
#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU disabled */
#define MD_WTDEF	0x10000000	/* Set writethrough when MMU disabled */
#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
#define MD_PPCS		0x02000000	/* Use MD_RPN problem/privilege state */
#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */

#define SPRN_M_CASID	793	/* Address space ID (context) to match */
#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */


/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MD_AP	794
#define MD_Ks		0x80000000	/* Should not be set */
#define MD_Kp		0x40000000	/* Should always be set */

/* See explanation above at the definition of MI_APG_INIT */
#define MD_APG_INIT	0xdc000000
#define MD_APG_KUAP	0xde000000

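/*
 * Hedged sketch: kernel userspace access protection (KUAP) on the
 * 8xx works by flipping SPRN_MD_AP between the two values above
 * (the real helpers live in asm/nohash/32/kup-8xx.h):
 *
 *	mtspr(SPRN_MD_AP, MD_APG_KUAP);	<- lock out user pages
 *	...kernel code that must not touch user memory...
 *	mtspr(SPRN_MD_AP, MD_APG_INIT);	<- open a uaccess window
 */
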
/* The effective page number register.  When read, contains the information
 * about the last data TLB miss.  When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MD_EPN	795
#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MD_EVALID	0x00000200	/* Entry is valid */
#define MD_ASIDMASK	0x0000000f	/* ASID match value */
					/* Reset value is undefined */

119 
120 /* The pointer to the base address of the first level page table.
121  * During a software tablewalk, reading this register provides the address
122  * of the entry associated with MD_EPN.
123  */
124 #define SPRN_M_TWB	796
125 #define	M_L1TB		0xfffff000	/* Level 1 table base address */
126 #define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
127 					/* Reset value is undefined */
128 
129 /* A "level 1" or "segment" or whatever you want to call it register.
130  * For the data TLB, it contains bits that get loaded into the TLB entry
131  * when the MD_RPN is written.  It is also provides the hardware assist
132  * for finding the PTE address during software tablewalk.
133  */
134 #define SPRN_MD_TWC	797
135 #define MD_L2TB		0xfffff000	/* Level 2 table base address */
136 #define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
137 #define MD_APG		0x000001e0	/* Access protection group (0) */
138 #define MD_GUARDED	0x00000010	/* Guarded storage */
139 #define MD_PSMASK	0x0000000c	/* Mask of page size bits */
140 #define MD_PS8MEG	0x0000000c	/* 8M page size */
141 #define MD_PS512K	0x00000004	/* 512K page size */
142 #define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
143 #define MD_WT		0x00000002	/* Use writethrough page attribute */
144 #define MD_SVALID	0x00000001	/* Segment entry is valid */
145 					/* Reset value is undefined */
146 
147 
/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the data TLB, using
 * additional information from the MD_EPN and MD_TWC registers.
 */
#define SPRN_MD_RPN	798
#define MD_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* This is a temporary storage register that could be used to save
 * a processor working register during a tablewalk.
 */
#define SPRN_M_TW	799

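/*
 * Hedged sketch of the software tablewalk with the hardware assist
 * (MD_TWAM); the authoritative code is the DTLB miss handler in
 * head_8xx.S.  With the faulting address already in MD_EPN:
 *
 *	mtspr(SPRN_MD_TWC, *(u32 *)mfspr(SPRN_M_TWB));	<- load L1 entry
 *	pte = *(u32 *)mfspr(SPRN_MD_TWC);		<- fetch the PTE
 *	mtspr(SPRN_MD_RPN, pte);			<- commit TLB entry
 *
 * 'pte' is a hypothetical local; the casts stand in for the raw
 * loads the assembly handler performs.
 */
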
#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize	MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
#define mmu_virtual_psize	MMU_PAGE_16K
#define PTE_FRAG_NR		4
#define PTE_FRAG_SIZE_SHIFT	12
#define PTE_FRAG_SIZE		(1UL << 12)
#else
#error "Unsupported PAGE_SIZE"
#endif
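
/*
 * Geometry note for the 16K case above (an inference, hedged): a
 * last-level page table needs only 4K, so each 16K page is carved
 * into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes:
 *
 *	PTE_FRAG_NR * PTE_FRAG_SIZE == 4 * (1UL << 12) == SZ_16K
 */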

#define mmu_linear_psize	MMU_PAGE_8M

#define MODULES_VADDR	(PAGE_OFFSET - SZ_256M)
#define MODULES_END	PAGE_OFFSET

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>
#include <linux/sizes.h>

void mmu_pin_tlb(unsigned long top, bool readonly);

typedef struct {
	unsigned int id;
	unsigned int active;
	void __user *vdso;
	void *pte_frag;
} mm_context_t;

#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
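
/*
 * Worked example (sketch): the IMMR area is mapped with a 512K page,
 * so the base is masked down to a 512K boundary, e.g. an SPRN_IMMR
 * value of 0xff000123 yields a PHYS_IMMR_BASE of 0xff000000.
 */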

/* Page size definitions, common between 32 and 64-bit
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    enc   : is the PTE encoding mask
 *
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	unsigned int	enc;	/* PTE encoding */
	unsigned int    ind;    /* Corresponding indirect page size shift */
	unsigned int	flags;
#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
};

extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}
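
/*
 * Usage sketch: once mmu_psize_defs[] is populated, round-tripping
 * the base page size recovers mmu_virtual_psize, and an unsupported
 * shift returns -1:
 *
 *	shift_to_mmu_psize(PAGE_SHIFT) == mmu_virtual_psize
 *	shift_to_mmu_psize(20) == -1	(no 1M page size on the 8xx)
 */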

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
				      unsigned int max_page_shift, unsigned long size)
{
	if (end - addr < size)
		return false;

	if ((1UL << max_page_shift) < size)
		return false;

	if (!IS_ALIGNED(addr, size))
		return false;

	if (!IS_ALIGNED(PFN_PHYS(pfn), size))
		return false;

	return true;
}

static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
		return SZ_512K;
	if (PAGE_SIZE == SZ_16K)
		return SZ_16K;
	if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
		return SZ_16K;
	return PAGE_SIZE;
}
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
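
/*
 * Worked example (sketch, hypothetical values): with 4K pages, a
 * call such as
 *
 *	arch_vmap_pte_range_map_size(addr, addr + SZ_1M, pfn, 19)
 *
 * returns SZ_512K when both 'addr' and PFN_PHYS(pfn) are 512K
 * aligned, SZ_16K when only the 16K checks pass, and PAGE_SIZE
 * otherwise.  On a 16K-page kernel the 16K probe is skipped since
 * that is already the base page size.
 */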

static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	if (size >= SZ_512K)
		return 19;
	else if (size >= SZ_16K)
		return 14;
	else
		return PAGE_SHIFT;
}
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift

/* patch sites */
extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_8XX_H_ */