/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_MMU_8XX_H_
#define _ASM_POWERPC_MMU_8XX_H_
/*
 * PPC8xx support
 */

/* Control/status registers for the MPC8xx.
 * A write operation to these registers causes serialized access.
 * During software tablewalk, the registers used perform mask/shift-add
 * operations when written/read. A TLB entry is created when the Mx_RPN
 * is written, and the contents of several registers are used to
 * create the entry.
 */
#define SPRN_MI_CTR 784 /* Instruction TLB control register */
#define MI_GPM 0x80000000 /* Set domain manager mode */
#define MI_PPM 0x40000000 /* Set subpage protection */
#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MI_PPCS 0x02000000 /* Use MI_RPN problem/privilege state */
#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */

/* These are the Ks and Kp from the PowerPC books. For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MI_AP 786
#define MI_Ks 0x80000000 /* Should not be set */
#define MI_Kp 0x40000000 /* Should always be set */

/*
 * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
 * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
 * respectively NA for All, or X for Supervisor and no access for User.
 * Then we use the APG to say whether accesses are according to Page rules or
 * "all Supervisor" rules (Access to all).
 * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, we
 * use "all User" rules, which lead to NA for all.
 * Therefore, we define 4 APG groups. The lsb is _PAGE_ACCESSED:
 * 0 => Kernel          => 11 (all accesses performed as user, in accordance
 *                             with the page definition)
 * 1 => Kernel+Accessed => 01 (all accesses performed according to the page definition)
 * 2 => User            => 11 (all accesses performed as user, in accordance
 *                             with the page definition)
 * 3 => User+Accessed   => 10 (all accesses performed according to the swapped
 *                             page definition) for KUEP
 * 4-15 => Not Used
 */
#define MI_APG_INIT 0xde000000
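
/*
 * Worked out from the table above, with 2 bits per group and group 0 in
 * the two most significant bits:
 *   group 0 = 11, group 1 = 01, group 2 = 11, group 3 = 10
 *   => 0b11011110 << 24 = 0xde000000
 * Groups 4-15 are unused and left at 00.
 */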

/* The effective page number register. When read, contains the information
 * about the last instruction TLB miss. When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MI_EPN 787
#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MI_EVALID 0x00000200 /* Entry is valid */
#define MI_ASIDMASK 0x0000000f /* ASID match value */
/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the instruction TLB, it contains bits that get loaded into the
 * TLB entry when the MI_RPN is written.
 */
#define SPRN_MI_TWC 789
#define MI_APG 0x000001e0 /* Access protection group (0) */
#define MI_GUARDED 0x00000010 /* Guarded storage */
#define MI_PSMASK 0x0000000c /* Mask of page size bits */
#define MI_PS8MEG 0x0000000c /* 8M page size */
#define MI_PS512K 0x00000004 /* 512K page size */
#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
#define MI_SVALID 0x00000001 /* Segment entry is valid */
/* Reset value is undefined */

/* Real page number. Defined by the pte. Writing this register
 * causes a TLB entry to be created for the instruction TLB, using
 * additional information from the MI_EPN and MI_TWC registers.
 */
#define SPRN_MI_RPN 790
#define MI_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
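
/*
 * Illustrative sketch only (not part of this header): under software
 * tablewalk, an ITLB entry is committed by the final write to MI_RPN,
 * as described above. Assuming mtspr() from <asm/reg.h>, with 'epn',
 * 'twc' and 'rpn' hypothetical values derived from the faulting address
 * and its PTE:
 *
 *	mtspr(SPRN_MI_EPN, epn | MI_EVALID);
 *	mtspr(SPRN_MI_TWC, twc | MI_SVALID);
 *	mtspr(SPRN_MI_RPN, rpn);	// the TLB entry is created by this write
 */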

/* Define an RPN value for mapping kernel memory to large virtual
 * pages for boot initialization. This has a real page number of 0,
 * large page size, shared page, cache enabled, and valid.
 * Also mark all subpages valid and write access.
 */
#define MI_BOOTINIT 0x000001fd

#define SPRN_MD_CTR 792 /* Data TLB control register */
#define MD_GPM 0x80000000 /* Set domain manager mode */
#define MD_PPM 0x40000000 /* Set subpage protection */
#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
#define MD_PPCS 0x02000000 /* Use MD_RPN problem/privilege state */
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */

#define SPRN_M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */

/* These are the Ks and Kp from the PowerPC books. For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MD_AP 794
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */

/* See explanation above at the definition of MI_APG_INIT */
#define MD_APG_INIT 0xdc000000
#define MD_APG_KUAP 0xde000000
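
/*
 * Illustrative sketch only: Kernel Userspace Access Protection (KUAP)
 * can be implemented by flipping MD_AP between these two values around
 * user memory accesses, e.g. (assuming mtspr() from <asm/reg.h>):
 *
 *	mtspr(SPRN_MD_AP, MD_APG_INIT);	// open a user access window
 *	... access user memory ...
 *	mtspr(SPRN_MD_AP, MD_APG_KUAP);	// close it again
 */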

/* The effective page number register. When read, contains the information
 * about the last data TLB miss. When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MD_EPN 795
#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
#define MD_EVALID 0x00000200 /* Entry is valid */
#define MD_ASIDMASK 0x0000000f /* ASID match value */
/* Reset value is undefined */

/* The pointer to the base address of the first level page table.
 * During a software tablewalk, reading this register provides the address
 * of the level 1 entry associated with MD_EPN.
 */
#define SPRN_M_TWB 796
#define M_L1TB 0xfffff000 /* Level 1 table base address */
#define M_L1INDX 0x00000ffc /* Level 1 index, when read */
/* Reset value is undefined */
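
/*
 * Illustrative sketch only: the first two steps of a software tablewalk,
 * assuming mfspr()/mtspr() from <asm/reg.h>:
 *
 *	unsigned long l1 = *(unsigned long *)mfspr(SPRN_M_TWB);
 *	mtspr(SPRN_MD_TWC, l1);
 *	// reading MD_TWC back now yields the level 2 (pte) entry address
 */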

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the data TLB, it contains bits that get loaded into the TLB entry
 * when the MD_RPN is written. It also provides the hardware assist
 * for finding the PTE address during software tablewalk.
 */
#define SPRN_MD_TWC 797
#define MD_L2TB 0xfffff000 /* Level 2 table base address */
#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
#define MD_APG 0x000001e0 /* Access protection group (0) */
#define MD_GUARDED 0x00000010 /* Guarded storage */
#define MD_PSMASK 0x0000000c /* Mask of page size bits */
#define MD_PS8MEG 0x0000000c /* 8M page size */
#define MD_PS512K 0x00000004 /* 512K page size */
#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
#define MD_WT 0x00000002 /* Use writethrough page attribute */
#define MD_SVALID 0x00000001 /* Segment entry is valid */
/* Reset value is undefined */

/* Real page number. Defined by the pte. Writing this register
 * causes a TLB entry to be created for the data TLB, using
 * additional information from the MD_EPN and MD_TWC registers.
 */
#define SPRN_MD_RPN 798
#define MD_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */

/* This is a temporary storage register that could be used to save
 * a processor working register during a tablewalk.
 */
#define SPRN_M_TW 799

#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
#define mmu_virtual_psize MMU_PAGE_16K
#define PTE_FRAG_NR 4
#define PTE_FRAG_SIZE_SHIFT 12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
#else
#error "Unsupported PAGE_SIZE"
#endif
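
/*
 * With 16k pages, page tables are allocated in 4k fragments:
 * PTE_FRAG_NR * PTE_FRAG_SIZE = 4 * 4k = 16k, i.e. one full page.
 */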

#define mmu_linear_psize MMU_PAGE_8M

#define MODULES_END PAGE_OFFSET
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>
#include <linux/sizes.h>

void mmu_pin_tlb(unsigned long top, bool readonly);

typedef struct {
	unsigned int id;
	unsigned int active;
	void __user *vdso;
	void *pte_frag;
} mm_context_t;

#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)

/*
 * Page size definitions for 8xx
 *
 * shift : the "PAGE_SHIFT" value for that page size
 */
struct mmu_psize_def {
	unsigned int shift;	/* number of bits */
};

extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

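/*
 * Check whether a huge vmap mapping of 'size' can be used at 'addr':
 * the remaining range, the caller's page size limit, and the alignment
 * of both the virtual and physical addresses must all accommodate 'size'.
 */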
static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
				      unsigned int max_page_shift, unsigned long size)
{
	if (end - addr < size)
		return false;

	if ((1UL << max_page_shift) < size)
		return false;

	if (!IS_ALIGNED(addr, size))
		return false;

	if (!IS_ALIGNED(PFN_PHYS(pfn), size))
		return false;

	return true;
}

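/*
 * Pick the largest page size usable to map the current PTE range: try
 * 512k first, then 16k. When the base page size is already 16k there is
 * no smaller huge size left to probe, so 16k is returned directly.
 */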
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
		return SZ_512K;
	if (PAGE_SIZE == SZ_16K)
		return SZ_16K;
	if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
		return SZ_16K;
	return PAGE_SIZE;
}
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size

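/*
 * 19 and 14 below are log2(SZ_512K) and log2(SZ_16K) respectively.
 */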
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	if (size >= SZ_512K)
		return 19;
	else if (size >= SZ_16K)
		return 14;
	else
		return PAGE_SHIFT;
}
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift

/* patch sites */
extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_8XX_H_ */