/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <yu.liu@freescale.com>
 *         Scott Wood <scottwood@freescale.com>
 *         Ashish Kalra <ashish.kalra@freescale.com>
 *         Varun Sethi <varun.sethi@freescale.com>
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
 * Copyright IBM Corp. 2007-2008
 */

#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/nohash/mmu-e500.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>

enum vcpu_ftr {
	VCPU_FTR_MMU_V2
};

#define E500_PID_NUM	3
#define E500_TLB_NUM	2

/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID		(1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP		(1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0		(1 << 29)
/* entry is writable on the host */
#define E500_TLB_WRITABLE	(1 << 28)
/* bits [6:5] are MAS2_X1 and MAS2_X0, bits [4:0] are WIMGE */
#define E500_TLB_MAS2_ATTR	(0x7f)

struct tlbe_ref {
	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};
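
/*
 * Illustrative sketch only, not part of the original interface: the
 * E500_TLB_* bits above are tested against tlbe_ref::flags.  A hypothetical
 * helper doing so could look like the one below; the name is made up for
 * illustration and nothing else in this file depends on it.
 */
static inline bool tlbe_ref_host_mapped(const struct tlbe_ref *ref)
{
	/* E500_TLB_VALID means the entry is mapped somewhere in the host TLB */
	return !!(ref->flags & E500_TLB_VALID);
}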

struct tlbe_priv {
	struct tlbe_ref ref;
};

#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;
#endif

struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};

struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	unsigned int host_tlb1_nv;

	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum addresses mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};

static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}

/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)
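
/*
 * Worked example (illustrative): index_of(1, 3) == 0x10003, so
 * tlbsel_of(0x10003) == 1 and esel_of(0x10003) == 3, i.e. the upper half
 * of the packed index selects the TLB array and the lower half selects
 * the entry within it.
 */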

#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif

/* TLB helper functions */
static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 7) & 0x1f;
}

static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & MAS2_EPN;
}

static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	unsigned int pgsize = get_tlb_size(tlbe);
	return 1ULL << 10 << pgsize;
}
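
/*
 * Worked example (illustrative): as computed above, a size field of 2
 * gives get_tlb_bytes() == 1024 << 2 == 4096, i.e. a 4 KiB page, and
 * get_tlb_end() below then returns eaddr + 4096 - 1.
 */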

static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	u64 bytes = get_tlb_bytes(tlbe);
	return get_tlb_eaddr(tlbe) + bytes - 1;
}

static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & ~0xfffULL;
}

static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 16) & 0xff;
}

static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 12) & 0x1;
}

static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 31) & 0x1;
}

static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 30) & 0x1;
}

static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}

static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}

static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
}

static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}

static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}

static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
	/*
	 * The manual says tlbsel is 2 bits wide.  Since we only have
	 * two TLBs, only the lower bit is used.
	 */
	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}

static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}

static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}

static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
				    const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

static inline struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];
	return &vcpu_e500->gtlb_arch[offset + entry];
}
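
/*
 * Worked example (illustrative): gtlb_offset[] holds the starting index of
 * each guest TLB within the flat gtlb_arch[] array, so if gtlb_offset[1]
 * were 256, get_entry(vcpu_e500, 1, 2) would return &gtlb_arch[258].
 */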

void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);

#ifdef CONFIG_KVM_BOOKE_HV
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)	get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)		get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)		(gtlbe->mas1 & MAS1_TS)

/*
 * These functions should be called with preemption disabled;
 * the returned value is only valid in that context.
 */
static inline int get_thread_specific_lpid(int vm_lpid)
{
	int vcpu_lpid = vm_lpid;

	if (threads_per_core == 2)
		vcpu_lpid |= smp_processor_id() & 1;

	return vcpu_lpid;
}
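
/*
 * Worked example (illustrative): on a two-threaded core, a VM-wide LPID of
 * 4 stays 4 on an even-numbered CPU and becomes 5 on an odd-numbered one,
 * giving each hardware thread its own LPID value derived from the VM's.
 */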

static inline int get_lpid(struct kvm_vcpu *vcpu)
{
	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}

/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)		(MAS1_TS)
#endif /* !BOOKE_HV */

static inline bool has_feature(const struct kvm_vcpu *vcpu,
			       enum vcpu_ftr ftr)
{
	bool has_ftr;

	switch (ftr) {
	case VCPU_FTR_MMU_V2:
		has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
		break;
	default:
		return false;
	}
	return has_ftr;
}
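
/*
 * Illustrative usage sketch (not from this file): callers guard behaviour
 * that only exists on MMU architecture version 2, e.g.
 *	if (has_feature(vcpu, VCPU_FTR_MMU_V2))
 *		...;
 */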

#endif /* KVM_E500_H */