/* xref: /linux/arch/powerpc/kvm/e500.h (revision 11e8c7e9471cf8e6ae6ec7324a3174191cd965e3) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <yu.liu@freescale.com>
 *         Scott Wood <scottwood@freescale.com>
 *         Ashish Kalra <ashish.kalra@freescale.com>
 *         Varun Sethi <varun.sethi@freescale.com>
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
 * Copyright IBM Corp. 2007-2008
 */

#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/nohash/mmu-e500.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>

/* Optional vcpu features probed via has_feature(). */
enum vcpu_ftr {
	VCPU_FTR_MMU_V2		/* MMUCFG reports MMU architecture version 2 */
};
27 
#define E500_PID_NUM   3
#define E500_TLB_NUM   2

/*
 * Flag bits kept in tlbe_priv.flags for each guest TLB entry.
 * Unsigned literals are used deliberately: (1 << 31) would left-shift
 * into the sign bit of a signed int, which is undefined behavior in C.
 */
/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID		(1U << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP		(1U << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0		(1U << 29)
/* entry is writable on the host */
#define E500_TLB_WRITABLE	(1U << 28)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR	(0x7f)
41 
/* Host-side bookkeeping attached to each guest TLB entry. */
struct tlbe_priv {
	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};
46 
#ifdef CONFIG_KVM_E500V2
/* Opaque shadow-id table; referenced from struct kvmppc_vcpu_e500 (V2 only). */
struct vcpu_id_table;
#endif
50 
/* Geometry of one guest TLB array. */
struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};
54 
/* e500-specific vcpu state; embeds the generic struct kvm_vcpu. */
struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	/* Per-TLB geometry (entries/ways/sets) */
	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	/* NV (next victim) state per guest TLB -- cf. get_tlb_nv_bit() */
	unsigned int gtlb_nv[E500_TLB_NUM];

	unsigned int host_tlb1_nv;

	/* Emulated SPR state */
	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	/* NOTE(review): presumably the pages backing gtlb_arch's shared
	 * mapping -- confirm against the tlb setup code. */
	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	/* guest-TLB1 <-> host-TLB1 tracking (see E500_TLB_BITMAP) */
	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum address mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};
97 
/* Get the containing e500 vcpu from the generic vcpu pointer. */
static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}
102 
103 
/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

/* Pack/unpack a (tlbsel, esel) pair into a single index value. */
#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)

/* MAS3 permission bit groups and MAS2/MAS3 attribute bit groups. */
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
122 
/* TLB instruction emulation entry points (implemented in e500_mmu.c). */
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

/* Save/restore TLB-related state through the sregs / ONE_REG interfaces. */
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif
146 
147 /* TLB helper functions */
148 static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry * tlbe)149 get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
150 {
151 	return (tlbe->mas1 >> 7) & 0x1f;
152 }
153 
/* Guest effective address (EPN portion of MAS2) the entry maps. */
static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & MAS2_EPN;
}
158 
get_tlb_bytes(const struct kvm_book3e_206_tlb_entry * tlbe)159 static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
160 {
161 	unsigned int pgsize = get_tlb_size(tlbe);
162 	return 1ULL << 10 << pgsize;
163 }
164 
get_tlb_end(const struct kvm_book3e_206_tlb_entry * tlbe)165 static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
166 {
167 	u64 bytes = get_tlb_bytes(tlbe);
168 	return get_tlb_eaddr(tlbe) + bytes - 1;
169 }
170 
get_tlb_raddr(const struct kvm_book3e_206_tlb_entry * tlbe)171 static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
172 {
173 	return tlbe->mas7_3 & ~0xfffULL;
174 }
175 
176 static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry * tlbe)177 get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
178 {
179 	return (tlbe->mas1 >> 16) & 0xff;
180 }
181 
182 static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry * tlbe)183 get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
184 {
185 	return (tlbe->mas1 >> 12) & 0x1;
186 }
187 
188 static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry * tlbe)189 get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
190 {
191 	return (tlbe->mas1 >> 31) & 0x1;
192 }
193 
194 static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry * tlbe)195 get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
196 {
197 	return (tlbe->mas1 >> 30) & 0x1;
198 }
199 
/* TSIZE field of MAS1, extracted via the named mask/shift macros. */
static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}
205 
/* Current guest process ID: low 8 bits of the emulated PID register. */
static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}
210 
get_cur_as(struct kvm_vcpu * vcpu)211 static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
212 {
213 	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
214 }
215 
get_cur_pr(struct kvm_vcpu * vcpu)216 static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
217 {
218 	return !!(vcpu->arch.shared->msr & MSR_PR);
219 }
220 
get_cur_spid(const struct kvm_vcpu * vcpu)221 static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
222 {
223 	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
224 }
225 
get_cur_sas(const struct kvm_vcpu * vcpu)226 static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
227 {
228 	return vcpu->arch.shared->mas6 & 0x1;
229 }
230 
get_tlb_tlbsel(const struct kvm_vcpu * vcpu)231 static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
232 {
233 	/*
234 	 * Manual says that tlbsel has 2 bits wide.
235 	 * Since we only have two TLBs, only lower bit is used.
236 	 */
237 	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
238 }
239 
/* NV (next victim) field from the guest's MAS0, low 12 bits. */
static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}
244 
get_tlb_esel_bit(const struct kvm_vcpu * vcpu)245 static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
246 {
247 	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
248 }
249 
/*
 * Can this guest TLB entry be used on the host as-is?
 * Returns 1 only if the entry is valid, matches the current guest
 * address space (non-HV builds only), and its real address falls
 * inside a memslot (i.e. maps guest RAM).
 */
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
			const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
272 
get_entry(struct kvmppc_vcpu_e500 * vcpu_e500,int tlbsel,int entry)273 static inline struct kvm_book3e_206_tlb_entry *get_entry(
274 	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
275 {
276 	int offset = vcpu_e500->gtlb_offset[tlbsel];
277 	return &vcpu_e500->gtlb_arch[offset + entry];
278 }
279 
/* tlbil = TLB invalidate (one guest entry / everything) -- per-backend. */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
283 
#ifdef CONFIG_KVM_BOOKE_HV
/* HV builds alias these directly to the guest's own TID/PID/TS values. */
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)

/*
 * These functions should be called with preemption disabled
 * and the returned value is valid only in that context
 */
get_thread_specific_lpid(int vm_lpid)293 static inline int get_thread_specific_lpid(int vm_lpid)
294 {
295 	int vcpu_lpid = vm_lpid;
296 
297 	if (threads_per_core == 2)
298 		vcpu_lpid |= smp_processor_id() & 1;
299 
300 	return vcpu_lpid;
301 }
302 
/* LPID for this vcpu on the current thread (preemption disabled). */
static inline int get_lpid(struct kvm_vcpu *vcpu)
{
	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
#else
/* Non-HV: shadow TID comes from the sid allocator (e500.c). */
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);
310 
get_tlbmiss_tid(struct kvm_vcpu * vcpu)311 static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
312 {
313 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
314 	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
315 
316 	return vcpu_e500->pid[tidseld];
317 }
318 
/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)              (MAS1_TS)
#endif /* !BOOKE_HV */
322 
has_feature(const struct kvm_vcpu * vcpu,enum vcpu_ftr ftr)323 static inline bool has_feature(const struct kvm_vcpu *vcpu,
324 			       enum vcpu_ftr ftr)
325 {
326 	bool has_ftr;
327 	switch (ftr) {
328 	case VCPU_FTR_MMU_V2:
329 		has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
330 		break;
331 	default:
332 		return false;
333 	}
334 	return has_ftr;
335 }
336 
#endif /* KVM_E500_H */
338