xref: /linux/arch/powerpc/kvm/e500.h (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*
2  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3  *
4  * Author: Yu Liu <yu.liu@freescale.com>
5  *         Scott Wood <scottwood@freescale.com>
6  *         Ashish Kalra <ashish.kalra@freescale.com>
7  *         Varun Sethi <varun.sethi@freescale.com>
8  *
9  * Description:
10  * This file is based on arch/powerpc/kvm/44x_tlb.h and
11  * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
12  * Copyright IBM Corp. 2007-2008
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License, version 2, as
16  * published by the Free Software Foundation.
17  */
18 
19 #ifndef KVM_E500_H
20 #define KVM_E500_H
21 
22 #include <linux/kvm_host.h>
23 #include <asm/mmu-book3e.h>
24 #include <asm/tlb.h>
25 
/* Number of guest PID registers tracked (e500v2 -- see pid[] below) */
#define E500_PID_NUM   3
/* Two guest TLB arrays are modelled: TLB0 and TLB1 */
#define E500_TLB_NUM   2

/* Flag bits for tlbe_ref.flags */
#define E500_TLB_VALID 1
#define E500_TLB_BITMAP 2
31 
/*
 * Reference from a guest TLB entry to the host page backing it.
 * Embedded in tlbe_priv for TLB0; TLB1 keeps these in the separate
 * tlb_refs arrays instead.
 */
struct tlbe_ref {
	pfn_t pfn;          /* host page frame backing the mapping */
	unsigned int flags; /* E500_TLB_* */
};
36 
/* KVM-private state kept per guest TLB entry. */
struct tlbe_priv {
	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};
40 
#ifdef CONFIG_KVM_E500V2
/* Opaque shadow-id table used by e500v2; defined outside this header. */
struct vcpu_id_table;
#endif

/* Geometry of one guest TLB array. */
struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};
48 
/*
 * e500-family vcpu: the generic kvm_vcpu plus the state needed to
 * emulate the guest's two Book E TLBs and related SPRs.
 */
struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] for each guest TLB */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	/* Geometry (entries/ways/sets) of each guest TLB */
	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	/* Next-victim state per guest TLB (cf. get_tlb_nv_bit()) */
	unsigned int gtlb_nv[E500_TLB_NUM];

	/*
	 * information associated with each host TLB entry --
	 * TLB1 only for now.  If/when guest TLB1 entries can be
	 * mapped with host TLB0, this will be used for that too.
	 *
	 * We don't want to use this for guest TLB0 because then we'd
	 * have the overhead of doing the translation again even if
	 * the entry is still in the guest TLB (e.g. we swapped out
	 * and back, and our host TLB entries got evicted).
	 */
	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
	unsigned int host_tlb1_nv; /* next victim in host TLB1 */

	/* Guest-visible SPR values held in software */
	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	/* Pages backing the userspace-shared gtlb_arch array */
	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	/* guest<->host TLB1 entry mappings */
	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum address mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};
102 
/* Convert a generic kvm_vcpu pointer to its containing e500 vcpu. */
static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}
107 
108 
/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

/*
 * Pack/unpack a (tlbsel, esel) pair into a single index:
 * TLB selector in the upper bits, entry selector in the low 16.
 */
#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)

/* MAS3 permission bits, grouped by privilege level */
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
/* Attribute bits honored from the guest's MAS2/MAS3 values */
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
127 
/* Emulation entry points for the guest's TLB management instructions */
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
/* Per-vcpu guest TLB setup/teardown */
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

/* Copy MMU state to/from the userspace sregs interface */
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
140 
141 
#ifdef CONFIG_KVM_E500V2
/* Map a guest (AS, GID, PR) combination to a shadow id (e500v2 only). */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif
147 
148 /* TLB helper functions */
149 static inline unsigned int
150 get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
151 {
152 	return (tlbe->mas1 >> 7) & 0x1f;
153 }
154 
155 static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
156 {
157 	return tlbe->mas2 & MAS2_EPN;
158 }
159 
160 static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
161 {
162 	unsigned int pgsize = get_tlb_size(tlbe);
163 	return 1ULL << 10 << pgsize;
164 }
165 
166 static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
167 {
168 	u64 bytes = get_tlb_bytes(tlbe);
169 	return get_tlb_eaddr(tlbe) + bytes - 1;
170 }
171 
172 static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
173 {
174 	return tlbe->mas7_3 & ~0xfffULL;
175 }
176 
177 static inline unsigned int
178 get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
179 {
180 	return (tlbe->mas1 >> 16) & 0xff;
181 }
182 
183 static inline unsigned int
184 get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
185 {
186 	return (tlbe->mas1 >> 12) & 0x1;
187 }
188 
189 static inline unsigned int
190 get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
191 {
192 	return (tlbe->mas1 >> 31) & 0x1;
193 }
194 
195 static inline unsigned int
196 get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
197 {
198 	return (tlbe->mas1 >> 30) & 0x1;
199 }
200 
201 static inline unsigned int
202 get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
203 {
204 	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
205 }
206 
207 static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
208 {
209 	return vcpu->arch.pid & 0xff;
210 }
211 
212 static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
213 {
214 	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
215 }
216 
217 static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
218 {
219 	return !!(vcpu->arch.shared->msr & MSR_PR);
220 }
221 
222 static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
223 {
224 	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
225 }
226 
227 static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
228 {
229 	return vcpu->arch.shared->mas6 & 0x1;
230 }
231 
232 static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
233 {
234 	/*
235 	 * Manual says that tlbsel has 2 bits wide.
236 	 * Since we only have two TLBs, only lower bit is used.
237 	 */
238 	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
239 }
240 
241 static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
242 {
243 	return vcpu->arch.shared->mas0 & 0xfff;
244 }
245 
246 static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
247 {
248 	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
249 }
250 
/*
 * Returns 1 if this guest TLB entry looks safe to shadow in the host
 * TLB now: it is valid, matches the current guest address space
 * (non-HV only), and its real address falls inside a memslot (RAM).
 */
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
			const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
273 
274 static inline struct kvm_book3e_206_tlb_entry *get_entry(
275 	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
276 {
277 	int offset = vcpu_e500->gtlb_offset[tlbsel];
278 	return &vcpu_e500->gtlb_arch[offset + entry];
279 }
280 
/* Invalidate host shadow state for one guest entry / for the whole guest */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
284 
#ifdef CONFIG_KVM_BOOKE_HV
/* With hardware hypervisor support, the guest's own TID/TS are used directly. */
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)
#else
/* Translate a guest entry's TID into the shadow TID used on the host. */
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

/*
 * PID to present on a TLB miss, selected by MAS4[TIDSELD].
 * NOTE(review): pid[] exists only under CONFIG_KVM_E500V2, so this
 * non-HV path presumably implies e500v2 -- confirm the Kconfig wiring.
 */
static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}

/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)              (MAS1_TS)
#endif /* !BOOKE_HV */
304 
305 #endif /* KVM_E500_H */
306