// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
#include <asm/pte-walk.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

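/*
 * Shadow entries in host TLB1 are allocated from the top of the array
 * downwards; convert a shadow entry index (sesel) into the corresponding
 * hardware TLB1 entry number.
 */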
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

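/*
 * Number of host TLB1 entries available for shadowing guest mappings:
 * everything above the entries already claimed by the host (tlbcam_index),
 * minus the one entry reserved for the magic page.
 */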
static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}

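/*
 * Compute the MAS3 attribute/permission bits for the shadow (host) TLB
 * entry from the guest's MAS3 value.  Write permission is dropped for
 * read-only host mappings.  Without CONFIG_KVM_BOOKE_HV the guest runs in
 * host user mode, so guest supervisor permissions are folded into the user
 * permission bits.
 */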
static inline u32 e500_shadow_mas3_attrib(u32 mas3, bool writable, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

        if (!writable)
                mas3 &= ~(MAS3_UW|MAS3_SW);

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}

/*
 * Write a shadow TLB entry into the host TLB.  Interrupts are disabled so
 * that the MAS register setup and the tlbwe are not interleaved with
 * another TLB write on this CPU.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0,
                                     uint32_t lpid)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB. Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;
        u32 mas4;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        mas4 = mfspr(SPRN_MAS4);
        mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        mtspr(SPRN_MAS4, mas4);
        local_irq_restore(flags);

        return mas0;
}

/*
 * sesel selects the host TLB1 slot; it is ignored for TLB0, where the
 * hardware victim hint from get_host_mas0() picks the slot.
 */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)),
                                  vcpu_e500->vcpu.kvm->arch.lpid);
        }
}

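/*
 * Fill in the shadow TID for the current guest PID mapping and write the
 * entry into the host TLB.  Preemption is disabled so the shadow ID
 * returned by kvmppc_e500_get_tlb_stid() stays valid until the entry has
 * been written.
 */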
/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}

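/*
 * Map the vcpu's shared (magic) page into host TLB1 at the guest's magic
 * page effective address, so the guest can access it directly.  Only the
 * e500v2 target uses this.
 */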
#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        kvm_pfn_t pfn;

        pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
        preempt_enable();
}
#endif

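/*
 * Invalidate whatever host TLB entries shadow the given guest TLB entry.
 * TLB1 entries tracked in the g2h bitmap are invalidated slot by slot;
 * TLB1 entries backed by 4K TLB0 pages trigger a full invalidation (rare,
 * not worth optimizing); a still-valid TLB0 entry is invalidated directly.
 */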
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                         int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

        /* Don't bother with unmapped entries */
        if (!(ref->flags & E500_TLB_VALID)) {
                WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
                     "%s: flags %x\n", __func__, ref->flags);
                WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
                while (tmp) {
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
                local_irq_restore(flags);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
                /*
                 * TLB1 entry is backed by 4k pages. This should happen
                 * rarely and is not worth optimizing. Invalidate everything.
                 */
                kvmppc_e500_tlbil_all(vcpu_e500);
                ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        /*
         * If TLB entry is still valid then it's a TLB0 entry, and thus
         * backed by at most one host tlbe per shadow pid
         */
        if (ref->flags & E500_TLB_VALID)
                kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

        /* Mark the TLB as not backed by the host anymore */
        ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

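/*
 * Record the host page backing a guest TLB entry: pfn, writability, and the
 * MAS2 attributes (guest-supplied G and E bits combined with the WIMG bits
 * taken from the host PTE).
 */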
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         kvm_pfn_t pfn, unsigned int wimg,
                                         bool writable)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;
        if (writable)
                ref->flags |= E500_TLB_WRITABLE;

        /* Use guest supplied MAS2_G and MAS2_E */
        ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel;
        int i;

        for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
                for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                        kvmppc_e500_ref_release(ref);
                }
        }
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_e500_tlbil_all(vcpu_e500);
        clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        kvm_pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;
        bool writable = !!(ref->flags & E500_TLB_WRITABLE);

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr);
}

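/*
 * Build a shadow TLB entry for a guest mapping: fault in the backing host
 * page, pick up the WIMG bits from the host PTE, and, for TLB1 mappings,
 * choose the largest tsize (up to what the guest asked for) that stays
 * within the host mapping and the memslot while keeping gfn and pfn
 * mutually aligned.
 */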
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned int psize;
        unsigned long pfn;
        struct page *page = NULL;
        unsigned long hva;
        int tsize = BOOK3E_PAGESZ_4K;
        int ret = 0;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu_e500->vcpu.kvm;
        pte_t *ptep;
        unsigned int wimg = 0;
        pgd_t *pgdir;
        unsigned long flags;
        bool writable = false;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far. Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page);
        if (is_error_noslot_pfn(pfn)) {
                if (printk_ratelimit())
                        pr_err("%s: real page not found for gfn %lx\n",
                               __func__, (long)gfn);
                return -EINVAL;
        }

        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
                goto out;
        }

        pgdir = vcpu_e500->vcpu.arch.pgdir;
        /*
         * We are just looking at the wimg bits, so we don't
         * care much about the trans splitting bit.
         * We are holding kvm->mmu_lock so a notifier invalidate
         * can't run hence pfn won't change.
         */
        local_irq_save(flags);
        ptep = find_linux_pte(pgdir, hva, NULL, &psize);
        if (ptep) {
                pte_t pte = READ_ONCE(*ptep);

                if (pte_present(pte)) {
                        wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
                                MAS2_WIMGE_MASK;
                } else {
                        local_irq_restore(flags);
                        pr_err_ratelimited("%s: pte not present: gfn %lx, pfn %lx\n",
                                           __func__, (long)gfn, pfn);
                        ret = -EINVAL;
                        goto out;
                }
        }
        local_irq_restore(flags);

        if (psize && tlbsel == 1) {
                unsigned long psize_pages, tsize_pages;
                unsigned long start, end;
                unsigned long slot_start, slot_end;

                psize_pages = 1UL << (psize - PAGE_SHIFT);
                start = pfn & ~(psize_pages - 1);
                end = start + psize_pages;

                slot_start = pfn - (gfn - slot->base_gfn);
                slot_end = slot_start + slot->npages;

                if (start < slot_start)
                        start = slot_start;
                if (end > slot_end)
                        end = slot_end;

                tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                        MAS1_TSIZE_SHIFT;

                /*
                 * Any page size that doesn't satisfy the host mapping
                 * will fail the start and end tests.
                 */
                tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize);

                /*
                 * e500 doesn't implement the lowest tsize bit,
                 * or 1K pages.
                 */
                tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                /*
                 * Now find the largest tsize (up to what the guest
                 * requested) that will cover gfn, stay within the
                 * range, and for which gfn and pfn are mutually
                 * aligned.
                 */

                for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                        unsigned long gfn_start, gfn_end;
                        tsize_pages = 1UL << (tsize - 2);

                        gfn_start = gfn & ~(tsize_pages - 1);
                        gfn_end = gfn_start + tsize_pages;

                        if (gfn_start + pfn - gfn < start)
                                continue;
                        if (gfn_end + pfn - gfn > end)
                                continue;
                        if ((gfn & (tsize_pages - 1)) !=
                            (pfn & (tsize_pages - 1)))
                                continue;

                        gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                        pfn &= ~(tsize_pages - 1);
                        break;
                }
        }

        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable);
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);
        writable = tlbe_is_writable(stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

out:
        kvm_release_faultin_page(kvm, page, !!ret, writable);
        spin_unlock(&kvm->mmu_lock);
        return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
                                struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;
        int stlbsel = 0;
        int sesel = 0;
        int r;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                                   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                                   gtlbe, 0, stlbe, ref);
        if (r)
                return r;

        write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

        return 0;
}

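/*
 * Claim a host TLB1 slot for a shadow entry: pick the next victim slot in
 * round-robin order, detach whatever guest entry was previously using it,
 * and record the new guest<->host association in the g2h bitmap and the
 * h2g reverse map.
 */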
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
                                     struct tlbe_ref *ref,
                                     int esel)
{
        unsigned int sesel = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }

        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
        vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
        WARN_ON(!(ref->flags & E500_TLB_VALID));

        return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.  Handles both the one-to-one case and the
 * one-to-many case, where a large guest mapping is backed by multiple host
 * entries.
 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;

        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
                                   ref);
        if (r)
                return r;

        /* Use TLB0 when we can only map a page with 4k */
        if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
                vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
                write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
                return 0;
        }

        /* Otherwise map into TLB1 */
        sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

        return 0;
}

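/*
 * Install a shadow mapping for the guest TLB entry identified by 'index'
 * (a guest TLB select/entry pair from the preceding guest TLB lookup).
 * A still-valid TLB0 ref is reused without re-faulting the page.
 */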
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                        write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
                }
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
                                     esel);
                break;
        }

        default:
                BUG();
                break;
        }
}

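/*
 * Fetch the instruction the guest was executing so it can be emulated.
 * With CONFIG_KVM_BOOKE_HV the hardware TLB is searched for the guest PC,
 * and EMULATE_AGAIN is returned whenever the translation, execute
 * permission, or storage attributes cannot be trusted; the non-HV stub
 * below simply returns EMULATE_AGAIN.
 */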
#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                          enum instruction_fetch_type type, unsigned long *instr)
{
        gva_t geaddr;
        hpa_t addr;
        hfn_t pfn;
        hva_t eaddr;
        u32 mas1, mas2, mas3;
        u64 mas7_mas3;
        struct page *page;
        unsigned int addr_space, psize_shift;
        bool pr;
        unsigned long flags;

        /* Search TLB for guest pc to get the real address */
        geaddr = kvmppc_get_pc(vcpu);

        addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
        asm volatile("tlbsx 0, %[geaddr]\n" : :
                     [geaddr] "r" (geaddr));
        mtspr(SPRN_MAS5, 0);
        mtspr(SPRN_MAS8, 0);
        mas1 = mfspr(SPRN_MAS1);
        mas2 = mfspr(SPRN_MAS2);
        mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
        mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
        mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
        local_irq_restore(flags);

        /*
         * If the TLB entry for guest pc was evicted, return to the guest.
         * There are high chances to find a valid TLB entry next time.
         */
        if (!(mas1 & MAS1_VALID))
                return EMULATE_AGAIN;

        /*
         * Another thread may rewrite the TLB entry in parallel, don't
         * execute from the address if the execute permission is not set
         */
        pr = vcpu->arch.shared->msr & MSR_PR;
        if (unlikely((pr && !(mas3 & MAS3_UX)) ||
                     (!pr && !(mas3 & MAS3_SX)))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx without execute permission\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /*
         * The real address will be mapped by a cacheable, memory coherent,
         * write-back page. Check for mismatches when LRAT is used.
         */
        if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
            unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /* Get pfn */
        psize_shift = MAS1_GET_TSIZE(mas1) + 10;
        addr = (mas7_mas3 & (~0ULL << psize_shift)) |
               (geaddr & ((1ULL << psize_shift) - 1ULL));
        pfn = addr >> PAGE_SHIFT;

        /* Guard against emulation from devices area */
        if (unlikely(!page_is_ram(pfn))) {
                pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
                                   __func__, addr);
                return EMULATE_AGAIN;
        }

        /* Map a page and get guest's instruction */
        page = pfn_to_page(pfn);
        eaddr = (unsigned long)kmap_atomic(page);
        *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
        kunmap_atomic((u32 *)eaddr);

        return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                          enum instruction_fetch_type type, unsigned long *instr)
{
        return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /*
         * Flush all shadow TLB entries everywhere. This is slow, but it
         * guarantees that we catch the page that is being unmapped.
         */
        return true;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        return kvm_e500_mmu_unmap_gfn(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /* XXX could be more clever ;) */
        return false;
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /* XXX could be more clever ;) */
        return false;
}

/*****************************************/

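/*
 * Read the host TLB geometry from the TLB0CFG/TLB1CFG registers, sanity
 * check it, and allocate the host-to-guest TLB1 reverse map.
 */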
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;
        vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
                                           sizeof(*vcpu_e500->h2g_tlb1_rmap),
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                return -EINVAL;

        return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->h2g_tlb1_rmap);
}