1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved. 4 * 5 * Author: Yu Liu, yu.liu@freescale.com 6 * Scott Wood, scottwood@freescale.com 7 * Ashish Kalra, ashish.kalra@freescale.com 8 * Varun Sethi, varun.sethi@freescale.com 9 * Alexander Graf, agraf@suse.de 10 * 11 * Description: 12 * This file is based on arch/powerpc/kvm/44x_tlb.c, 13 * by Hollis Blanchard <hollisb@us.ibm.com>. 14 */ 15 16 #include <linux/kernel.h> 17 #include <linux/types.h> 18 #include <linux/slab.h> 19 #include <linux/string.h> 20 #include <linux/kvm.h> 21 #include <linux/kvm_host.h> 22 #include <linux/highmem.h> 23 #include <linux/log2.h> 24 #include <linux/uaccess.h> 25 #include <linux/sched/mm.h> 26 #include <linux/rwsem.h> 27 #include <linux/vmalloc.h> 28 #include <linux/hugetlb.h> 29 #include <asm/kvm_ppc.h> 30 #include <asm/pte-walk.h> 31 32 #include "e500.h" 33 #include "timing.h" 34 #include "e500_mmu_host.h" 35 36 #include "trace_booke.h" 37 38 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) 39 40 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; 41 42 static inline unsigned int tlb1_max_shadow_size(void) 43 { 44 /* reserve one entry for magic page */ 45 return host_tlb_params[1].entries - tlbcam_index - 1; 46 } 47 48 static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) 49 { 50 /* Mask off reserved bits. */ 51 mas3 &= MAS3_ATTRIB_MASK; 52 53 #ifndef CONFIG_KVM_BOOKE_HV 54 if (!usermode) { 55 /* Guest is in supervisor mode, 56 * so we need to translate guest 57 * supervisor permissions into user permissions. */ 58 mas3 &= ~E500_TLB_USER_PERM_MASK; 59 mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; 60 } 61 mas3 |= E500_TLB_SUPER_PERM_MASK; 62 #endif 63 return mas3; 64 } 65 66 /* 67 * writing shadow tlb entry to host TLB 68 */ 69 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, 70 uint32_t mas0, 71 uint32_t lpid) 72 { 73 unsigned long flags; 74 75 local_irq_save(flags); 76 mtspr(SPRN_MAS0, mas0); 77 mtspr(SPRN_MAS1, stlbe->mas1); 78 mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); 79 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); 80 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); 81 #ifdef CONFIG_KVM_BOOKE_HV 82 mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid)); 83 #endif 84 asm volatile("isync; tlbwe" : : : "memory"); 85 86 #ifdef CONFIG_KVM_BOOKE_HV 87 /* Must clear mas8 for other host tlbwe's */ 88 mtspr(SPRN_MAS8, 0); 89 isync(); 90 #endif 91 local_irq_restore(flags); 92 93 trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, 94 stlbe->mas2, stlbe->mas7_3); 95 } 96 97 /* 98 * Acquire a mas0 with victim hint, as if we just took a TLB miss. 99 * 100 * We don't care about the address we're searching for, other than that it's 101 * in the right set and is not present in the TLB. Using a zero PID and a 102 * userspace address means we don't have to set and then restore MAS5, or 103 * calculate a proper MAS6 value. 104 */ 105 static u32 get_host_mas0(unsigned long eaddr) 106 { 107 unsigned long flags; 108 u32 mas0; 109 u32 mas4; 110 111 local_irq_save(flags); 112 mtspr(SPRN_MAS6, 0); 113 mas4 = mfspr(SPRN_MAS4); 114 mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK); 115 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); 116 mas0 = mfspr(SPRN_MAS0); 117 mtspr(SPRN_MAS4, mas4); 118 local_irq_restore(flags); 119 120 return mas0; 121 } 122 123 /* sesel is for tlb1 only */ 124 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, 125 int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) 126 { 127 u32 mas0; 128 129 if (tlbsel == 0) { 130 mas0 = get_host_mas0(stlbe->mas2); 131 __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); 132 } else { 133 __write_host_tlbe(stlbe, 134 MAS0_TLBSEL(1) | 135 MAS0_ESEL(to_htlb1_esel(sesel)), 136 vcpu_e500->vcpu.kvm->arch.lpid); 137 } 138 } 139 140 /* sesel is for tlb1 only */ 141 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, 142 struct kvm_book3e_206_tlb_entry *gtlbe, 143 struct kvm_book3e_206_tlb_entry *stlbe, 144 int stlbsel, int sesel) 145 { 146 int stid; 147 148 preempt_disable(); 149 stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); 150 151 stlbe->mas1 |= MAS1_TID(stid); 152 write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); 153 preempt_enable(); 154 } 155 156 #ifdef CONFIG_KVM_E500V2 157 /* XXX should be a hook in the gva2hpa translation */ 158 void kvmppc_map_magic(struct kvm_vcpu *vcpu) 159 { 160 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 161 struct kvm_book3e_206_tlb_entry magic; 162 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; 163 unsigned int stid; 164 kvm_pfn_t pfn; 165 166 pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT; 167 get_page(pfn_to_page(pfn)); 168 169 preempt_disable(); 170 stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0); 171 172 magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | 173 MAS1_TSIZE(BOOK3E_PAGESZ_4K); 174 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; 175 magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | 176 MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; 177 magic.mas8 = 0; 178 179 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0); 180 preempt_enable(); 181 } 182 #endif 183 184 void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, 185 int esel) 186 { 187 struct kvm_book3e_206_tlb_entry *gtlbe = 188 get_entry(vcpu_e500, tlbsel, esel); 189 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; 190 191 /* Don't bother with unmapped entries */ 192 if (!(ref->flags & E500_TLB_VALID)) { 193 WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), 194 "%s: flags %x\n", __func__, ref->flags); 195 WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); 196 } 197 198 if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { 199 u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; 200 int hw_tlb_indx; 201 unsigned long flags; 202 203 local_irq_save(flags); 204 while (tmp) { 205 hw_tlb_indx = __ilog2_u64(tmp & -tmp); 206 mtspr(SPRN_MAS0, 207 MAS0_TLBSEL(1) | 208 MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); 209 mtspr(SPRN_MAS1, 0); 210 asm volatile("tlbwe"); 211 vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; 212 tmp &= tmp - 1; 213 } 214 mb(); 215 vcpu_e500->g2h_tlb1_map[esel] = 0; 216 ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); 217 local_irq_restore(flags); 218 } 219 220 if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { 221 /* 222 * TLB1 entry is backed by 4k pages. This should happen 223 * rarely and is not worth optimizing. Invalidate everything. 224 */ 225 kvmppc_e500_tlbil_all(vcpu_e500); 226 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 227 } 228 229 /* 230 * If TLB entry is still valid then it's a TLB0 entry, and thus 231 * backed by at most one host tlbe per shadow pid 232 */ 233 if (ref->flags & E500_TLB_VALID) 234 kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); 235 236 /* Mark the TLB as not backed by the host anymore */ 237 ref->flags = 0; 238 } 239 240 static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) 241 { 242 return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); 243 } 244 245 static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref, 246 struct kvm_book3e_206_tlb_entry *gtlbe, 247 kvm_pfn_t pfn, unsigned int wimg) 248 { 249 ref->pfn = pfn; 250 ref->flags = E500_TLB_VALID; 251 252 /* Use guest supplied MAS2_G and MAS2_E */ 253 ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; 254 255 return tlbe_is_writable(gtlbe); 256 } 257 258 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) 259 { 260 if (ref->flags & E500_TLB_VALID) { 261 /* FIXME: don't log bogus pfn for TLB1 */ 262 trace_kvm_booke206_ref_release(ref->pfn, ref->flags); 263 ref->flags = 0; 264 } 265 } 266 267 static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) 268 { 269 if (vcpu_e500->g2h_tlb1_map) 270 memset(vcpu_e500->g2h_tlb1_map, 0, 271 sizeof(u64) * vcpu_e500->gtlb_params[1].entries); 272 if (vcpu_e500->h2g_tlb1_rmap) 273 memset(vcpu_e500->h2g_tlb1_rmap, 0, 274 sizeof(unsigned int) * host_tlb_params[1].entries); 275 } 276 277 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) 278 { 279 int tlbsel; 280 int i; 281 282 for (tlbsel = 0; tlbsel <= 1; tlbsel++) { 283 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { 284 struct tlbe_ref *ref = 285 &vcpu_e500->gtlb_priv[tlbsel][i].ref; 286 kvmppc_e500_ref_release(ref); 287 } 288 } 289 } 290 291 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) 292 { 293 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 294 kvmppc_e500_tlbil_all(vcpu_e500); 295 clear_tlb_privs(vcpu_e500); 296 clear_tlb1_bitmap(vcpu_e500); 297 } 298 299 /* TID must be supplied by the caller */ 300 static void kvmppc_e500_setup_stlbe( 301 struct kvm_vcpu *vcpu, 302 struct kvm_book3e_206_tlb_entry *gtlbe, 303 int tsize, struct tlbe_ref *ref, u64 gvaddr, 304 struct kvm_book3e_206_tlb_entry *stlbe) 305 { 306 kvm_pfn_t pfn = ref->pfn; 307 u32 pr = vcpu->arch.shared->msr & MSR_PR; 308 309 BUG_ON(!(ref->flags & E500_TLB_VALID)); 310 311 /* Force IPROT=0 for all guest mappings. */ 312 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; 313 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); 314 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 315 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); 316 } 317 318 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 319 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 320 int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, 321 struct tlbe_ref *ref) 322 { 323 struct kvm_memory_slot *slot; 324 unsigned long pfn = 0; /* silence GCC warning */ 325 struct page *page = NULL; 326 unsigned long hva; 327 int pfnmap = 0; 328 int tsize = BOOK3E_PAGESZ_4K; 329 int ret = 0; 330 unsigned long mmu_seq; 331 struct kvm *kvm = vcpu_e500->vcpu.kvm; 332 unsigned long tsize_pages = 0; 333 pte_t *ptep; 334 unsigned int wimg = 0; 335 pgd_t *pgdir; 336 unsigned long flags; 337 bool writable = false; 338 339 /* used to check for invalidations in progress */ 340 mmu_seq = kvm->mmu_invalidate_seq; 341 smp_rmb(); 342 343 /* 344 * Translate guest physical to true physical, acquiring 345 * a page reference if it is normal, non-reserved memory. 346 * 347 * gfn_to_memslot() must succeed because otherwise we wouldn't 348 * have gotten this far. Eventually we should just pass the slot 349 * pointer through from the first lookup. 350 */ 351 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); 352 hva = gfn_to_hva_memslot(slot, gfn); 353 354 if (tlbsel == 1) { 355 struct vm_area_struct *vma; 356 mmap_read_lock(kvm->mm); 357 358 vma = find_vma(kvm->mm, hva); 359 if (vma && hva >= vma->vm_start && 360 (vma->vm_flags & VM_PFNMAP)) { 361 /* 362 * This VMA is a physically contiguous region (e.g. 363 * /dev/mem) that bypasses normal Linux page 364 * management. Find the overlap between the 365 * vma and the memslot. 366 */ 367 368 unsigned long start, end; 369 unsigned long slot_start, slot_end; 370 371 pfnmap = 1; 372 373 start = vma->vm_pgoff; 374 end = start + 375 vma_pages(vma); 376 377 pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); 378 379 slot_start = pfn - (gfn - slot->base_gfn); 380 slot_end = slot_start + slot->npages; 381 382 if (start < slot_start) 383 start = slot_start; 384 if (end > slot_end) 385 end = slot_end; 386 387 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> 388 MAS1_TSIZE_SHIFT; 389 390 /* 391 * e500 doesn't implement the lowest tsize bit, 392 * or 1K pages. 393 */ 394 tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); 395 396 /* 397 * Now find the largest tsize (up to what the guest 398 * requested) that will cover gfn, stay within the 399 * range, and for which gfn and pfn are mutually 400 * aligned. 401 */ 402 403 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { 404 unsigned long gfn_start, gfn_end; 405 tsize_pages = 1UL << (tsize - 2); 406 407 gfn_start = gfn & ~(tsize_pages - 1); 408 gfn_end = gfn_start + tsize_pages; 409 410 if (gfn_start + pfn - gfn < start) 411 continue; 412 if (gfn_end + pfn - gfn > end) 413 continue; 414 if ((gfn & (tsize_pages - 1)) != 415 (pfn & (tsize_pages - 1))) 416 continue; 417 418 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); 419 pfn &= ~(tsize_pages - 1); 420 break; 421 } 422 } else if (vma && hva >= vma->vm_start && 423 is_vm_hugetlb_page(vma)) { 424 unsigned long psize = vma_kernel_pagesize(vma); 425 426 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> 427 MAS1_TSIZE_SHIFT; 428 429 /* 430 * Take the largest page size that satisfies both host 431 * and guest mapping 432 */ 433 tsize = min(__ilog2(psize) - 10, tsize); 434 435 /* 436 * e500 doesn't implement the lowest tsize bit, 437 * or 1K pages. 438 */ 439 tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); 440 } 441 442 mmap_read_unlock(kvm->mm); 443 } 444 445 if (likely(!pfnmap)) { 446 tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); 447 pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page); 448 if (is_error_noslot_pfn(pfn)) { 449 if (printk_ratelimit()) 450 pr_err("%s: real page not found for gfn %lx\n", 451 __func__, (long)gfn); 452 return -EINVAL; 453 } 454 455 /* Align guest and physical address to page map boundaries */ 456 pfn &= ~(tsize_pages - 1); 457 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); 458 } 459 460 spin_lock(&kvm->mmu_lock); 461 if (mmu_invalidate_retry(kvm, mmu_seq)) { 462 ret = -EAGAIN; 463 goto out; 464 } 465 466 467 pgdir = vcpu_e500->vcpu.arch.pgdir; 468 /* 469 * We are just looking at the wimg bits, so we don't 470 * care much about the trans splitting bit. 471 * We are holding kvm->mmu_lock so a notifier invalidate 472 * can't run hence pfn won't change. 473 */ 474 local_irq_save(flags); 475 ptep = find_linux_pte(pgdir, hva, NULL, NULL); 476 if (ptep) { 477 pte_t pte = READ_ONCE(*ptep); 478 479 if (pte_present(pte)) { 480 wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) & 481 MAS2_WIMGE_MASK; 482 local_irq_restore(flags); 483 } else { 484 local_irq_restore(flags); 485 pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n", 486 __func__, (long)gfn, pfn); 487 ret = -EINVAL; 488 goto out; 489 } 490 } 491 writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); 492 493 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 494 ref, gvaddr, stlbe); 495 496 /* Clear i-cache for new pages */ 497 kvmppc_mmu_flush_icache(pfn); 498 499 out: 500 kvm_release_faultin_page(kvm, page, !!ret, writable); 501 spin_unlock(&kvm->mmu_lock); 502 return ret; 503 } 504 505 /* XXX only map the one-one case, for now use TLB0 */ 506 static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel, 507 struct kvm_book3e_206_tlb_entry *stlbe) 508 { 509 struct kvm_book3e_206_tlb_entry *gtlbe; 510 struct tlbe_ref *ref; 511 int stlbsel = 0; 512 int sesel = 0; 513 int r; 514 515 gtlbe = get_entry(vcpu_e500, 0, esel); 516 ref = &vcpu_e500->gtlb_priv[0][esel].ref; 517 518 r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), 519 get_tlb_raddr(gtlbe) >> PAGE_SHIFT, 520 gtlbe, 0, stlbe, ref); 521 if (r) 522 return r; 523 524 write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel); 525 526 return 0; 527 } 528 529 static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, 530 struct tlbe_ref *ref, 531 int esel) 532 { 533 unsigned int sesel = vcpu_e500->host_tlb1_nv++; 534 535 if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) 536 vcpu_e500->host_tlb1_nv = 0; 537 538 if (vcpu_e500->h2g_tlb1_rmap[sesel]) { 539 unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; 540 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); 541 } 542 543 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; 544 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; 545 vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; 546 WARN_ON(!(ref->flags & E500_TLB_VALID)); 547 548 return sesel; 549 } 550 551 /* Caller must ensure that the specified guest TLB entry is safe to insert into 552 * the shadow TLB. */ 553 /* For both one-one and one-to-many */ 554 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, 555 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 556 struct kvm_book3e_206_tlb_entry *stlbe, int esel) 557 { 558 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; 559 int sesel; 560 int r; 561 562 r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, 563 ref); 564 if (r) 565 return r; 566 567 /* Use TLB0 when we can only map a page with 4k */ 568 if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { 569 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; 570 write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); 571 return 0; 572 } 573 574 /* Otherwise map into TLB1 */ 575 sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); 576 write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); 577 578 return 0; 579 } 580 581 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, 582 unsigned int index) 583 { 584 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 585 struct tlbe_priv *priv; 586 struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; 587 int tlbsel = tlbsel_of(index); 588 int esel = esel_of(index); 589 590 gtlbe = get_entry(vcpu_e500, tlbsel, esel); 591 592 switch (tlbsel) { 593 case 0: 594 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 595 596 /* Triggers after clear_tlb_privs or on initial mapping */ 597 if (!(priv->ref.flags & E500_TLB_VALID)) { 598 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); 599 } else { 600 kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, 601 &priv->ref, eaddr, &stlbe); 602 write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); 603 } 604 break; 605 606 case 1: { 607 gfn_t gfn = gpaddr >> PAGE_SHIFT; 608 kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, 609 esel); 610 break; 611 } 612 613 default: 614 BUG(); 615 break; 616 } 617 } 618 619 #ifdef CONFIG_KVM_BOOKE_HV 620 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, 621 enum instruction_fetch_type type, unsigned long *instr) 622 { 623 gva_t geaddr; 624 hpa_t addr; 625 hfn_t pfn; 626 hva_t eaddr; 627 u32 mas1, mas2, mas3; 628 u64 mas7_mas3; 629 struct page *page; 630 unsigned int addr_space, psize_shift; 631 bool pr; 632 unsigned long flags; 633 634 /* Search TLB for guest pc to get the real address */ 635 geaddr = kvmppc_get_pc(vcpu); 636 637 addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; 638 639 local_irq_save(flags); 640 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); 641 mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu)); 642 asm volatile("tlbsx 0, %[geaddr]\n" : : 643 [geaddr] "r" (geaddr)); 644 mtspr(SPRN_MAS5, 0); 645 mtspr(SPRN_MAS8, 0); 646 mas1 = mfspr(SPRN_MAS1); 647 mas2 = mfspr(SPRN_MAS2); 648 mas3 = mfspr(SPRN_MAS3); 649 #ifdef CONFIG_64BIT 650 mas7_mas3 = mfspr(SPRN_MAS7_MAS3); 651 #else 652 mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3; 653 #endif 654 local_irq_restore(flags); 655 656 /* 657 * If the TLB entry for guest pc was evicted, return to the guest. 658 * There are high chances to find a valid TLB entry next time. 659 */ 660 if (!(mas1 & MAS1_VALID)) 661 return EMULATE_AGAIN; 662 663 /* 664 * Another thread may rewrite the TLB entry in parallel, don't 665 * execute from the address if the execute permission is not set 666 */ 667 pr = vcpu->arch.shared->msr & MSR_PR; 668 if (unlikely((pr && !(mas3 & MAS3_UX)) || 669 (!pr && !(mas3 & MAS3_SX)))) { 670 pr_err_ratelimited( 671 "%s: Instruction emulation from guest address %08lx without execute permission\n", 672 __func__, geaddr); 673 return EMULATE_AGAIN; 674 } 675 676 /* 677 * The real address will be mapped by a cacheable, memory coherent, 678 * write-back page. Check for mismatches when LRAT is used. 679 */ 680 if (has_feature(vcpu, VCPU_FTR_MMU_V2) && 681 unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { 682 pr_err_ratelimited( 683 "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n", 684 __func__, geaddr); 685 return EMULATE_AGAIN; 686 } 687 688 /* Get pfn */ 689 psize_shift = MAS1_GET_TSIZE(mas1) + 10; 690 addr = (mas7_mas3 & (~0ULL << psize_shift)) | 691 (geaddr & ((1ULL << psize_shift) - 1ULL)); 692 pfn = addr >> PAGE_SHIFT; 693 694 /* Guard against emulation from devices area */ 695 if (unlikely(!page_is_ram(pfn))) { 696 pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", 697 __func__, addr); 698 return EMULATE_AGAIN; 699 } 700 701 /* Map a page and get guest's instruction */ 702 page = pfn_to_page(pfn); 703 eaddr = (unsigned long)kmap_atomic(page); 704 *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK)); 705 kunmap_atomic((u32 *)eaddr); 706 707 return EMULATE_DONE; 708 } 709 #else 710 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, 711 enum instruction_fetch_type type, unsigned long *instr) 712 { 713 return EMULATE_AGAIN; 714 } 715 #endif 716 717 /************* MMU Notifiers *************/ 718 719 static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 720 { 721 /* 722 * Flush all shadow tlb entries everywhere. This is slow, but 723 * we are 100% sure that we catch the to be unmapped page 724 */ 725 return true; 726 } 727 728 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) 729 { 730 return kvm_e500_mmu_unmap_gfn(kvm, range); 731 } 732 733 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 734 { 735 /* XXX could be more clever ;) */ 736 return false; 737 } 738 739 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) 740 { 741 /* XXX could be more clever ;) */ 742 return false; 743 } 744 745 /*****************************************/ 746 747 int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) 748 { 749 host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; 750 host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 751 752 /* 753 * This should never happen on real e500 hardware, but is 754 * architecturally possible -- e.g. in some weird nested 755 * virtualization case. 756 */ 757 if (host_tlb_params[0].entries == 0 || 758 host_tlb_params[1].entries == 0) { 759 pr_err("%s: need to know host tlb size\n", __func__); 760 return -ENODEV; 761 } 762 763 host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> 764 TLBnCFG_ASSOC_SHIFT; 765 host_tlb_params[1].ways = host_tlb_params[1].entries; 766 767 if (!is_power_of_2(host_tlb_params[0].entries) || 768 !is_power_of_2(host_tlb_params[0].ways) || 769 host_tlb_params[0].entries < host_tlb_params[0].ways || 770 host_tlb_params[0].ways == 0) { 771 pr_err("%s: bad tlb0 host config: %u entries %u ways\n", 772 __func__, host_tlb_params[0].entries, 773 host_tlb_params[0].ways); 774 return -ENODEV; 775 } 776 777 host_tlb_params[0].sets = 778 host_tlb_params[0].entries / host_tlb_params[0].ways; 779 host_tlb_params[1].sets = 1; 780 vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries, 781 sizeof(*vcpu_e500->h2g_tlb1_rmap), 782 GFP_KERNEL); 783 if (!vcpu_e500->h2g_tlb1_rmap) 784 return -EINVAL; 785 786 return 0; 787 } 788 789 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) 790 { 791 kfree(vcpu_e500->h2g_tlb1_rmap); 792 } 793