Lines Matching +full:i-tlb-sets

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
30 #include <asm/pte-walk.h>
38 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
45 return host_tlb_params[1].entries - tlbcam_index - 1; in tlb1_max_shadow_size()
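/*
 * A likely reading, not spelled out in the excerpt: entries below
 * tlbcam_index are the host's own TLB1 CAM mappings and so are excluded
 * from the shadow pool, and the trailing "- 1" appears to reserve one
 * more TLB1 slot, e.g. for the magic page mapping written by
 * kvmppc_map_magic() below.
 */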
67  * Write a shadow TLB entry into the host TLB. in __write_host_tlbe()
77 mtspr(SPRN_MAS1, stlbe->mas1); in __write_host_tlbe()
78 mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); in __write_host_tlbe()
79 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); in __write_host_tlbe()
80 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); in __write_host_tlbe()
93 trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, in __write_host_tlbe()
94 stlbe->mas2, stlbe->mas7_3); in __write_host_tlbe()
98 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
101 * in the right set and is not present in the TLB. Using a zero PID and a
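/*
 * A plausible sketch (the body of get_host_mas0() is elided from this
 * listing) of how such a MAS0 victim hint can be obtained: run tlbsx with
 * a zeroed MAS6 so the search is guaranteed to miss, then read back MAS0,
 * which the hardware fills with the TLBSEL/ESEL of the entry it would
 * replace. The helper name and the EPN masking below are illustrative.
 */
static inline u32 get_host_mas0_sketch(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0, mas4;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);				/* zero SPID/SAS */
	mas4 = mfspr(SPRN_MAS4);
	mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);	/* victim hint from TLB0 */
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & MAS2_EPN));
	mas0 = mfspr(SPRN_MAS0);
	mtspr(SPRN_MAS4, mas4);
	local_irq_restore(flags);

	return mas0;
}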
130 mas0 = get_host_mas0(stlbe->mas2); in write_host_tlbe()
131 __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
136 vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
149 stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); in write_stlbe()
151 stlbe->mas1 |= MAS1_TID(stid); in write_stlbe()
162 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; in kvmppc_map_magic()
174 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; in kvmppc_map_magic()
189 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; in inval_gtlbe_on_host()
192 if (!(ref->flags & E500_TLB_VALID)) { in inval_gtlbe_on_host()
193 WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), in inval_gtlbe_on_host()
194 "%s: flags %x\n", __func__, ref->flags); in inval_gtlbe_on_host()
195 WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); in inval_gtlbe_on_host()
198 if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { in inval_gtlbe_on_host()
199 u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; in inval_gtlbe_on_host()
205 hw_tlb_indx = __ilog2_u64(tmp & -tmp); in inval_gtlbe_on_host()
211 vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; in inval_gtlbe_on_host()
212 tmp &= tmp - 1; in inval_gtlbe_on_host()
215 vcpu_e500->g2h_tlb1_map[esel] = 0; in inval_gtlbe_on_host()
216 ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); in inval_gtlbe_on_host()
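/*
 * Illustrative helper (not in the file) restating the bit-walk idiom used
 * above: tmp & -tmp isolates the lowest set bit, __ilog2_u64() converts it
 * into a host TLB1 slot index, and tmp &= tmp - 1 clears that bit before
 * the next iteration.
 */
static void for_each_shadow_slot_sketch(u64 map)
{
	while (map) {
		int hw_tlb_indx = __ilog2_u64(map & -map);	/* lowest set bit */

		pr_debug("would invalidate host TLB1 slot %d\n", hw_tlb_indx);
		map &= map - 1;					/* clear that bit */
	}
}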
220 if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { in inval_gtlbe_on_host()
226 ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); in inval_gtlbe_on_host()
230 * If TLB entry is still valid then it's a TLB0 entry, and thus in inval_gtlbe_on_host()
233 if (ref->flags & E500_TLB_VALID) in inval_gtlbe_on_host()
236 /* Mark the TLB as not backed by the host anymore */ in inval_gtlbe_on_host()
237 ref->flags = 0; in inval_gtlbe_on_host()
242 return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); in tlbe_is_writable()
249 ref->pfn = pfn; in kvmppc_e500_ref_setup()
250 ref->flags = E500_TLB_VALID; in kvmppc_e500_ref_setup()
253 ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; in kvmppc_e500_ref_setup()
264 if (ref->flags & E500_TLB_VALID) { in kvmppc_e500_ref_release()
266 trace_kvm_booke206_ref_release(ref->pfn, ref->flags); in kvmppc_e500_ref_release()
267 ref->flags = 0; in kvmppc_e500_ref_release()
273 if (vcpu_e500->g2h_tlb1_map) in clear_tlb1_bitmap()
274 memset(vcpu_e500->g2h_tlb1_map, 0, in clear_tlb1_bitmap()
275 sizeof(u64) * vcpu_e500->gtlb_params[1].entries); in clear_tlb1_bitmap()
276 if (vcpu_e500->h2g_tlb1_rmap) in clear_tlb1_bitmap()
277 memset(vcpu_e500->h2g_tlb1_rmap, 0, in clear_tlb1_bitmap()
284 int i; in clear_tlb_privs() local
287 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { in clear_tlb_privs()
289 &vcpu_e500->gtlb_priv[tlbsel][i].ref; in clear_tlb_privs()
310 kvm_pfn_t pfn = ref->pfn; in kvmppc_e500_setup_stlbe()
311 u32 pr = vcpu->arch.shared->msr & MSR_PR; in kvmppc_e500_setup_stlbe()
313 BUG_ON(!(ref->flags & E500_TLB_VALID)); in kvmppc_e500_setup_stlbe()
316 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; in kvmppc_e500_setup_stlbe()
317 stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); in kvmppc_e500_setup_stlbe()
318 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | in kvmppc_e500_setup_stlbe()
319 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); in kvmppc_e500_setup_stlbe()
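/*
 * Hand-worked example (values purely illustrative): a 4 KiB guest page at
 * guest virtual 0xc0000000, backed by host pfn 0x12345 with supervisor and
 * user read permission, would yield roughly these shadow fields in the
 * struct kvm_book3e_206_tlb_entry layout used above.
 */
static const struct kvm_book3e_206_tlb_entry example_stlbe = {
	.mas1   = MAS1_TSIZE(BOOK3E_PAGESZ_4K) | MAS1_VALID,
	.mas2   = (0xc0000000UL & MAS2_EPN) | MAS2_M,
	.mas7_3 = ((u64)0x12345 << PAGE_SHIFT) | MAS3_SR | MAS3_UR,
};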
334 struct kvm *kvm = vcpu_e500->vcpu.kvm; in kvmppc_e500_shadow_map()
342 mmu_seq = kvm->mmu_invalidate_seq; in kvmppc_e500_shadow_map()
347 * a page reference if it is normal, non-reserved memory. in kvmppc_e500_shadow_map()
353 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
358 mmap_read_lock(kvm->mm); in kvmppc_e500_shadow_map()
360 vma = find_vma(kvm->mm, hva); in kvmppc_e500_shadow_map()
361 if (vma && hva >= vma->vm_start && in kvmppc_e500_shadow_map()
362 (vma->vm_flags & VM_PFNMAP)) { in kvmppc_e500_shadow_map()
375 start = vma->vm_pgoff; in kvmppc_e500_shadow_map()
379 pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); in kvmppc_e500_shadow_map()
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
382 slot_end = slot_start + slot->npages; in kvmppc_e500_shadow_map()
389 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> in kvmppc_e500_shadow_map()
405 for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { in kvmppc_e500_shadow_map()
407 tsize_pages = 1UL << (tsize - 2); in kvmppc_e500_shadow_map()
409 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
412 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
414 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
416 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
417 (pfn & (tsize_pages - 1))) in kvmppc_e500_shadow_map()
420 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); in kvmppc_e500_shadow_map()
421 pfn &= ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
424 } else if (vma && hva >= vma->vm_start && in kvmppc_e500_shadow_map()
428 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> in kvmppc_e500_shadow_map()
435 tsize = min(__ilog2(psize) - 10, tsize); in kvmppc_e500_shadow_map()
444 mmap_read_unlock(kvm->mm); in kvmppc_e500_shadow_map()
448 tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); in kvmppc_e500_shadow_map()
454 return -EINVAL; in kvmppc_e500_shadow_map()
458 pfn &= ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
459 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); in kvmppc_e500_shadow_map()
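/*
 * Illustrative helper capturing the size arithmetic above: with the
 * BOOK3E_PAGESZ_* encoding a TSIZE value covers 1 KiB << tsize bytes, i.e.
 * 1UL << (tsize + 10 - PAGE_SHIFT) pages; the earlier shorthand
 * 1UL << (tsize - 2) assumes PAGE_SHIFT == 12.
 */
static inline unsigned long tsize_to_pages_sketch(int tsize)
{
	return 1UL << (tsize + 10 - PAGE_SHIFT);
}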
462 spin_lock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
464 ret = -EAGAIN; in kvmppc_e500_shadow_map()
469 pgdir = vcpu_e500->vcpu.arch.pgdir; in kvmppc_e500_shadow_map()
473 * We are holding kvm->mmu_lock so a notifier invalidate in kvmppc_e500_shadow_map()
489 ret = -EINVAL; in kvmppc_e500_shadow_map()
495 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, in kvmppc_e500_shadow_map()
498 /* Clear i-cache for new pages */ in kvmppc_e500_shadow_map()
502 spin_unlock(&kvm->mmu_lock); in kvmppc_e500_shadow_map()
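/*
 * Condensed sketch (illustrative, not the file's code) of the MMU notifier
 * race check framing the mapping above: kvm->mmu_invalidate_seq is sampled
 * before the gfn is translated, then re-checked under kvm->mmu_lock before
 * the shadow entry is installed, backing off with -EAGAIN if it moved.
 */
static int shadow_map_race_check_sketch(struct kvm *kvm, unsigned long mmu_seq)
{
	int ret = 0;

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;		/* an invalidation raced with us */
		goto out;
	}
	/* ... build and write the shadow TLB entry here ... */
out:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}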
510 /* XXX only map the one-one case, for now use TLB0 */
521 ref = &vcpu_e500->gtlb_priv[0][esel].ref; in kvmppc_e500_tlb0_map()
538 unsigned int sesel = vcpu_e500->host_tlb1_nv++; in kvmppc_e500_tlb1_map_tlb1()
540 if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) in kvmppc_e500_tlb1_map_tlb1()
541 vcpu_e500->host_tlb1_nv = 0; in kvmppc_e500_tlb1_map_tlb1()
543 if (vcpu_e500->h2g_tlb1_rmap[sesel]) { in kvmppc_e500_tlb1_map_tlb1()
544 unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; in kvmppc_e500_tlb1_map_tlb1()
545 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); in kvmppc_e500_tlb1_map_tlb1()
548 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; in kvmppc_e500_tlb1_map_tlb1()
549 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; in kvmppc_e500_tlb1_map_tlb1()
550 vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; in kvmppc_e500_tlb1_map_tlb1()
551 WARN_ON(!(ref->flags & E500_TLB_VALID)); in kvmppc_e500_tlb1_map_tlb1()
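/*
 * Plausible reading of the two structures kept in sync above:
 * g2h_tlb1_map[esel] is a bitmask of host TLB1 slots currently shadowing
 * guest entry esel, while h2g_tlb1_rmap[sesel] stores esel + 1 so that a
 * value of 0 can mean "host slot unused". The helper name is illustrative.
 */
static bool host_slot_shadows_guest(struct kvmppc_vcpu_e500 *vcpu_e500,
				    unsigned int sesel, int esel)
{
	return vcpu_e500->h2g_tlb1_rmap[sesel] == (unsigned int)esel + 1 &&
	       (vcpu_e500->g2h_tlb1_map[esel] & (1ULL << sesel));
}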
556 /* Caller must ensure that the specified guest TLB entry is safe to insert into
557 * the shadow TLB. */
558 /* For both one-one and one-to-many */
563 struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; in kvmppc_e500_tlb1_map()
574 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0; in kvmppc_e500_tlb1_map()
599 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; in kvmppc_mmu_map()
602 if (!(priv->ref.flags & E500_TLB_VALID)) { in kvmppc_mmu_map()
606 &priv->ref, eaddr, &stlbe); in kvmppc_mmu_map()
639 /* Search TLB for guest pc to get the real address */ in kvmppc_load_last_inst()
642 addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG; in kvmppc_load_last_inst()
645 mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space); in kvmppc_load_last_inst()
662 * If the TLB entry for guest pc was evicted, return to the guest. in kvmppc_load_last_inst()
663  * There is a good chance a valid TLB entry will be found next time. in kvmppc_load_last_inst()
669 * Another thread may rewrite the TLB entry in parallel, don't in kvmppc_load_last_inst()
672 pr = vcpu->arch.shared->msr & MSR_PR; in kvmppc_load_last_inst()
683 * write-back page. Check for mismatches when LRAT is used. in kvmppc_load_last_inst()
696 (geaddr & ((1ULL << psize_shift) - 1ULL)); in kvmppc_load_last_inst()
701 …pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n", in kvmppc_load_last_inst()
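/*
 * Rough sketch (illustrative) of the software tlbsx lookup performed above
 * to translate the guest pc: program MAS6 with the guest PID and address
 * space, search, and read any match back from MAS1/MAS3/MAS7.
 */
static u64 lookup_host_tlb_sketch(u32 pid, int as, unsigned long geaddr)
{
	unsigned long flags;
	u32 mas1;
	u64 mas7_3;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, (pid << MAS6_SPID_SHIFT) | as);
	asm volatile("isync; tlbsx 0, %0" : : "b" (geaddr));
	mas1 = mfspr(SPRN_MAS1);
	mas7_3 = ((u64)mfspr(SPRN_MAS7) << 32) | mfspr(SPRN_MAS3);
	local_irq_restore(flags);

	return (mas1 & MAS1_VALID) ? mas7_3 : 0;
}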
727 * Flush all shadow tlb entries everywhere. This is slow, but in kvm_e500_mmu_unmap_gfn()
759 * architecturally possible -- e.g. in some weird nested in e500_mmu_host_init()
764 pr_err("%s: need to know host tlb size\n", __func__); in e500_mmu_host_init()
765 return -ENODEV; in e500_mmu_host_init()
779 return -ENODEV; in e500_mmu_host_init()
782 host_tlb_params[0].sets = in e500_mmu_host_init()
784 host_tlb_params[1].sets = 1; in e500_mmu_host_init()
785 vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries, in e500_mmu_host_init()
786 sizeof(*vcpu_e500->h2g_tlb1_rmap), in e500_mmu_host_init()
788 if (!vcpu_e500->h2g_tlb1_rmap) in e500_mmu_host_init()
789 return -EINVAL; in e500_mmu_host_init()
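/*
 * Illustrative restatement of the geometry derived in e500_mmu_host_init()
 * above: TLB0's set count follows from the entry and associativity fields
 * of TLB0CFG, while TLB1 is fully associative and therefore has one set.
 */
static unsigned int host_tlb0_sets_sketch(void)
{
	u32 tlb0cfg = mfspr(SPRN_TLB0CFG);
	unsigned int entries = tlb0cfg & TLBnCFG_N_ENTRY;
	unsigned int ways = (tlb0cfg & TLBnCFG_ASSOC) >> TLBnCFG_ASSOC_SHIFT;

	return ways ? entries / ways : entries;	/* guard a zero ASSOC field */
}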
796 kfree(vcpu_e500->h2g_tlb1_rmap); in e500_mmu_host_uninit()