Searched refs:tlb_lock (Results 1 – 3 of 3) sorted by relevance
348  mutex_lock(&l2->tlb_lock);    in kvmhv_enter_nested_guest()
350  mutex_unlock(&l2->tlb_lock);  in kvmhv_enter_nested_guest()
617  mutex_lock(&gp->tlb_lock);    in kvmhv_copy_tofrom_guest_nested()
648  mutex_unlock(&gp->tlb_lock);  in kvmhv_copy_tofrom_guest_nested()
726  mutex_init(&gp->tlb_lock);    in kvmhv_alloc_nested()
1145 mutex_lock(&gp->tlb_lock);    in kvmhv_emulate_tlbie_tlb_addr()
1155 mutex_unlock(&gp->tlb_lock);  in kvmhv_emulate_tlbie_tlb_addr()
1165 mutex_lock(&gp->tlb_lock);    in kvmhv_emulate_tlbie_lpid()
1188 mutex_unlock(&gp->tlb_lock);  in kvmhv_emulate_tlbie_lpid()
1692 mutex_lock(&gp->tlb_lock);    in kvmhv_nested_page_fault()
[all …]
248  spinlock_t tlb_lock; /* lock for tlb range flush */        member
382  spin_lock_irqsave(&bank->tlb_lock, flags);                 in mtk_iommu_tlb_flush_all()
386  spin_unlock_irqrestore(&bank->tlb_lock, flags);            in mtk_iommu_tlb_flush_all()
427  spin_lock_irqsave(&curbank->tlb_lock, flags);              in mtk_iommu_tlb_flush_range_sync()
442  spin_unlock_irqrestore(&curbank->tlb_lock, flags);         in mtk_iommu_tlb_flush_range_sync()
1336 spin_lock_init(&bank->tlb_lock);                           in mtk_iommu_probe()
31 struct mutex tlb_lock; /* serialize page faults and tlbies */ member