| /linux/drivers/staging/media/ipu3/ |
| H A D | ipu3-mmu.c |
    78  static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu) in imgu_mmu_tlb_invalidate() argument
    80  writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE); in imgu_mmu_tlb_invalidate()
    83  static void call_if_imgu_is_powered(struct imgu_mmu *mmu, in call_if_imgu_is_powered() argument
    84  void (*func)(struct imgu_mmu *mmu)) in call_if_imgu_is_powered() argument
    86  if (!pm_runtime_get_if_in_use(mmu->dev)) in call_if_imgu_is_powered()
    89  func(mmu); in call_if_imgu_is_powered()
    90  pm_runtime_put(mmu->dev); in call_if_imgu_is_powered()
    101  static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt) in imgu_mmu_set_halt() argument
    106  writel(halt, mmu->base + REG_GP_HALT); in imgu_mmu_set_halt()
    107  ret = readl_poll_timeout(mmu->base + REG_GP_HALTED, in imgu_mmu_set_halt()
    [all …]
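The imgu_mmu_set_halt() hits above pair a writel() with readl_poll_timeout() to wait for the halt status to latch. Below is a minimal user-space sketch of that poll-until-set-or-timeout idiom, with the memory-mapped register stood in by a plain variable; every name in it is invented for illustration, not taken from the driver.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for a memory-mapped status register. */
    static volatile uint32_t fake_halted_reg;

    static uint64_t now_us(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    /* Poll *reg until (*reg & mask) == expect, or give up after timeout_us. */
    static int poll_reg_timeout(volatile uint32_t *reg, uint32_t mask,
                                uint32_t expect, uint64_t timeout_us)
    {
            uint64_t deadline = now_us() + timeout_us;

            while ((*reg & mask) != expect) {
                    if (now_us() > deadline)
                            return -1;      /* the kernel helper returns -ETIMEDOUT */
            }
            return 0;
    }

    int main(void)
    {
            fake_halted_reg = 1;    /* pretend the hardware halted immediately */
            printf("halt wait: %d\n", poll_reg_timeout(&fake_halted_reg, 1, 1, 1000));
            return 0;
    }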
|
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| H A D | base.c |
    42  nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
    51  list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
    56  nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
    65  nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero) in nvkm_mmu_ptp_get() argument
    74  ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head); in nvkm_mmu_ptp_get()
    82  ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false); in nvkm_mmu_ptp_get()
    93  list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_get()
    120  nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size) in nvkm_mmu_ptc_find() argument
    124  list_for_each_entry(ptc, &mmu->ptc.list, head) { in nvkm_mmu_ptc_find()
    134  list_add(&ptc->head, &mmu->ptc.list); in nvkm_mmu_ptc_find()
    [all …]
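nvkm_mmu_ptp_get()/nvkm_mmu_ptp_put() recycle page-table allocations through a free list: list_first_entry_or_null() reuses a cached entry, list_add() returns one to the cache. Here is a toy sketch of the same reuse-before-allocate shape, using a plain singly linked list instead of the kernel's list_head; all names are illustrative, not Nouveau's.

    #include <stdio.h>
    #include <stdlib.h>

    struct pt_buf {
            struct pt_buf *next;    /* free-list linkage */
            char data[4096];        /* the cached allocation itself */
    };

    static struct pt_buf *free_list;

    /* Reuse a cached buffer if one exists, otherwise allocate a fresh one. */
    static struct pt_buf *pt_get(void)
    {
            struct pt_buf *buf = free_list;

            if (buf) {
                    free_list = buf->next;
                    return buf;
            }
            return calloc(1, sizeof(*buf));
    }

    /* Return a buffer to the cache instead of freeing it. */
    static void pt_put(struct pt_buf *buf)
    {
            buf->next = free_list;
            free_list = buf;
    }

    int main(void)
    {
            struct pt_buf *a = pt_get();

            pt_put(a);
            printf("reused: %s\n", pt_get() == a ? "yes" : "no");  /* prints "yes" */
            return 0;
    }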
|
| H A D | ummu.c |
    35  struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
    37  if (mmu->func->mem.user.oclass) { in nvkm_ummu_sclass()
    39  oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
    45  if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
    47  oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
    59  struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_heap() local
    67  if ((index = args->v0.index) >= mmu->heap_nr) in nvkm_ummu_heap()
    69  args->v0.size = mmu->heap[index].size; in nvkm_ummu_heap()
    79  struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_type() local
    87  if ((index = args->v0.index) >= mmu->type_nr) in nvkm_ummu_type()
    [all …]
|
| H A D | umem.c |
    72  struct nvkm_device *device = umem->mmu->subdev.device; in nvkm_umem_unmap()
    90  struct nvkm_mmu *mmu = umem->mmu; in nvkm_umem_map() local
    109  int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc, in nvkm_umem_map()
    145  struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu; in nvkm_umem_new() local
    161  if (type >= mmu->type_nr) in nvkm_umem_new()
    167  umem->mmu = mmu; in nvkm_umem_new()
    168  umem->type = mmu->type[type].type; in nvkm_umem_new()
    172  if (mmu->type[type].type & NVKM_MEM_MAPPABLE) { in nvkm_umem_new()
    177  ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc, in nvkm_umem_new()
|
| H A D | mem.c |
    33  struct nvkm_mmu *mmu; member
    88  dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
    144  nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in nvkm_mem_new_host() argument
    147  struct device *dev = mmu->subdev.device->dev; in nvkm_mem_new_host()
    157  if ( (mmu->type[type].type & NVKM_MEM_COHERENT) && in nvkm_mem_new_host()
    158  !(mmu->type[type].type & NVKM_MEM_UNCACHED)) in nvkm_mem_new_host()
    169  mem->mmu = mmu; in nvkm_mem_new_host()
    199  if (mmu->dma_bits > 32) in nvkm_mem_new_host()
    209  mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev, in nvkm_mem_new_host()
    224  nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in nvkm_mem_new_type() argument
    [all …]
|
| /linux/drivers/gpu/drm/nouveau/nvif/ |
| H A D | mmu.c |
    28  nvif_mmu_dtor(struct nvif_mmu *mmu) in nvif_mmu_dtor() argument
    30  if (!nvif_object_constructed(&mmu->object)) in nvif_mmu_dtor()
    33  kfree(mmu->kind); in nvif_mmu_dtor()
    34  kfree(mmu->type); in nvif_mmu_dtor()
    35  kfree(mmu->heap); in nvif_mmu_dtor()
    36  nvif_object_dtor(&mmu->object); in nvif_mmu_dtor()
    41  struct nvif_mmu *mmu) in nvif_mmu_ctor() argument
    53  mmu->heap = NULL; in nvif_mmu_ctor()
    54  mmu->type = NULL; in nvif_mmu_ctor()
    55  mmu->kind = NULL; in nvif_mmu_ctor()
    [all …]
|
| H A D | mem.c |
    28  nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size, in nvif_mem_ctor_map() argument
    31  int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type, in nvif_mem_ctor_map()
    48  nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass, in nvif_mem_ctor_type() argument
    72  ret = nvif_object_ctor(&mmu->object, name ? name : "nvifMem", 0, oclass, in nvif_mem_ctor_type()
    75  mem->type = mmu->type[type].type; in nvif_mem_ctor_type()
    88  nvif_mem_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, u8 type, in nvif_mem_ctor() argument
    95  for (i = 0; ret && i < mmu->type_nr; i++) { in nvif_mem_ctor()
    96  if ((mmu->type[i].type & type) == type) { in nvif_mem_ctor()
    97  ret = nvif_mem_ctor_type(mmu, name, oclass, i, page, in nvif_mem_ctor()
|
| /linux/drivers/iommu/ |
| H A D | ipmmu-vmsa.c |
    71  struct ipmmu_vmsa_device *mmu; member
    149  static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) in ipmmu_is_root() argument
    151  return mmu->root == mmu; in ipmmu_is_root()
    156  struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); in __ipmmu_check_device() local
    159  if (ipmmu_is_root(mmu)) in __ipmmu_check_device()
    160  *rootp = mmu; in __ipmmu_check_device()
    177  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument
    179  return ioread32(mmu->base + offset); in ipmmu_read()
    182  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument
    185  iowrite32(data, mmu->base + offset); in ipmmu_write()
    [all …]
|
| /linux/drivers/media/pci/intel/ipu6/ |
| H A D | ipu6-mmu.c |
    54  static void tlb_invalidate(struct ipu6_mmu *mmu) in tlb_invalidate() argument
    59  spin_lock_irqsave(&mmu->ready_lock, flags); in tlb_invalidate()
    60  if (!mmu->ready) { in tlb_invalidate()
    61  spin_unlock_irqrestore(&mmu->ready_lock, flags); in tlb_invalidate()
    65  for (i = 0; i < mmu->nr_mmus; i++) { in tlb_invalidate()
    74  if (mmu->mmu_hw[i].insert_read_before_invalidate) in tlb_invalidate()
    75  readl(mmu->mmu_hw[i].base + REG_L1_PHYS); in tlb_invalidate()
    77  writel(0xffffffff, mmu->mmu_hw[i].base + in tlb_invalidate()
    87  spin_unlock_irqrestore(&mmu->ready_lock, flags); in tlb_invalidate()
    414  static int allocate_trash_buffer(struct ipu6_mmu *mmu) in allocate_trash_buffer() argument
    [all …]
|
| /linux/arch/arm64/kvm/hyp/nvhe/ |
| H A D | tlb.c |
    14  struct kvm_s2_mmu *mmu; member
    19  static void enter_vmid_context(struct kvm_s2_mmu *mmu, in enter_vmid_context() argument
    23  struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu; in enter_vmid_context()
    29  cxt->mmu = NULL; in enter_vmid_context()
    62  if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu)) in enter_vmid_context()
    65  cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
    68  if (mmu == host_s2_mmu) in enter_vmid_context()
    71  cxt->mmu = host_s2_mmu; in enter_vmid_context()
    113  __load_stage2(mmu, kern_hyp_va(mmu->arch)); in enter_vmid_context()
    120  struct kvm_s2_mmu *mmu = cxt->mmu; in exit_vmid_context() local
    [all …]
|
| /linux/drivers/gpu/drm/msm/ |
| H A D | msm_mmu.h |
    17  void (*detach)(struct msm_mmu *mmu);
    18  void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
    20  int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
    21  void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
    22  int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
    24  int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
    25  void (*destroy)(struct msm_mmu *mmu);
    26  void (*set_stall)(struct msm_mmu *mmu, bool enable);
    69  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument
    72  mmu->dev = dev; in msm_mmu_init()
    [all …]
|
| H A D | msm_iommu.c |
    40  static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu) in to_pagetable() argument
    42  return container_of(mmu, struct msm_iommu_pagetable, base); in to_pagetable()
    101  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova, in msm_iommu_pagetable_unmap() argument
    104  struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); in msm_iommu_pagetable_unmap()
    134  static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot) in msm_iommu_pagetable_map_prr() argument
    136  struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); in msm_iommu_pagetable_map_prr()
    156  msm_iommu_pagetable_unmap(mmu, iova, addr - iova); in msm_iommu_pagetable_map_prr()
    164  static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, in msm_iommu_pagetable_map() argument
    168  struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); in msm_iommu_pagetable_map()
    175  return msm_iommu_pagetable_map_prr(mmu, iova, len, prot); in msm_iommu_pagetable_map()
    [all …]
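to_pagetable() recovers the backend-specific object from the generic struct msm_mmu handle via container_of(), and msm_mmu.h above declares the function-pointer table that calls dispatch through. Below is a stripped-down, hypothetical rendering of that embedded-base-struct pattern in plain C; none of these names come from the msm driver.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mmu;

    struct mmu_funcs {
            int (*map)(struct mmu *mmu, unsigned long iova, size_t len);
    };

    struct mmu {
            const struct mmu_funcs *funcs;  /* per-backend ops table */
    };

    /* A concrete backend embeds the generic struct so container_of() can go back. */
    struct toy_pagetable {
            struct mmu base;
            int mapped;
    };

    static int toy_map(struct mmu *mmu, unsigned long iova, size_t len)
    {
            struct toy_pagetable *pt = container_of(mmu, struct toy_pagetable, base);

            pt->mapped++;
            return 0;
    }

    static const struct mmu_funcs toy_funcs = { .map = toy_map };

    int main(void)
    {
            struct toy_pagetable pt = { .base.funcs = &toy_funcs };

            pt.base.funcs->map(&pt.base, 0x1000, 4096);
            printf("mapped regions: %d\n", pt.mapped);      /* prints 1 */
            return 0;
    }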
|
| /linux/arch/arm64/kvm/hyp/vhe/ |
| H A D | tlb.c |
    14  struct kvm_s2_mmu *mmu; member
    20  static void enter_vmid_context(struct kvm_s2_mmu *mmu, in enter_vmid_context() argument
    28  if (vcpu && mmu != vcpu->arch.hw_mmu) in enter_vmid_context()
    29  cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
    31  cxt->mmu = NULL; in enter_vmid_context()
    63  __load_stage2(mmu, mmu->arch); in enter_vmid_context()
    80  if (cxt->mmu) in exit_vmid_context()
    81  __load_stage2(cxt->mmu, cxt->mmu->arch); in exit_vmid_context()
    92  void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
    100  enter_vmid_context(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
    [all …]
|
| /linux/arch/arc/mm/ |
| H A D | tlb.c |
    136  struct cpuinfo_arc_mmu *mmu = &mmuinfo; in local_flush_tlb_all() local
    139  int num_tlb = mmu->sets * mmu->ways; in local_flush_tlb_all()
    565  struct cpuinfo_arc_mmu *mmu = &mmuinfo; in arc_mmu_mumbojumbo() local
    573  mmu->ver = (bcr >> 24); in arc_mmu_mumbojumbo()
    575  if (is_isa_arcompact() && mmu->ver == 3) { in arc_mmu_mumbojumbo()
    577  mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1); in arc_mmu_mumbojumbo()
    578  mmu->sets = 1 << mmu3->sets; in arc_mmu_mumbojumbo()
    579  mmu->ways = 1 << mmu3->ways; in arc_mmu_mumbojumbo()
    585  mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); in arc_mmu_mumbojumbo()
    586  mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11); in arc_mmu_mumbojumbo()
    [all …]
|
| /linux/drivers/xen/ |
| H A D | grant-dma-iommu.c |
    36  struct grant_dma_iommu_device *mmu; in grant_dma_iommu_probe() local
    39  mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); in grant_dma_iommu_probe()
    40  if (!mmu) in grant_dma_iommu_probe()
    43  mmu->dev = &pdev->dev; in grant_dma_iommu_probe()
    45  ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); in grant_dma_iommu_probe()
    49  platform_set_drvdata(pdev, mmu); in grant_dma_iommu_probe()
    56  struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); in grant_dma_iommu_remove() local
    59  iommu_device_unregister(&mmu->iommu); in grant_dma_iommu_remove()
|
| /linux/drivers/gpu/drm/nouveau/ |
| H A D | nouveau_mem.c |
    93  struct nvif_mmu *mmu = &drm->mmu; in nouveau_mem_host() local
    103  if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND)) in nouveau_mem_host()
    105  if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) { in nouveau_mem_host()
    106  if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) in nouveau_mem_host()
    107  mem->kind = mmu->kind[mem->kind]; in nouveau_mem_host()
    117  ret = nvif_mem_ctor_type(mmu, "ttmHostMem", mmu->mem, type, PAGE_SHIFT, in nouveau_mem_host()
    129  struct nvif_mmu *mmu = &drm->mmu; in nouveau_mem_vram() local
    134  switch (mmu->mem) { in nouveau_mem_vram()
    136  ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem, in nouveau_mem_vram()
    144  ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem, in nouveau_mem_vram()
    [all …]
|
| /linux/arch/x86/kvm/mmu/ |
| H A D | paging_tmpl.h |
    31  #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
    45  #define PT_HAVE_ACCESSED_DIRTY(mmu) true argument
    58  #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled) argument
    109  static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, in FNAME()
    115  if (!PT_HAVE_ACCESSED_DIRTY(mmu)) in FNAME()
    145  static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
    147  return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) || in FNAME()
    148  FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte); in FNAME()
    159  if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && in FNAME()
    163  if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
    [all …]
|
| H A D | mmu.c |
    224  static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu) \
    226  return !!(mmu->cpu_role. base_or_ext . reg##_##name); \
    237  static inline bool is_cr0_pg(struct kvm_mmu *mmu) in is_cr0_pg() argument
    239  return mmu->cpu_role.base.level > 0; in is_cr0_pg()
    242  static inline bool is_cr4_pae(struct kvm_mmu *mmu) in is_cr4_pae() argument
    244  return !mmu->cpu_role.base.has_4_byte_gpte; in is_cr4_pae()
    264  struct kvm_mmu *mmu) in kvm_mmu_get_guest_pgd() argument
    266  if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) in kvm_mmu_get_guest_pgd()
    269  return mmu->get_guest_pgd(vcpu); in kvm_mmu_get_guest_pgd()
    557  return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; in is_tdp_mmu_active()
    [all …]
|
| /linux/drivers/gpu/drm/panthor/ |
| H A D | panthor_mmu.c |
    566  lockdep_assert_held(&ptdev->mmu->as.slots_lock); in mmu_hw_do_operation_locked()
    616  mutex_lock(&ptdev->mmu->as.slots_lock); in mmu_hw_do_operation()
    618  mutex_unlock(&ptdev->mmu->as.slots_lock); in mmu_hw_do_operation()
    691  lockdep_assert_held(&ptdev->mmu->as.slots_lock); in panthor_vm_release_as_locked()
    696  ptdev->mmu->as.slots[vm->as.id].vm = NULL; in panthor_vm_release_as_locked()
    697  clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); in panthor_vm_release_as_locked()
    725  mutex_lock(&ptdev->mmu->as.slots_lock); in panthor_vm_active()
    735  if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) in panthor_vm_active()
    743  drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0)); in panthor_vm_active()
    746  as = ffz(ptdev->mmu->as.alloc_mask | BIT(0)); in panthor_vm_active()
    [all …]
|
| /linux/drivers/gpu/drm/msm/adreno/ |
| H A D | a2xx_gpummu.c |
    27  static void a2xx_gpummu_detach(struct msm_mmu *mmu) in a2xx_gpummu_detach() argument
    31  static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova, in a2xx_gpummu_map() argument
    35  struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); in a2xx_gpummu_map()
    62  static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) in a2xx_gpummu_unmap() argument
    64  struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); in a2xx_gpummu_unmap()
    77  static void a2xx_gpummu_destroy(struct msm_mmu *mmu) in a2xx_gpummu_destroy() argument
    79  struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); in a2xx_gpummu_destroy()
    81  dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, in a2xx_gpummu_destroy()
    115  void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base, in a2xx_gpummu_params() argument
    118  dma_addr_t base = to_a2xx_gpummu(mmu)->pt_base; in a2xx_gpummu_params()
|
| /linux/arch/um/kernel/skas/ |
| H A D | mmu.c |
    79  struct mm_context *mmu = &mm->context; in destroy_context() local
    89  if (mmu->id.pid >= 0 && mmu->id.pid < 2) { in destroy_context()
    91  mmu->id.pid); in destroy_context()
    98  if (mmu->id.pid > 0) { in destroy_context()
    99  os_kill_ptraced_process(mmu->id.pid, 1); in destroy_context()
    100  mmu->id.pid = -1; in destroy_context()
    103  if (using_seccomp && mmu->id.sock) in destroy_context()
    104  os_close_file(mmu->id.sock); in destroy_context()
    106  free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES)); in destroy_context()
|
| /linux/arch/arm64/kvm/ |
| H A D | mmu.c |
    62  static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, in stage2_apply_range() argument
    67  struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in stage2_apply_range()
    72  struct kvm_pgtable *pgt = mmu->pgt; in stage2_apply_range()
    88  #define stage2_apply_range_resched(mmu, addr, end, fn) \ argument
    89  stage2_apply_range(mmu, addr, end, fn, true)
    114  chunk_size = kvm->arch.mmu.split_page_chunk_size; in need_split_memcache_topup_or_resched()
    116  cache = &kvm->arch.mmu.split_page_cache; in need_split_memcache_topup_or_resched()
    130  chunk_size = kvm->arch.mmu.split_page_chunk_size; in kvm_mmu_split_huge_pages()
    136  cache = &kvm->arch.mmu.split_page_cache; in kvm_mmu_split_huge_pages()
    151  pgt = kvm->arch.mmu.pgt; in kvm_mmu_split_huge_pages()
    [all …]
|
| /linux/drivers/gpu/drm/nouveau/include/nvif/ |
| H A D | mmu.h |
    39  nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
    42  if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) in nvif_mmu_kind_valid()
    49  nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
    52  for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
    53  if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
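nvif_mmu_type() returns the first memory type whose capability bits cover the requested mask, i.e. (type & mask) == mask. Here is the same selection loop in isolation, against a made-up capability table; the flag names and values are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    #define MEM_MAPPABLE 0x01
    #define MEM_COHERENT 0x02
    #define MEM_VRAM     0x04

    static const uint8_t mem_types[] = {
            MEM_VRAM,
            MEM_MAPPABLE | MEM_COHERENT,
            MEM_MAPPABLE | MEM_COHERENT | MEM_VRAM,
    };

    /* Return the index of the first type providing every bit in mask, or -1. */
    static int mem_type_find(uint8_t mask)
    {
            for (int i = 0; i < (int)(sizeof(mem_types) / sizeof(mem_types[0])); i++) {
                    if ((mem_types[i] & mask) == mask)
                            return i;
            }
            return -1;
    }

    int main(void)
    {
            printf("%d\n", mem_type_find(MEM_MAPPABLE | MEM_VRAM));  /* prints 2 */
            return 0;
    }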
|
| /linux/arch/x86/kvm/ |
| H A D | mmu.h |
    96  struct kvm_mmu *mmu);
    120  if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE)) in kvm_mmu_reload()
    150  u64 root_hpa = vcpu->arch.mmu->root.hpa; in kvm_mmu_load_pgd()
    156  vcpu->arch.mmu->root_role.level); in kvm_mmu_load_pgd()
    160  struct kvm_mmu *mmu) in kvm_mmu_refresh_passthrough_bits() argument
    171  if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu) in kvm_mmu_refresh_passthrough_bits()
    174  __kvm_mmu_refresh_passthrough_bits(vcpu, mmu); in kvm_mmu_refresh_passthrough_bits()
    185  static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in permission_fault() argument
    211  kvm_mmu_refresh_passthrough_bits(vcpu, mmu); in permission_fault()
    213  fault = (mmu->permissions[index] >> pte_access) & 1; in permission_fault()
    [all …]
|
| /linux/drivers/accel/habanalabs/common/mmu/ |
| H A D | Makefile |
    2  HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
    3  common/mmu/mmu_v2.o common/mmu/mmu_v2_hr.o
|