/linux/drivers/staging/media/ipu3/ |
ipu3-mmu.c |
  #include "ipu3-mmu.h"
   * @mmu: MMU to perform the invalidate operation on
  static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
  	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
  static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
  				     void (*func)(struct imgu_mmu *mmu))
  	if (!pm_runtime_get_if_in_use(mmu->dev))
  	func(mmu);
  	pm_runtime_put(mmu->dev);
   * @mmu: MMU to set the CIO gate bit in.
  [all …]
|
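The call_if_imgu_is_powered() fragment above is a common runtime-PM guard: perform a hardware operation only if the device is already powered, rather than waking it up just for a TLB invalidate. A minimal sketch of the pattern, with the hypothetical names my_dev/my_call_if_powered standing in for the ipu3 types:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct my_dev {
        struct device *dev;        /* hypothetical driver context */
    };

    /*
     * Run @op only if the device is already runtime-active.
     * pm_runtime_get_if_in_use() takes a usage-count reference only
     * when the device is in use; otherwise we skip the hardware
     * access entirely instead of powering the device up for it.
     */
    static void my_call_if_powered(struct my_dev *d,
                                   void (*op)(struct my_dev *))
    {
        if (!pm_runtime_get_if_in_use(d->dev))
            return;

        op(d);
        pm_runtime_put(d->dev);    /* drop the reference taken above */
    }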
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
base.c |
  nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
  	list_add(&ptp->head, &mmu->ptp.list);
  	nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
  nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
  	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
  	ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
  	list_add(&ptp->head, &mmu->ptp.list);
  nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
  	list_for_each_entry(ptc, &mmu->ptc.list, head) {
  	list_add(&ptc->head, &mmu->ptc.list);
  [all …]
|
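nvkm_mmu_ptp_get() above tries to reuse a previously freed page-table wrapper from mmu->ptp.list before falling back to a fresh allocation, and nvkm_mmu_ptp_put() returns entries to that list. A self-contained sketch of the free-list reuse idiom (my_pt and the cache head are hypothetical stand-ins for the nvkm types):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_pt {
        struct list_head head;    /* links the entry into the cache */
        /* ... page-table payload would live here ... */
    };

    /* Pop a cached entry if one exists, otherwise allocate fresh. */
    static struct my_pt *my_pt_get(struct list_head *cache)
    {
        struct my_pt *pt;

        pt = list_first_entry_or_null(cache, struct my_pt, head);
        if (pt) {
            list_del(&pt->head);
            return pt;
        }
        return kzalloc(sizeof(*pt), GFP_KERNEL);
    }

    /* Return an entry to the cache instead of freeing it. */
    static void my_pt_put(struct list_head *cache, struct my_pt *pt)
    {
        list_add(&pt->head, cache);
    }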
Kbuild |
  nvkm-y += nvkm/subdev/mmu/base.o
  nvkm-y += nvkm/subdev/mmu/nv04.o
  nvkm-y += nvkm/subdev/mmu/nv41.o
  nvkm-y += nvkm/subdev/mmu/nv44.o
  nvkm-y += nvkm/subdev/mmu/nv50.o
  nvkm-y += nvkm/subdev/mmu/g84.o
  nvkm-y += nvkm/subdev/mmu/mcp77.o
  nvkm-y += nvkm/subdev/mmu/gf100.o
  nvkm-y += nvkm/subdev/mmu/gk104.o
  nvkm-y += nvkm/subdev/mmu/gk20a.o
  [all …]
|
ummu.c |
  struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
  if (mmu->func->mem.user.oclass) {
  	oclass->base = mmu->func->mem.user;
  if (mmu->func->vmm.user.oclass) {
  	oclass->base = mmu->func->vmm.user;
  struct nvkm_mmu *mmu = ummu->mmu;
  if ((index = args->v0.index) >= mmu->heap_nr)
  	args->v0.size = mmu->heap[index].size;
  struct nvkm_mmu *mmu = ummu->mmu;
  if ((index = args->v0.index) >= mmu->type_nr)
  [all …]
|
/linux/drivers/gpu/drm/nouveau/nvif/ |
mmu.c |
  #include <nvif/mmu.h>
  nvif_mmu_dtor(struct nvif_mmu *mmu)
  	if (!nvif_object_constructed(&mmu->object))
  	kfree(mmu->kind);
  	kfree(mmu->type);
  	kfree(mmu->heap);
  	nvif_object_dtor(&mmu->object);
  struct nvif_mmu *mmu)
  	mmu->heap = NULL;
  	mmu->type = NULL;
  [all …]
|
/linux/drivers/iommu/ |
ipmmu-vmsa.c |
  struct ipmmu_vmsa_device *mmu;
  /* MMU "context" registers */
  static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
  	return mmu->root == mmu;
  struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
  if (ipmmu_is_root(mmu))
  	*rootp = mmu;
  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
  	return ioread32(mmu->base + offset);
  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
  [all …]
|
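ipmmu_read()/ipmmu_write() above are deliberately thin: wrapping ioread32()/iowrite32() once keeps the base-pointer arithmetic in a single place and makes every call site read as a plain register access. The same shape, assuming a hypothetical device struct:

    #include <linux/io.h>

    struct my_ipmmu {
        void __iomem *base;    /* mapped register window */
    };

    static u32 my_read(struct my_ipmmu *mmu, unsigned int offset)
    {
        return ioread32(mmu->base + offset);
    }

    static void my_write(struct my_ipmmu *mmu, unsigned int offset, u32 data)
    {
        iowrite32(data, mmu->base + offset);
    }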
/linux/drivers/media/pci/intel/ipu6/ |
ipu6-mmu.c |
  #include "ipu6-mmu.h"
  static void tlb_invalidate(struct ipu6_mmu *mmu)
  	spin_lock_irqsave(&mmu->ready_lock, flags);
  	if (!mmu->ready) {
  		spin_unlock_irqrestore(&mmu->ready_lock, flags);
  	for (i = 0; i < mmu->nr_mmus; i++) {
  		if (mmu->mmu_hw[i].insert_read_before_invalidate)
  			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
  		writel(0xffffffff, mmu->mmu_hw[i].base +
  	spin_unlock_irqrestore(&mmu->ready_lock, flags);
  [all …]
|
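tlb_invalidate() in the ipu6-mmu.c fragment holds ready_lock across the walk, bails out if the hardware is not marked ready, and issues a dummy read before the invalidate write on units that need it. A sketch of that guarded-invalidate shape — all my_ names and the register offset are placeholders, not the ipu6 layout:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define MY_REG_INVALIDATE 0x20    /* placeholder register offset */

    struct my_mmu_hw {
        void __iomem *base;
        bool read_before_invalidate;  /* hardware quirk flag */
    };

    struct my_mmu {
        spinlock_t ready_lock;
        bool ready;
        unsigned int nr_mmus;
        struct my_mmu_hw hw[8];
    };

    static void my_tlb_invalidate(struct my_mmu *mmu)
    {
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&mmu->ready_lock, flags);
        if (!mmu->ready) {            /* hardware not initialised yet */
            spin_unlock_irqrestore(&mmu->ready_lock, flags);
            return;
        }

        for (i = 0; i < mmu->nr_mmus; i++) {
            /* Some units need a posted read to order the write. */
            if (mmu->hw[i].read_before_invalidate)
                readl(mmu->hw[i].base);
            writel(0xffffffff, mmu->hw[i].base + MY_REG_INVALIDATE);
        }
        spin_unlock_irqrestore(&mmu->ready_lock, flags);
    }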
ipu6-dma.c |
  #include "ipu6-mmu.h"
  static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
  	list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
  struct ipu6_mmu *mmu = sys->mmu;
  	info = get_vm_info(mmu, dma_handle);
  struct ipu6_mmu *mmu = sys->mmu;
  	iova = alloc_iova(&mmu->dmap->iovad, count,
  	ret = ipu6_mmu_map(mmu->dmap->mmu_info,
  	list_add(&info->list, &mmu->vma_list);
  	pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
  [all …]
|
/linux/drivers/gpu/drm/panfrost/ |
panfrost_mmu.c |
  /* Wait for the MMU status to indicate there is no active command, in
  /* write AS_COMMAND when MMU is ready to accept another command */
  /* Run the MMU operation */
  struct panfrost_mmu *mmu,
  	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
  static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
  	int as_nr = mmu->as;
  	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
  u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
  	as = mmu->as;
  [all …]
|
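wait_ready()/write_cmd() in panfrost_mmu.c implement the usual one-command-at-a-time submit protocol: poll a status register until the previous command drains, then write the next. A sketch using the generic iopoll helper; the register offsets and the ACTIVE bit are placeholders, not Panfrost's actual layout:

    #include <linux/bits.h>
    #include <linux/iopoll.h>

    #define MY_AS_STATUS     0x00    /* placeholder register offsets */
    #define MY_AS_COMMAND    0x04
    #define MY_STATUS_ACTIVE BIT(0)

    /* Spin until no command is active, or give up after 100 ms. */
    static int my_wait_ready(void __iomem *as_base)
    {
        u32 val;

        /* poll every 10 us, time out after 100000 us */
        return readl_relaxed_poll_timeout_atomic(as_base + MY_AS_STATUS,
                                                 val,
                                                 !(val & MY_STATUS_ACTIVE),
                                                 10, 100000);
    }

    static int my_write_cmd(void __iomem *as_base, u32 cmd)
    {
        int ret = my_wait_ready(as_base);

        if (!ret)
            writel_relaxed(cmd, as_base + MY_AS_COMMAND);
        return ret;
    }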
/linux/arch/arm64/kvm/hyp/nvhe/ |
tlb.c |
  struct kvm_s2_mmu *mmu;
  static void enter_vmid_context(struct kvm_s2_mmu *mmu,
  struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
  	cxt->mmu = NULL;
  	if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
  	cxt->mmu = vcpu->arch.hw_mmu;
  	if (mmu == host_s2_mmu)
  	cxt->mmu = host_s2_mmu;
   * We're guaranteed that the host S1 MMU is enabled, so
   * TLB fill. For guests, we ensure that the S1 MMU is
  [all …]
|
/linux/arch/arm/mm/ |
Kconfig |
  depends on !MMU
  select CPU_COPY_V4WT if MMU
  select CPU_TLB_V4WT if MMU
  MMU built around an ARM7TDMI core.
  depends on !MMU
  depends on !MMU
  select CPU_COPY_V4WB if MMU
  select CPU_TLB_V4WBI if MMU
  select CPU_COPY_V4WB if MMU
  select CPU_TLB_V4WBI if MMU
  [all …]
|
/linux/arch/arc/mm/ |
tlb.c |
  #include <asm/mmu.h>
   * If Not already present get a free slot from MMU.
   * Commit the Entry to MMU
   * Un-conditionally (without lookup) erase the entire MMU contents
  struct cpuinfo_arc_mmu *mmu = &mmuinfo;
  int num_tlb = mmu->sets * mmu->ways;
   * Only for fork( ) do we need to move parent to a new MMU ctxt,
  /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
   * Delete TLB entry in MMU for a given page (??? address)
   * -it ASID for TLB entry is fetched from MMU ASID reg (valid for curr)
  [all …]
|
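local_flush_tlb_all() above sizes its loop as sets × ways and unconditionally writes an invalid entry into every slot. The generic shape of such a full flush (my_tlb_write_entry() stands in for the hardware-specific per-entry command, which on ARC is a series of auxiliary-register writes):

    struct my_mmu_info {
        unsigned int sets;
        unsigned int ways;
    };

    /* Hardware-specific per-entry invalidate; body omitted. */
    static inline void my_tlb_write_entry(unsigned int idx, unsigned long pd)
    {
        /* e.g. program index + a null PD, then issue a TLBWrite command */
    }

    /* Erase the whole TLB by walking every (set, way) slot. */
    static void my_flush_tlb_all(const struct my_mmu_info *mmu)
    {
        unsigned int num_tlb = mmu->sets * mmu->ways;
        unsigned int idx;

        for (idx = 0; idx < num_tlb; idx++)
            my_tlb_write_entry(idx, 0);
    }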
/linux/drivers/gpu/drm/msm/ |
msm_mmu.h |
  void (*detach)(struct msm_mmu *mmu);
  int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
  int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
  void (*destroy)(struct msm_mmu *mmu);
  void (*resume_translation)(struct msm_mmu *mmu);
  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
  	mmu->dev = dev;
  	mmu->funcs = funcs;
  	mmu->type = type;
  static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
  [all …]
|
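msm_mmu.h above is a textbook ops-table abstraction: a struct of function pointers plus a small init helper that wires dev, funcs and type into the base object, so each backend (the IOMMU path in msm_iommu.c, the a2xx GPU MMU further down) supplies its own table. A self-contained miniature of the pattern — simplified types, not the real msm_mmu_funcs signatures:

    #include <linux/device.h>
    #include <linux/types.h>

    struct my_mmu;

    struct my_mmu_funcs {
        int  (*map)(struct my_mmu *mmu, u64 iova, size_t len, int prot);
        int  (*unmap)(struct my_mmu *mmu, u64 iova, size_t len);
        void (*destroy)(struct my_mmu *mmu);
    };

    struct my_mmu {
        const struct my_mmu_funcs *funcs;    /* backend's vtable */
        struct device *dev;
    };

    static inline void my_mmu_init(struct my_mmu *mmu, struct device *dev,
                                   const struct my_mmu_funcs *funcs)
    {
        mmu->dev = dev;
        mmu->funcs = funcs;    /* backend picks the implementation */
    }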
msm_iommu.c |
  static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
  	return container_of(mmu, struct msm_iommu_pagetable, base);
  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
  	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
  static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
  	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
  	msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
  static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
  	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
  int msm_iommu_pagetable_params(struct msm_mmu *mmu,
  [all …]
|
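to_pagetable() above is the standard container_of() downcast: the backend embeds the generic struct msm_mmu inside its private struct and recovers the private pointer from the base pointer. In miniature, with hypothetical types:

    #include <linux/container_of.h>

    struct my_base {
        int type;
    };

    struct my_pagetable {
        struct my_base base;    /* embedded generic object */
        void *priv;             /* backend-private state */
    };

    static struct my_pagetable *to_my_pagetable(struct my_base *b)
    {
        /* Recover the containing struct; works for any member offset. */
        return container_of(b, struct my_pagetable, base);
    }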
/linux/arch/arm64/kvm/hyp/vhe/ |
tlb.c |
  struct kvm_s2_mmu *mmu;
  static void enter_vmid_context(struct kvm_s2_mmu *mmu,
  	if (vcpu && mmu != vcpu->arch.hw_mmu)
  		cxt->mmu = vcpu->arch.hw_mmu;
  	else
  		cxt->mmu = NULL;
   * allocate IPA->PA walks, so we enable the S1 MMU...
  	__load_stage2(mmu, mmu->arch);
  /* ... and the stage-2 MMU context that we switched away from */
  	if (cxt->mmu)
  		__load_stage2(cxt->mmu, cxt->mmu->arch);
  [all …]
|
/linux/Documentation/admin-guide/mm/ |
nommu-mmap.rst |
  No-MMU memory mapping support

  The kernel has limited support for memory mapping under no-MMU conditions, such …
  The behaviour is similar between the MMU and no-MMU cases, but not identical; …
  In the MMU case: VM regions backed by arbitrary pages; copy-on-write …
  In the no-MMU case: VM regions backed by arbitrary contiguous runs of …
  … shared across fork() or clone() without CLONE_VM in the MMU case. Since the no-MMU case doesn't support these, behaviour is identical to …
  In the MMU case: VM regions backed by pages read from file; changes to …
  In the no-MMU case: …
  … are visible in other processes (no MMU protection), but should not …
  [all …]
|
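To make the quoted rules concrete: a private file mapping is demand-paged under an MMU but is typically copied into an anonymous contiguous buffer on no-MMU kernels, and there is no hardware protection between processes. A small userspace probe that runs on both kinds of kernel — whether a given no-MMU configuration accepts this exact mmap() depends on the filesystem and kernel config, so treat it as illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/etc/hostname", O_RDONLY);
        char *p;

        if (fd < 0)
            return 1;

        /* MMU: demand-paged, copy-on-write semantics available.
         * no-MMU: commonly a contiguous copy of the data; may fail
         * outright depending on filesystem support. */
        p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED)
            return 1;

        printf("%.16s\n", p);
        munmap(p, 4096);
        close(fd);
        return 0;
    }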
/linux/arch/arm64/include/asm/ |
kvm_mmu.h |
  #include <asm/mmu.h>
  #define kvm_phys_shift(mmu)	VTCR_EL2_IPA((mmu)->vtcr)
  #define kvm_phys_size(mmu)	(_AC(1, ULL) << kvm_phys_shift(mmu))
  #define kvm_phys_mask(mmu)	(kvm_phys_size(mmu) - _AC(1, ULL))
  void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
  void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
  void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_…
  kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  __load_stage2(struct kvm_s2_mmu *mmu, struct kvm_arch *arch)
  kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
  kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
  kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
  [all …] |
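The three kvm_phys_* macros above form a standard shift/size/mask triple: the VTCR field supplies the IPA width in bits, and the size and mask follow arithmetically. Restated as plain C for clarity (names are ours, not KVM's):

    #include <stdint.h>

    static inline uint64_t phys_size(unsigned int shift)
    {
        return UINT64_C(1) << shift;    /* kvm_phys_size() analogue */
    }

    static inline uint64_t phys_mask(unsigned int shift)
    {
        return phys_size(shift) - 1;    /* kvm_phys_mask() analogue */
    }

    /* e.g. shift == 40 gives a 1 TiB IPA space; mask == 0xffffffffff */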
/linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/ |
branch.json |
  "…still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off",
  "… still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off"
  "… the address. This event still counts when branch prediction is disabled due to the MMU being off",
  "…r the address. This event still counts when branch prediction is disabled due to the MMU being off"
  "… the address. This event still counts when branch prediction is disabled due to the MMU being off",
  "…d the address. This event still counts when branch prediction is disabled due to the MMU being off"
  "…ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…"
  "…ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…"
  "…he condition. This event still counts when branch prediction is disabled due to the MMU being off",
  "…the condition. This event still counts when branch prediction is disabled due to the MMU being off"
  [all …]
|
/linux/arch/xtensa/ |
Kconfig |
  select ARCH_HAS_BINFMT_FLAT if !MMU
  select ARCH_HAS_DMA_PREP_COHERENT if MMU
  select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
  select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
  select ARCH_HAS_DMA_SET_UNCACHED if MMU
  select DMA_NONCOHERENT_MMAP if MMU
  select GENERIC_IOREMAP if MMU
  select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
  config MMU
  select MMU
  [all …]
|
/linux/Documentation/devicetree/bindings/iommu/ |
samsung,sysmmu.yaml |
  title: Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)

  System MMU is an IOMMU and supports identical translation table format to …
  permissions, shareability and security protection. In addition, System MMU has …
  master), but one System MMU can handle transactions from only one peripheral device. The relation between a System MMU and the peripheral device needs to be …
  * MFC has one System MMU on its left and right bus.
  * FIMD in Exynos5420 has one System MMU for window 0 and 4, the other system MMU …
  * M2M Scalers and G2D in Exynos5420 has one System MMU on the read channel and the other System MMU on the write channel.
  For information on assigning System MMU controller to its peripheral devices, …
  [all …]
|
/linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/ |
branch.json |
  "…r is retired. This event still counts when branch prediction is disabled due to the MMU being off",
  "…or is retired. This event still counts when branch prediction is disabled due to the MMU being off"
  "… the address. This event still counts when branch prediction is disabled due to the MMU being off",
  "…r the address. This event still counts when branch prediction is disabled due to the MMU being off"
  "… the address. This event still counts when branch prediction is disabled due to the MMU being off",
  "…d the address. This event still counts when branch prediction is disabled due to the MMU being off"
  "…ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…"
  "…ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…"
  "…he condition. This event still counts when branch prediction is disabled due to the MMU being off",
  "…the condition. This event still counts when branch prediction is disabled due to the MMU being off"
  [all …]
|
/linux/arch/riscv/ |
Kconfig |
  select ARCH_HAS_DEBUG_VIRTUAL if MMU
  select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU
  select ARCH_HAS_SET_DIRECT_MAP if MMU
  select ARCH_HAS_SET_MEMORY if MMU
  select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
  select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
  select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if 64BIT && MMU
  select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
  select ARCH_SUPPORTS_HUGETLBFS if MMU
  select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
  [all …]
|
/linux/Documentation/virt/kvm/x86/ |
mmu.rst |
  The x86 kvm shadow mmu

  The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible for presenting a standard x86 mmu to the guest, while translating guest …
  The mmu code attempts to satisfy the following requirements: …
  … on an emulated mmu except for timing (we attempt to comply …
  … minimize the performance penalty imposed by the mmu …
  The mmu supports first-generation mmu hardware, which allows an atomic switch …
  … it exposes is the traditional 2/3/4 level x86 mmu, with support for global …
  The primary job of the mmu is to program the processor's mmu to translate …
  … number of required translations matches the hardware, the mmu operates in …
  [all …]
|
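The core idea in the quoted text is composition: the guest's page tables map gva→gpa, the host maps gpa→hpa, and the shadow MMU exists so the hardware can walk a single table that encodes the composition. A toy model of that composition, purely illustrative:

    #include <stdint.h>

    typedef uint64_t gva_t, gpa_t, hpa_t;

    /* gva -> gpa is the guest's translation; gpa -> hpa is the host's.
     * The shadow page table encodes host_walk(guest_walk(.)) so the
     * processor resolves both levels in one walk. */
    static hpa_t shadow_translate(gva_t gva,
                                  gpa_t (*guest_walk)(gva_t),
                                  hpa_t (*host_walk)(gpa_t))
    {
        return host_walk(guest_walk(gva));
    }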
/linux/arch/sh/mm/ |
Kconfig |
  config MMU
  Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to …
  On other systems (such as the SH-3 and 4) where an MMU exists, …
  … MMU implicitly switched off.
  def_bool !MMU
  On MMU-less systems, any of these page sizes can be selected …
  default "0x80000000" if MMU
  default "13" if !MMU
  default !MMU
  depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
  [all …]
|
/linux/drivers/gpu/drm/msm/adreno/ |
a2xx_gpummu.c |
  static void a2xx_gpummu_detach(struct msm_mmu *mmu)
  static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
  	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
  static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
  	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
  static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
  static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
  	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
  	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
  void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
  [all …]
|
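a2xx_gpummu_destroy() above tears down its page table with dma_free_attrs(), whose arguments must mirror the dma_alloc_attrs() call that created the buffer: same device, size, CPU address, DMA handle and attrs. The paired shape, with a hypothetical table size:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/sizes.h>

    #define MY_TABLE_SIZE SZ_64K    /* hypothetical page-table size */

    struct my_gpummu {
        u32 *table;          /* CPU virtual address of the table */
        dma_addr_t pt_base;  /* device (bus) address of the table */
    };

    static int my_gpummu_alloc(struct device *dev, struct my_gpummu *g)
    {
        g->table = dma_alloc_attrs(dev, MY_TABLE_SIZE, &g->pt_base,
                                   GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
        return g->table ? 0 : -ENOMEM;
    }

    static void my_gpummu_destroy(struct device *dev, struct my_gpummu *g)
    {
        /* Every parameter must match the allocation above. */
        dma_free_attrs(dev, MY_TABLE_SIZE, g->table, g->pt_base,
                       DMA_ATTR_FORCE_CONTIGUOUS);
    }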