| /linux/drivers/staging/media/ipu3/ | 
| ipu3-mmu.c | 
 21 #include "ipu3-mmu.h"
 73  * @mmu: MMU to perform the invalidate operation on
 78 static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)  in imgu_mmu_tlb_invalidate()  argument
 80 	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);  in imgu_mmu_tlb_invalidate()
 83 static void call_if_imgu_is_powered(struct imgu_mmu *mmu,  in call_if_imgu_is_powered()  argument
 84 				    void (*func)(struct imgu_mmu *mmu))  in call_if_imgu_is_powered()  argument
 86 	if (!pm_runtime_get_if_in_use(mmu->dev))  in call_if_imgu_is_powered()
 89 	func(mmu);  in call_if_imgu_is_powered()
 90 	pm_runtime_put(mmu->dev);  in call_if_imgu_is_powered()
 95  * @mmu: MMU to set the CIO gate bit in.
 [all …]
 
 | 
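The gated invalidate in the ipu3-mmu.c excerpt above is worth spelling out. A sketch reassembled from the visible lines, with added comments (struct imgu_mmu and the register names are the driver's own; the interpretation of the return values is a hedged reading, not a statement of the driver's intent):

#include <linux/pm_runtime.h>
#include <linux/io.h>

/* A non-zero return from pm_runtime_get_if_in_use() means either
 * "device active, reference taken" or an error because runtime PM is
 * disabled (device effectively always on); both appear safe for a
 * register write. A zero return means the device is suspended, and a
 * suspended IMGU rebuilds its TLB on resume anyway, so skipping the
 * invalidate is harmless. */
static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
				    void (*func)(struct imgu_mmu *mmu))
{
	if (!pm_runtime_get_if_in_use(mmu->dev))
		return;

	func(mmu);
	pm_runtime_put(mmu->dev);
}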
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ | 
| base.c | 
 42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)  in nvkm_mmu_ptp_put()  argument
 51 		list_add(&ptp->head, &mmu->ptp.list);  in nvkm_mmu_ptp_put()
 56 		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);  in nvkm_mmu_ptp_put()
 65 nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)  in nvkm_mmu_ptp_get()  argument
 74 	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);  in nvkm_mmu_ptp_get()
 82 		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);  in nvkm_mmu_ptp_get()
 93 		list_add(&ptp->head, &mmu->ptp.list);  in nvkm_mmu_ptp_get()
 120 nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)  in nvkm_mmu_ptc_find()  argument
 124 	list_for_each_entry(ptc, &mmu->ptc.list, head) {  in nvkm_mmu_ptc_find()
 134 		list_add(&ptc->head, &mmu->ptc.list);  in nvkm_mmu_ptc_find()
 [all …]
 
 | 
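nvkm_mmu_ptc_find() above implements a size-bucketed cache: walk the bucket list for a matching size, and create the bucket on first use. A minimal hypothetical sketch of the same shape (struct pt_cache and pt_cache_find are invented names for illustration):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* One bucket per page-table size; buckets are created lazily. */
struct pt_cache {
	struct list_head head;	/* link in the bucket list */
	u32 size;		/* allocation size this bucket serves */
	struct list_head item;	/* cached, currently-unused page tables */
};

static struct pt_cache *pt_cache_find(struct list_head *buckets, u32 size)
{
	struct pt_cache *ptc;

	list_for_each_entry(ptc, buckets, head) {
		if (ptc->size == size)
			return ptc;
	}

	ptc = kzalloc(sizeof(*ptc), GFP_KERNEL);	/* first use */
	if (ptc) {
		INIT_LIST_HEAD(&ptc->item);
		ptc->size = size;
		list_add(&ptc->head, buckets);
	}
	return ptc;
}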
| ummu.c | 
 35 	struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;  in nvkm_ummu_sclass()  local
 37 	if (mmu->func->mem.user.oclass) {  in nvkm_ummu_sclass()
 39 			oclass->base = mmu->func->mem.user;  in nvkm_ummu_sclass()
 45 	if (mmu->func->vmm.user.oclass) {  in nvkm_ummu_sclass()
 47 			oclass->base = mmu->func->vmm.user;  in nvkm_ummu_sclass()
 59 	struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_heap()  local
 67 		if ((index = args->v0.index) >= mmu->heap_nr)  in nvkm_ummu_heap()
 69 		args->v0.size = mmu->heap[index].size;  in nvkm_ummu_heap()
 79 	struct nvkm_mmu *mmu = ummu->mmu;  in nvkm_ummu_type()  local
 87 		if ((index = args->v0.index) >= mmu->type_nr)  in nvkm_ummu_type()
 [all …]
 
 | 
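Both nvkm_ummu_heap() and nvkm_ummu_type() above bounds-check a user-supplied index before touching the kernel-side array. A hedged sketch of that pattern (ummu_heap_query is a hypothetical helper; the field names follow the excerpt):

#include <linux/errno.h>
#include <linux/types.h>

/* The index arrives from userspace via the args structure and must be
 * validated against the kernel's heap count before use. */
static int ummu_heap_query(struct nvkm_mmu *mmu, u8 index, u64 *size)
{
	if (index >= mmu->heap_nr)	/* reject out-of-range user input */
		return -EINVAL;

	*size = mmu->heap[index].size;
	return 0;
}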
| mem.c | 
 33 	struct nvkm_mmu *mmu;  member
 88 			dma_unmap_page(mem->mmu->subdev.device->dev,  in nvkm_mem_dtor()
 144 nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,  in nvkm_mem_new_host()  argument
 147 	struct device *dev = mmu->subdev.device->dev;  in nvkm_mem_new_host()
 157 	if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&  in nvkm_mem_new_host()
 158 	    !(mmu->type[type].type & NVKM_MEM_UNCACHED))  in nvkm_mem_new_host()
 169 	mem->mmu = mmu;  in nvkm_mem_new_host()
 199 	if (mmu->dma_bits > 32)  in nvkm_mem_new_host()
 209 		mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,  in nvkm_mem_new_host()
 224 nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,  in nvkm_mem_new_type()  argument
 [all …]
 
 | 
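nvkm_mem_new_host() above maps host pages for DMA one page at a time. A simplified sketch of that loop with error unwinding (map_host_pages and its flat arguments are an assumption for illustration; the driver keeps these inside its own structures):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map each page for streaming DMA, recording the bus address; on a
 * mapping failure, unmap everything mapped so far and bail out. */
static int map_host_pages(struct device *dev, struct page **pages,
			  dma_addr_t *dma, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		dma[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
				      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma[i])) {
			while (--i >= 0)
				dma_unmap_page(dev, dma[i], PAGE_SIZE,
					       DMA_BIDIRECTIONAL);
			return -EFAULT;
		}
	}
	return 0;
}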
| /linux/drivers/gpu/drm/nouveau/nvif/ | 
| mmu.c | 
 22 #include <nvif/mmu.h>
 28 nvif_mmu_dtor(struct nvif_mmu *mmu)  in nvif_mmu_dtor()  argument
 30 	if (!nvif_object_constructed(&mmu->object))  in nvif_mmu_dtor()
 33 	kfree(mmu->kind);  in nvif_mmu_dtor()
 34 	kfree(mmu->type);  in nvif_mmu_dtor()
 35 	kfree(mmu->heap);  in nvif_mmu_dtor()
 36 	nvif_object_dtor(&mmu->object);  in nvif_mmu_dtor()
 41 	      struct nvif_mmu *mmu)  in nvif_mmu_ctor()  argument
 53 	mmu->heap = NULL;  in nvif_mmu_ctor()
 54 	mmu->type = NULL;  in nvif_mmu_ctor()
 [all …]
 
 | 
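The destructor order in the nvif/mmu.c excerpt is the usual mirror image of the constructor. The visible lines, reassembled with comments:

void nvif_mmu_dtor(struct nvif_mmu *mmu)
{
	if (!nvif_object_constructed(&mmu->object))
		return;		/* ctor never completed: nothing to undo */

	kfree(mmu->kind);	/* kfree(NULL) is a no-op, so a partially-  */
	kfree(mmu->type);	/* initialized object needs no special case */
	kfree(mmu->heap);
	nvif_object_dtor(&mmu->object);	/* object last, after its tables */
}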
| /linux/drivers/iommu/ | 
| ipmmu-vmsa.c | 
 71 	struct ipmmu_vmsa_device *mmu;  member
 99 /* MMU "context" registers */
 149 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)  in ipmmu_is_root()  argument
 151 	return mmu->root == mmu;  in ipmmu_is_root()
 156 	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);  in __ipmmu_check_device()  local
 159 	if (ipmmu_is_root(mmu))  in __ipmmu_check_device()
 160 		*rootp = mmu;  in __ipmmu_check_device()
 177 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)  in ipmmu_read()  argument
 179 	return ioread32(mmu->base + offset);  in ipmmu_read()
 182 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,  in ipmmu_write()  argument
 [all …]
 
 | 
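ipmmu_read()/ipmmu_write() above funnel every register access through a single accessor pair, so the base address (and any future tracing) lives in one place. Reassembled sketch; the excerpt truncates the write body, so it is inferred here:

#include <linux/io.h>
#include <linux/types.h>

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);	/* inferred from the read side */
}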
| /linux/drivers/media/pci/intel/ipu6/ | 
| ipu6-mmu.c | 
 29 #include "ipu6-mmu.h"
 54 static void tlb_invalidate(struct ipu6_mmu *mmu)  in tlb_invalidate()  argument
 59 	spin_lock_irqsave(&mmu->ready_lock, flags);  in tlb_invalidate()
 60 	if (!mmu->ready) {  in tlb_invalidate()
 61 		spin_unlock_irqrestore(&mmu->ready_lock, flags);  in tlb_invalidate()
 65 	for (i = 0; i < mmu->nr_mmus; i++) {  in tlb_invalidate()
 74 		if (mmu->mmu_hw[i].insert_read_before_invalidate)  in tlb_invalidate()
 75 			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);  in tlb_invalidate()
 77 		writel(0xffffffff, mmu->mmu_hw[i].base +  in tlb_invalidate()
 87 	spin_unlock_irqrestore(&mmu->ready_lock, flags);  in tlb_invalidate()
 [all …]
 
 | 
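The top of ipu6's tlb_invalidate() is a lock-protected ready check: the 'ready' flag and the hardware access are serialized by ready_lock, so an invalidate racing with teardown never touches a dead device. A sketch of just that guard (the per-MMU register writes elided in the excerpt stay elided here):

#include <linux/spinlock.h>

static void tlb_invalidate_guarded(struct ipu6_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	if (!mmu->ready) {		/* hardware gone or not yet up */
		spin_unlock_irqrestore(&mmu->ready_lock, flags);
		return;
	}
	/* ... per-MMU invalidate register writes, as in the excerpt ... */
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}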
| /linux/drivers/gpu/drm/msm/ | 
| msm_mmu.h | 
 17 	void (*detach)(struct msm_mmu *mmu);
 18 	void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
 20 	int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
 21 	void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
 22 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
 24 	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
 25 	void (*destroy)(struct msm_mmu *mmu);
 26 	void (*set_stall)(struct msm_mmu *mmu, bool enable);
 36  * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
 44 	 * @pages: Array of pages preallocated for MMU table updates.
 [all …]
 
 | 
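The msm_mmu function table above is a classic kernel ops vtable: each backend (IOMMU-backed, a2xx GPU MMU) fills in its own implementations, and callers dispatch through the pointer. A hypothetical minimal version of the idiom (all names below are invented for illustration):

#include <linux/types.h>

struct my_mmu;

struct my_mmu_funcs {
	int  (*map)(struct my_mmu *mmu, u64 iova, size_t len, int prot);
	int  (*unmap)(struct my_mmu *mmu, u64 iova, size_t len);
	void (*destroy)(struct my_mmu *mmu);
};

struct my_mmu {
	const struct my_mmu_funcs *funcs;	/* filled in by the backend */
};

static inline int my_mmu_map(struct my_mmu *mmu, u64 iova, size_t len,
			     int prot)
{
	return mmu->funcs->map(mmu, iova, len, prot);	/* virtual dispatch */
}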
| msm_iommu.c | 
 40 static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)  in to_pagetable()  argument
 42 	return container_of(mmu, struct msm_iommu_pagetable, base);  in to_pagetable()
 101 static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,  in msm_iommu_pagetable_unmap()  argument
 104 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_unmap()
 134 static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)  in msm_iommu_pagetable_map_prr()  argument
 136 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_map_prr()
 156 			msm_iommu_pagetable_unmap(mmu, iova, addr - iova);  in msm_iommu_pagetable_map_prr()
 164 static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,  in msm_iommu_pagetable_map()  argument
 168 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);  in msm_iommu_pagetable_map()
 175 		return msm_iommu_pagetable_map_prr(mmu, iova, len, prot);  in msm_iommu_pagetable_map()
 [all …]
 
 | 
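Line 156 of the excerpt shows the rollback on partial failure: if mapping fails partway through a range, unmap exactly the [iova, addr) prefix that did succeed before returning. A sketch of that shape (map_one_page is a hypothetical helper standing in for the real per-page mapping call):

#include <linux/mm.h>

static int map_range(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
{
	u64 addr = iova;

	while (addr < iova + len) {
		if (map_one_page(mmu, addr, prot)) {	/* hypothetical */
			/* undo only what was actually mapped */
			msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
			return -EINVAL;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}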
| /linux/arch/arm64/kvm/hyp/nvhe/ | 
| tlb.c | 
 14 	struct kvm_s2_mmu	*mmu;  member
 19 static void enter_vmid_context(struct kvm_s2_mmu *mmu,  in enter_vmid_context()  argument
 23 	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;  in enter_vmid_context()
 29 	cxt->mmu = NULL;  in enter_vmid_context()
 62 		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))  in enter_vmid_context()
 65 		cxt->mmu = vcpu->arch.hw_mmu;  in enter_vmid_context()
 68 		if (mmu == host_s2_mmu)  in enter_vmid_context()
 71 		cxt->mmu = host_s2_mmu;  in enter_vmid_context()
 81 		 * We're guaranteed that the host S1 MMU is enabled, so  in enter_vmid_context()
 83 		 * TLB fill. For guests, we ensure that the S1 MMU is  in enter_vmid_context()
 [all …]
 
 | 
| /linux/arch/arm/mm/ | 
| Kconfig | 
 11 	depends on !MMU
 30 	select CPU_COPY_V4WT if MMU
 34 	select CPU_TLB_V4WT if MMU
 37 	  MMU built around an ARM7TDMI core.
 45 	depends on !MMU
 63 	depends on !MMU
 82 	select CPU_COPY_V4WB if MMU
 86 	select CPU_TLB_V4WBI if MMU
 101 	select CPU_COPY_V4WB if MMU
 105 	select CPU_TLB_V4WBI if MMU
 [all …]
 
 | 
| /linux/arch/arc/mm/ | 
| tlb.c | 
 16 #include <asm/mmu.h>
 87 	 * If Not already present get a free slot from MMU.  in tlb_entry_insert()
 99 	 * Commit the Entry to MMU  in tlb_entry_insert()
 131  * Un-conditionally (without lookup) erase the entire MMU contents
 136 	struct cpuinfo_arc_mmu *mmu = &mmuinfo;  in local_flush_tlb_all()  local
 139 	int num_tlb = mmu->sets * mmu->ways;  in local_flush_tlb_all()
 182 	 * Only for fork( ) do we need to move parent to a new MMU ctxt,  in local_flush_tlb_mm()
 245 /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 274  * Delete TLB entry in MMU for a given page (??? address)
 403 	 * -it ASID for TLB entry is fetched from MMU ASID reg (valid for curr)  in create_tlb()
 [all …]
 
 | 
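local_flush_tlb_all() above sizes its loop as sets * ways from the probed MMU geometry, then overwrites every entry. A sketch of the shape only; write_tlb_index()/write_tlb_command() and TLB_WRITE_INVALID are illustrative stand-ins, not the real ARC auxiliary-register accessors:

static void flush_all_entries(struct cpuinfo_arc_mmu *mmu)
{
	int num_tlb = mmu->sets * mmu->ways;	/* total entries, per probe */
	int entry;

	for (entry = 0; entry < num_tlb; entry++) {
		write_tlb_index(entry);			/* stand-in */
		write_tlb_command(TLB_WRITE_INVALID);	/* stand-in */
	}
}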
| /linux/arch/arm64/kvm/hyp/vhe/ | 
| tlb.c | 
 14 	struct kvm_s2_mmu	*mmu;  member
 20 static void enter_vmid_context(struct kvm_s2_mmu *mmu,  in enter_vmid_context()  argument
 28 	if (vcpu && mmu != vcpu->arch.hw_mmu)  in enter_vmid_context()
 29 		cxt->mmu = vcpu->arch.hw_mmu;  in enter_vmid_context()
 31 		cxt->mmu = NULL;  in enter_vmid_context()
 41 		 * allocate IPA->PA walks, so we enable the S1 MMU...  in enter_vmid_context()
 63 	__load_stage2(mmu, mmu->arch);  in enter_vmid_context()
 79 	/* ... and the stage-2 MMU context that we switched away from */  in exit_vmid_context()
 80 	if (cxt->mmu)  in exit_vmid_context()
 81 		__load_stage2(cxt->mmu, cxt->mmu->arch);  in exit_vmid_context()
 [all …]
 
 | 
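The enter/exit pair above is a save-and-restore idiom: record the stage-2 MMU being displaced (or NULL if the target is already current), and reload it on exit. Reassembled from the visible lines and heavily simplified; the real functions also manage HCR_EL2.TGE and the necessary barriers:

static void enter_ctx(struct kvm_vcpu *vcpu, struct kvm_s2_mmu *mmu,
		      struct tlb_inv_context *cxt)
{
	if (vcpu && mmu != vcpu->arch.hw_mmu)
		cxt->mmu = vcpu->arch.hw_mmu;	/* remember what to restore */
	else
		cxt->mmu = NULL;		/* already current: exit is a no-op */

	__load_stage2(mmu, mmu->arch);		/* switch to the target context */
}

static void exit_ctx(struct tlb_inv_context *cxt)
{
	if (cxt->mmu)				/* restore the displaced context */
		__load_stage2(cxt->mmu, cxt->mmu->arch);
}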
| /linux/arch/m68k/ | 
| Kconfig | 
 8 	select ARCH_HAS_CPU_FINALIZE_INIT if MMU
 21 	select GENERIC_IOMAP if HAS_IOPORT && MMU && !COLDFIRE
 38 	select MMU_GATHER_NO_RANGE if MMU
 41 	select NO_DMA if !MMU && !COLDFIRE
 44 	select UACCESS_MEMCPY if !MMU
 83 config MMU  config
 84 	bool "MMU-based Paged Memory Management Support"
 87 	  Select if you want MMU-based virtualised addressing space
 91 	def_bool MMU && M68KCLASSIC
 95 	def_bool MMU && COLDFIRE
 [all …]
 
 | 
| /linux/Documentation/admin-guide/mm/ | 
| nommu-mmap.rst | 
 2 No-MMU memory mapping support
 5 The kernel has limited support for memory mapping under no-MMU conditions, such
 16 The behaviour is similar between the MMU and no-MMU cases, but not identical;
 21 	In the MMU case: VM regions backed by arbitrary pages; copy-on-write
 24 	In the no-MMU case: VM regions backed by arbitrary contiguous runs of
 30 	shared across fork() or clone() without CLONE_VM in the MMU case. Since
 31 	the no-MMU case doesn't support these, behaviour is identical to
 36 	In the MMU case: VM regions backed by pages read from file; changes to
 39 	In the no-MMU case:
 56 	   are visible in other processes (no MMU protection), but should not
 [all …]
 
 | 
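For concreteness, this is the userspace call whose semantics the document contrasts. On an MMU kernel this MAP_SHARED file mapping is coherent with the file; on a no-MMU kernel the same call may be refused or backed by a private copy, depending on the backing filesystem:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;	/* a no-MMU kernel may refuse this mapping */
	p[0] = 'x';		/* written back to the file only with an MMU,
				 * or a directly-mappable no-MMU filesystem */
	munmap(p, 4096);
	close(fd);
	return 0;
}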
| /linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/ | 
| branch.json | 
 18 …still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off",
 21 … still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off"
 24 … the address. This event still counts when branch prediction is disabled due to the MMU being off",
 27 …r the address. This event still counts when branch prediction is disabled due to the MMU being off"
 30 … the address. This event still counts when branch prediction is disabled due to the MMU being off",
 33 …d the address. This event still counts when branch prediction is disabled due to the MMU being off"
 36 …ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…
 39 …ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…
 42 …he condition. This event still counts when branch prediction is disabled due to the MMU being off",
 45 …the condition. This event still counts when branch prediction is disabled due to the MMU being off"
 [all …]
 
 | 
| /linux/Documentation/devicetree/bindings/iommu/ | 
| samsung,sysmmu.yaml | 
 7 title: Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
 17   System MMU is an IOMMU and supports identical translation table format to
 19   permissions, shareability and security protection. In addition, System MMU has
 25   master), but one System MMU can handle transactions from only one peripheral
 26   device. The relation between a System MMU and the peripheral device needs to be
 31   * MFC has one System MMU on its left and right bus.
 32   * FIMD in Exynos5420 has one System MMU for window 0 and 4, the other system MMU
 34   * M2M Scalers and G2D in Exynos5420 has one System MMU on the read channel and
 35     the other System MMU on the write channel.
 37   For information on assigning System MMU controller to its peripheral devices,
 [all …]
 
 | 
| /linux/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/ | 
| branch.json | 
 18 …r is retired. This event still counts when branch prediction is disabled due to the MMU being off",
 21 …or is retired. This event still counts when branch prediction is disabled due to the MMU being off"
 24 … the address. This event still counts when branch prediction is disabled due to the MMU being off",
 27 …r the address. This event still counts when branch prediction is disabled due to the MMU being off"
 30 … the address. This event still counts when branch prediction is disabled due to the MMU being off",
 33 …d the address. This event still counts when branch prediction is disabled due to the MMU being off"
 36 …ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…
 39 …ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…
 42 …he condition. This event still counts when branch prediction is disabled due to the MMU being off",
 45 …the condition. This event still counts when branch prediction is disabled due to the MMU being off"
 [all …]
 
 | 
| /linux/Documentation/virt/kvm/x86/ | 
| mmu.rst | 
 4 The x86 kvm shadow mmu
 7 The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible
 8 for presenting a standard x86 mmu to the guest, while translating guest
 11 The mmu code attempts to satisfy the following requirements:
 15                on an emulated mmu except for timing (we attempt to comply
 22                minimize the performance penalty imposed by the mmu
 62 The mmu supports first-generation mmu hardware, which allows an atomic switch
 65 it exposes is the traditional 2/3/4 level x86 mmu, with support for global
 72 The primary job of the mmu is to program the processor's mmu to translate
 86 number of required translations matches the hardware, the mmu operates in
 [all …]
 
 | 
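A toy model of the translation chain the text describes: the guest page tables define gva->gpa, KVM's memslots define gpa->hpa, and the shadow/TDP tables the mmu programs give hardware the composition. The two translators below are fakes for illustration, not KVM APIs:

#include <stdint.h>

typedef uint64_t gva_t, gpa_t, hpa_t;

static gpa_t guest_translate(gva_t gva) { return gva + 0x1000; }  /* fake */
static hpa_t host_translate(gpa_t gpa)  { return gpa + 0x2000; }  /* fake */

/* What a shadow page-table entry effectively encodes per mapped page. */
static hpa_t shadow_translate(gva_t gva)
{
	return host_translate(guest_translate(gva));
}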
| /linux/arch/arm64/include/asm/ | 
| kvm_mmu.h | 
 12 #include <asm/mmu.h>
 150 #define kvm_phys_shift(mmu)		VTCR_EL2_IPA((mmu)->vtcr)  argument
 151 #define kvm_phys_size(mmu)		(_AC(1, ULL) << kvm_phys_shift(mmu))  argument
 152 #define kvm_phys_mask(mmu)		(kvm_phys_size(mmu) - _AC(1, ULL))  argument
 171 void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
 173 void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
 174 void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
 177 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
 179 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 304 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)  in kvm_get_vttbr()  argument
 [all …]
 
 | 
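The three macros above form a standard shift/size/mask chain: an IPA width in bits yields the size in bytes (1 << shift) and an offset mask (size - 1). A standalone worked example for a 40-bit IPA space:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned shift = 40;			/* kvm_phys_shift() */
	uint64_t size  = 1ULL << shift;		/* kvm_phys_size()  */
	uint64_t mask  = size - 1;		/* kvm_phys_mask()  */

	printf("size=%#llx mask=%#llx\n",
	       (unsigned long long)size, (unsigned long long)mask);
	return 0;	/* prints size=0x10000000000 mask=0xffffffffff */
}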
| /linux/arch/sh/mm/ | 
| Kconfig | 
 4 config MMU  config
 12 	  Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to
 15 	  On other systems (such as the SH-3 and 4) where an MMU exists,
 17 	  MMU implicitly switched off.
 20 	def_bool !MMU
 26 	  On MMU-less systems, any of these page sizes can be selected
 30 	default "0x80000000" if MMU
 37 	default "13" if !MMU
 86 	default !MMU
 90 	depends on MMU && CPU_SH4A && !CPU_SH4AL_DSP
 [all …]
 
 | 
| /linux/drivers/gpu/drm/nouveau/ | 
| nouveau_mem.c | 
 93 	struct nvif_mmu *mmu = &drm->mmu;  in nouveau_mem_host()  local
 103 	if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))  in nouveau_mem_host()
 105 	if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {  in nouveau_mem_host()
 106 		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)  in nouveau_mem_host()
 107 			mem->kind = mmu->kind[mem->kind];  in nouveau_mem_host()
 117 	ret = nvif_mem_ctor_type(mmu, "ttmHostMem", mmu->mem, type, PAGE_SHIFT,  in nouveau_mem_host()
 129 	struct nvif_mmu *mmu = &drm->mmu;  in nouveau_mem_vram()  local
 134 	switch (mmu->mem) {  in nouveau_mem_vram()
 136 		ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,  in nouveau_mem_vram()
 144 		ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,  in nouveau_mem_vram()
 [all …]
 
 | 
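nouveau_mem_host() above rejects a memory type unless it advertises every property the buffer needs (kind annotations, compression). A simplified sketch of that test (type_suits is an invented name; the flag constants follow the excerpt):

#include <linux/types.h>

static bool type_suits(u32 type_flags, bool need_kind, bool need_comp)
{
	if (need_kind && !(type_flags & NVIF_MEM_KIND))
		return false;	/* type can't carry kind annotations */
	if (need_comp && !(type_flags & NVIF_MEM_COMP))
		return false;	/* type can't back compressed surfaces */
	return true;
}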
| /linux/drivers/xen/ | 
| grant-dma-iommu.c | 
 36 	struct grant_dma_iommu_device *mmu;  in grant_dma_iommu_probe()  local
 39 	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);  in grant_dma_iommu_probe()
 40 	if (!mmu)  in grant_dma_iommu_probe()
 43 	mmu->dev = &pdev->dev;  in grant_dma_iommu_probe()
 45 	ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev);  in grant_dma_iommu_probe()
 49 	platform_set_drvdata(pdev, mmu);  in grant_dma_iommu_probe()
 56 	struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);  in grant_dma_iommu_remove()  local
 59 	iommu_device_unregister(&mmu->iommu);  in grant_dma_iommu_remove()
 
 | 
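The probe above is the standard managed-allocation pattern: devm_kzalloc() ties the structure's lifetime to the device, and platform_set_drvdata() stashes it for remove(). Reassembled from the visible lines, with comments:

static int grant_dma_iommu_probe(struct platform_device *pdev)
{
	struct grant_dma_iommu_device *mmu;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return -ENOMEM;		/* devm: no explicit free needed */

	mmu->dev = &pdev->dev;
	ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops,
				    &pdev->dev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mmu);	/* retrieved in remove() */
	return 0;
}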
| /linux/drivers/accel/habanalabs/common/mmu/ | 
| mmu_v2_hr.c | 
 9 #include "../../include/hw_ip/mmu/mmu_general.h"
 37  * hl_mmu_v2_hr_init() - initialize the MMU module.
 55  * hl_mmu_v2_hr_fini() - release the MMU module.
 59  * - Disable MMU in H/W.
 72  * hl_mmu_v2_hr_ctx_init() - initialize a context for using the MMU module.
 86  * hl_mmu_v2_hr_ctx_fini - disable a ctx from using the mmu module
 378  * hl_mmu_v2_prepare - prepare mmu_if for working with mmu v2
 381  * @mmu_if: pointer to the mmu interface structure
 383 void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)  in hl_mmu_v2_hr_set_funcs()  argument
 385 	mmu->init = hl_mmu_v2_hr_init;  in hl_mmu_v2_hr_set_funcs()
 [all …]
 
 | 
| /linux/drivers/gpu/drm/msm/adreno/ | 
| a2xx_gpummu.c | 
 27 static void a2xx_gpummu_detach(struct msm_mmu *mmu)  in a2xx_gpummu_detach()  argument
 31 static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova,  in a2xx_gpummu_map()  argument
 35 	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);  in a2xx_gpummu_map()
 62 static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)  in a2xx_gpummu_unmap()  argument
 64 	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);  in a2xx_gpummu_unmap()
 77 static void a2xx_gpummu_destroy(struct msm_mmu *mmu)  in a2xx_gpummu_destroy()  argument
 79 	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);  in a2xx_gpummu_destroy()
 81 	dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,  in a2xx_gpummu_destroy()
 115 void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,  in a2xx_gpummu_params()  argument
 118 	dma_addr_t base = to_a2xx_gpummu(mmu)->pt_base;  in a2xx_gpummu_params()
 
 |
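a2xx_gpummu_destroy() above releases a DMA-coherent page table with dma_free_attrs(). A sketch of the matching alloc/free pair; TABLE_SIZE is the driver's own constant, and DMA_ATTR_FORCE_CONTIGUOUS is an assumption about the attrs value used at allocation:

#include <linux/dma-mapping.h>

static void *alloc_gpummu_table(struct device *dev, dma_addr_t *pt_base)
{
	/* Coherent allocation: CPU-visible table, bus address in *pt_base. */
	return dma_alloc_attrs(dev, TABLE_SIZE, pt_base, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

static void free_gpummu_table(struct device *dev, void *table,
			      dma_addr_t pt_base)
{
	/* Free with the same size and attrs the allocation used. */
	dma_free_attrs(dev, TABLE_SIZE, table, pt_base,
		       DMA_ATTR_FORCE_CONTIGUOUS);
}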