Lines matching: +full:memory +full:-mapped
1 // SPDX-License-Identifier: GPL-2.0-only
18 drm_mm_takedown(&aspace->mm); in msm_gem_address_space_destroy()
19 if (aspace->mmu) in msm_gem_address_space_destroy()
20 aspace->mmu->funcs->destroy(aspace->mmu); in msm_gem_address_space_destroy()
21 put_pid(aspace->pid); in msm_gem_address_space_destroy()
29 kref_put(&aspace->kref, msm_gem_address_space_destroy); in msm_gem_address_space_put()
36 kref_get(&aspace->kref); in msm_gem_address_space_get()
41 /* Actually unmap memory for the vma */
44 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_vma_purge()
45 unsigned size = vma->node.size; in msm_gem_vma_purge()
47 /* Don't do anything if the memory isn't mapped */ in msm_gem_vma_purge()
48 if (!vma->mapped) in msm_gem_vma_purge()
51 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_vma_purge()
53 vma->mapped = false; in msm_gem_vma_purge()
61 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_vma_map()
64 if (GEM_WARN_ON(!vma->iova)) in msm_gem_vma_map()
65 return -EINVAL; in msm_gem_vma_map()
67 if (vma->mapped) in msm_gem_vma_map()
70 vma->mapped = true; in msm_gem_vma_map()
76 * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold in msm_gem_vma_map()
81 * Revisit this if we can come up with a scheme to pre-alloc pages in msm_gem_vma_map()
84 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot); in msm_gem_vma_map()
87 vma->mapped = false; in msm_gem_vma_map()
96 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_vma_close()
98 GEM_WARN_ON(vma->mapped); in msm_gem_vma_close()
100 spin_lock(&aspace->lock); in msm_gem_vma_close()
101 if (vma->iova) in msm_gem_vma_close()
102 drm_mm_remove_node(&vma->node); in msm_gem_vma_close()
103 spin_unlock(&aspace->lock); in msm_gem_vma_close()
105 vma->iova = 0; in msm_gem_vma_close()
118 vma->aspace = aspace; in msm_gem_vma_new()
127 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_vma_init()
131 return -EINVAL; in msm_gem_vma_init()
133 if (GEM_WARN_ON(vma->iova)) in msm_gem_vma_init()
134 return -EBUSY; in msm_gem_vma_init()
136 spin_lock(&aspace->lock); in msm_gem_vma_init()
137 ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, in msm_gem_vma_init()
140 spin_unlock(&aspace->lock); in msm_gem_vma_init()
145 vma->iova = vma->node.start; in msm_gem_vma_init()
146 vma->mapped = false; in msm_gem_vma_init()
148 kref_get(&aspace->kref); in msm_gem_vma_init()
164 return ERR_PTR(-ENOMEM); in msm_gem_address_space_create()
166 spin_lock_init(&aspace->lock); in msm_gem_address_space_create()
167 aspace->name = name; in msm_gem_address_space_create()
168 aspace->mmu = mmu; in msm_gem_address_space_create()
169 aspace->va_start = va_start; in msm_gem_address_space_create()
170 aspace->va_size = size; in msm_gem_address_space_create()
172 drm_mm_init(&aspace->mm, va_start, size); in msm_gem_address_space_create()
174 kref_init(&aspace->kref); in msm_gem_address_space_create()