// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);
/* Allocate a PASID for the mm within range (inclusive) */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields, or lock-free readers
	 * may observe a valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
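/*
 * Aside (editor's sketch, not part of the kernel source): the
 * smp_store_release() above pairs with an smp_load_acquire() on readers that
 * dereference mm->iommu_mm without holding iommu_sva_lock. A hypothetical
 * lock-free reader would look roughly like this; the function name is
 * illustrative only.
 */
static inline u32 example_peek_pasid(struct mm_struct *mm)
{
	/* Acquire pairs with the release above, so pasid is initialized. */
	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);

	return iommu_mm ? iommu_mm->pasid : IOMMU_PASID_INVALID;
}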
/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken; each reference must be released with
 * iommu_sva_unbind_device().
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* If a bond already exists for this PASID, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing SVA domain attachable to this device. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
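/*
 * Usage sketch (editor's illustration, not part of the kernel source): a
 * driver binds the current process, programs the returned PASID into its
 * hardware, and unbinds on teardown. struct my_dev and the two helpers are
 * hypothetical; only the iommu_sva_*() calls are the real API.
 */
struct my_dev {
	struct device *dev;
	struct iommu_sva *bond;
	u32 pasid;
};

static int my_dev_enable_sva(struct my_dev *mdev)
{
	struct iommu_sva *bond = iommu_sva_bind_device(mdev->dev, current->mm);

	if (IS_ERR(bond))
		return PTR_ERR(bond);

	mdev->bond = bond;
	mdev->pasid = iommu_sva_get_pasid(bond);	/* program this into HW */
	return 0;
}

static void my_dev_disable_sva(struct my_dev *mdev)
{
	/* Quiesce DMA and flush outstanding page requests first. */
	iommu_sva_unbind_device(mdev->bond);
}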
/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}
/*
 * I/O page fault handler for SVA: resolve one page request against the mm.
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	unsigned int access_flags = 0;
	struct vm_area_struct *vma;
	vm_fault_t ret;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;
	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}
	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}
	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}
static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}
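/*
 * Note: fault handling is deferred to a workqueue because
 * iommu_sva_handle_mm() ends up in handle_mm_fault(), which can sleep,
 * while page requests are reported from a context where blocking is not
 * an option. The worker sends the page response for the whole group once
 * every fault in it has been processed.
 */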
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->domain_alloc_sva) {
		domain = ops->domain_alloc_sva(dev, mm);
		if (IS_ERR(domain))
			return domain;
	} else {
		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
		if (!domain)
			return ERR_PTR(-ENOMEM);
	}

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
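/*
 * Illustration (editor's sketch, not part of the kernel source): an IOMMU
 * driver opts in to the SVA path above either by implementing
 * ->domain_alloc_sva() or by accepting IOMMU_DOMAIN_SVA in its generic
 * ->domain_alloc(). Everything prefixed "my_" below is hypothetical; a real
 * driver would also supply domain ops that implement set_dev_pasid.
 */
struct my_sva_domain {
	struct iommu_domain domain;
	/* driver state shadowing the mm's page tables would live here */
};

static struct iommu_domain *my_domain_alloc_sva(struct device *dev,
						struct mm_struct *mm)
{
	struct my_sva_domain *sva = kzalloc(sizeof(*sva), GFP_KERNEL);

	if (!sva)
		return ERR_PTR(-ENOMEM);

	/* The core then sets domain->type, domain->mm and the iopf handler. */
	return &sva->domain;
}

static const struct iommu_ops my_iommu_ops = {
	/* ... other callbacks elided ... */
	.domain_alloc_sva = my_domain_alloc_sva,
};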