xref: /linux/drivers/iommu/iommu-sva.c (revision 8477ab143069c6b05d6da4a8184ded8b969240f5)
// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);

/* Allocate a global PASID for the mm, or reuse the one already assigned to it */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. If it is, readers may
	 * see a valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* A bond already exists, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
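
/*
 * Typical usage by a device driver, as a sketch (error handling elided;
 * the calls below are the exported functions of this file):
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program pasid into the device and issue DMA ...
 *	iommu_sva_unbind_device(handle);
 */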

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to the bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
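/**
 * iommu_sva_get_pasid() - Return the PASID bound to an SVA handle
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Return the mm's PASID for the device to tag its transactions with
 * (on x86, for instance, via ENQCMD). The value stays valid for as long
 * as the bond exists.
 */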
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
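/*
 * Release the mm's global PASID and the per-mm SVA bookkeeping. Runs when
 * the mm itself is torn down, at which point no bonds can remain.
 */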
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
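/*
 * Work item handling one group of page faults. Runs from the IOPF
 * workqueue so that handle_mm_fault(), which may sleep, can be used.
 */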
static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}
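/*
 * Fault handler installed on SVA domains: defer the fault group to the
 * IOPF workqueue. Returns -EBUSY if the group's work is already queued.
 */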
static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}
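/*
 * Allocate an SVA domain via the driver's domain_alloc_sva() op and fill
 * in the fields common to all SVA domains. The domain takes an mmgrab()
 * reference on the mm, which is dropped when the domain is freed.
 */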
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (!ops->domain_alloc_sva)
		return ERR_PTR(-EOPNOTSUPP);

	domain = ops->domain_alloc_sva(dev, mm);
	if (IS_ERR(domain))
		return domain;

	domain->type = IOMMU_DOMAIN_SVA;
	domain->cookie_type = IOMMU_COOKIE_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
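
/*
 * Note: a minimal sketch of a driver-side domain_alloc_sva() implementation,
 * for illustration only (the "mydrv" names are hypothetical; the signature
 * is the one invoked above):
 *
 *	static struct iommu_domain *mydrv_domain_alloc_sva(struct device *dev,
 *							   struct mm_struct *mm)
 *	{
 *		struct mydrv_sva_domain *d;
 *
 *		d = kzalloc(sizeof(*d), GFP_KERNEL);
 *		if (!d)
 *			return ERR_PTR(-ENOMEM);
 *		d->domain.ops = &mydrv_sva_domain_ops;
 *		return &d->domain;
 *	}
 *
 * The core fills in type, cookie_type, mm, owner and iopf_handler
 * afterwards, so the driver only needs to set up its own state and the
 * domain ops.
 */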