// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static bool iommu_sva_present;
static LIST_HEAD(iommu_sva_mms);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);

/* Allocate a PASID for the mm, or reuse the one already assigned to it */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	iommu_mm->mm = mm;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered ahead of the
	 * initialization of the iommu_mm fields. If it were, readers could
	 * see a valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
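
/*
 * Example (sketch, not part of this file): a lockless reader that pairs with
 * the smp_store_release() above would use an acquire load before trusting the
 * iommu_mm fields.  The in-tree reader lives in include/linux/iommu.h; this
 * is only an illustration of the required pairing.
 *
 *	static u32 example_read_pasid(struct mm_struct *mm)
 *	{
 *		struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);
 *
 *		if (!iommu_mm)
 *			return IOMMU_PASID_INVALID;
 *		return iommu_mm->pasid;
 *	}
 */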

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. The caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* A bond already exists, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;

	if (list_empty(&iommu_mm->sva_domains)) {
		if (list_empty(&iommu_sva_mms))
			iommu_sva_present = true;
		list_add(&iommu_mm->mm_list_elm, &iommu_sva_mms);
	}
	list_add(&domain->next, &iommu_mm->sva_domains);
out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
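
/*
 * Example (sketch, not part of this file): typical driver-side use of the SVA
 * API.  "my_dev" and "my_ctx" are hypothetical names and error handling is
 * trimmed to the essentials.  The PASID obtained here is what the device must
 * tag its transactions with so they are translated through the bound mm, and
 * the handle must be released with iommu_sva_unbind_device() once the device
 * has stopped issuing transactions for that PASID.
 *
 *	struct iommu_sva *handle;
 *
 *	handle = iommu_sva_bind_device(my_dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	my_ctx->pasid = iommu_sva_get_pasid(handle);
 *
 *	(program the device with my_ctx->pasid, run the workload, quiesce it)
 *
 *	iommu_sva_unbind_device(handle);
 */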

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to a bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}

	if (list_empty(&iommu_mm->sva_domains)) {
		list_del(&iommu_mm->mm_list_elm);
		if (list_empty(&iommu_sva_mms))
			iommu_sva_present = false;
	}

	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
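
/*
 * Worked example (illustration only): a non-privileged write request whose
 * address falls in a read-only user VMA arrives with IOMMU_FAULT_PERM_READ
 * and IOMMU_FAULT_PERM_WRITE set.  iommu_sva_handle_mm() builds
 * access_flags = VM_READ | VM_WRITE and
 * fault_flags = FAULT_FLAG_REMOTE | FAULT_FLAG_WRITE | FAULT_FLAG_USER;
 * since VM_WRITE is missing from vma->vm_flags, the access check fails and
 * the page request is answered with IOMMU_PAGE_RESP_INVALID without calling
 * handle_mm_fault().
 */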

/*
 * Runs from the iopf workqueue: resolving faults via handle_mm_fault() may
 * sleep, so the work is deferred here rather than done in the context that
 * reported the fault.
 */
static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (!ops->domain_alloc_sva)
		return ERR_PTR(-EOPNOTSUPP);

	domain = ops->domain_alloc_sva(dev, mm);
	if (IS_ERR(domain))
		return domain;

	domain->type = IOMMU_DOMAIN_SVA;
	domain->cookie_type = IOMMU_COOKIE_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
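
/*
 * Example (sketch, not from any in-tree driver): the general shape a driver's
 * ->domain_alloc_sva() implementation is expected to take.  "my_sva_domain"
 * and "my_sva_domain_ops" are hypothetical; the ops table must provide at
 * least the PASID attach and free callbacks declared in include/linux/iommu.h,
 * since iommu_sva_domain_alloc() above fills in the generic fields but leaves
 * domain->ops to the driver.
 *
 *	static struct iommu_domain *my_domain_alloc_sva(struct device *dev,
 *							struct mm_struct *mm)
 *	{
 *		struct my_sva_domain *sdom;
 *
 *		sdom = kzalloc(sizeof(*sdom), GFP_KERNEL);
 *		if (!sdom)
 *			return ERR_PTR(-ENOMEM);
 *
 *		sdom->domain.ops = &my_sva_domain_ops;
 *		(driver-specific setup that lets the hardware walk @mm's
 *		 page tables goes here)
 *		return &sdom->domain;
 *	}
 */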

void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end)
{
	struct iommu_mm_data *iommu_mm;

	guard(mutex)(&iommu_sva_lock);
	if (!iommu_sva_present)
		return;

	list_for_each_entry(iommu_mm, &iommu_sva_mms, mm_list_elm)
		mmu_notifier_arch_invalidate_secondary_tlbs(iommu_mm->mm, start, end);
}