// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation
 */
#define pr_fmt(fmt) "iommufd: " fmt

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/poll.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

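/*
 * Enable IOPF (I/O page fault) delivery for a device. The enable is
 * refcounted under idev->iopf_lock so that multiple fault-capable
 * attachments share a single IOMMU_DEV_FEAT_IOPF enable.
 */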
static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
{
	struct device *dev = idev->dev;
	int ret;

	/*
	 * Once PCI/PRI support is turned on for a VF, a failure response
	 * code must not be forwarded to the hardware: PRI is a resource
	 * shared between the PF and its VFs, with no coordination for this
	 * shared capability, and recovery would require a vPRI reset. So
	 * reject IOPF on PRI-capable VFs for now.
	 */
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->is_virtfn && pci_pri_supported(pdev))
			return -EINVAL;
	}

	mutex_lock(&idev->iopf_lock);
	/* Device IOPF has already been enabled. */
	if (++idev->iopf_enabled > 1) {
		mutex_unlock(&idev->iopf_lock);
		return 0;
	}

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		--idev->iopf_enabled;
	mutex_unlock(&idev->iopf_lock);

	return ret;
}

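/* Drop one IOPF enable reference; disable the feature on the last one. */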
static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
{
	mutex_lock(&idev->iopf_lock);
	if (!WARN_ON(idev->iopf_enabled == 0)) {
		if (--idev->iopf_enabled == 0)
			iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
	}
	mutex_unlock(&idev->iopf_lock);
}

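/*
 * Attach the device's group to a fault-capable domain with an attach
 * handle, which lets the fault handler map a delivered iopf_group back
 * to the iommufd_device that generated it.
 */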
static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
				     struct iommufd_device *idev)
{
	struct iommufd_attach_handle *handle;
	int ret;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->idev = idev;
	ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
					&handle->handle);
	if (ret)
		kfree(handle);

	return ret;
}

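/* Attach a device to a fault-capable hw_pagetable, enabling IOPF first. */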
int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
				    struct iommufd_device *idev)
{
	int ret;

	if (!hwpt->fault)
		return -EINVAL;

	ret = iommufd_fault_iopf_enable(idev);
	if (ret)
		return ret;

	ret = __fault_domain_attach_dev(hwpt, idev);
	if (ret)
		iommufd_fault_iopf_disable(idev);

	return ret;
}

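/*
 * Auto-respond IOMMU_PAGE_RESP_INVALID to every fault still queued for
 * this attach handle: both faults not yet read by userspace (on the
 * deliver list) and faults awaiting a userspace response (parked in the
 * response xarray).
 */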
static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
					 struct iommufd_attach_handle *handle)
{
	struct iommufd_fault *fault = hwpt->fault;
	struct iopf_group *group, *next;
	unsigned long index;

	if (!fault)
		return;

	mutex_lock(&fault->mutex);
	list_for_each_entry_safe(group, next, &fault->deliver, node) {
		if (group->attach_handle != &handle->handle)
			continue;
		list_del(&group->node);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}

	xa_for_each(&fault->response, index, group) {
		if (group->attach_handle != &handle->handle)
			continue;
		xa_erase(&fault->response, index);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}
	mutex_unlock(&fault->mutex);
}

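/* Look up the iommufd attach handle installed for the device's group. */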
static struct iommufd_attach_handle *
iommufd_device_get_attach_handle(struct iommufd_device *idev)
{
	struct iommu_attach_handle *handle;

	handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
	if (IS_ERR(handle))
		return NULL;

	return to_iommufd_handle(handle);
}

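/*
 * Detach a device from a fault-capable hw_pagetable and auto-respond to
 * any of its faults still pending, then drop the IOPF enable.
 */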
void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
				     struct iommufd_device *idev)
{
	struct iommufd_attach_handle *handle;

	handle = iommufd_device_get_attach_handle(idev);
	iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
	iommufd_auto_response_faults(hwpt, handle);
	iommufd_fault_iopf_disable(idev);
	kfree(handle);
}

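/*
 * Replace the group's attached domain, installing a fresh attach handle
 * when the new hw_pagetable is fault capable. Faults still pending on
 * the old handle are auto-responded once the replacement has succeeded.
 */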
static int __fault_domain_replace_dev(struct iommufd_device *idev,
				      struct iommufd_hw_pagetable *hwpt,
				      struct iommufd_hw_pagetable *old)
{
	struct iommufd_attach_handle *handle, *curr = NULL;
	int ret;

	if (old->fault)
		curr = iommufd_device_get_attach_handle(idev);

	if (hwpt->fault) {
		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
		if (!handle)
			return -ENOMEM;

		handle->idev = idev;
		ret = iommu_replace_group_handle(idev->igroup->group,
						 hwpt->domain, &handle->handle);
		/* Do not leak the handle if the replace failed. */
		if (ret)
			kfree(handle);
	} else {
		ret = iommu_replace_group_handle(idev->igroup->group,
						 hwpt->domain, NULL);
	}

	if (!ret && curr) {
		iommufd_auto_response_faults(old, curr);
		kfree(curr);
	}

	return ret;
}

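/*
 * Replace a device's hw_pagetable, turning the device's IOPF enable on
 * or off when fault capability is gained or lost across the transition.
 */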
int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
				     struct iommufd_hw_pagetable *hwpt,
				     struct iommufd_hw_pagetable *old)
{
	bool iopf_off = !hwpt->fault && old->fault;
	bool iopf_on = hwpt->fault && !old->fault;
	int ret;

	if (iopf_on) {
		ret = iommufd_fault_iopf_enable(idev);
		if (ret)
			return ret;
	}

	ret = __fault_domain_replace_dev(idev, hwpt, old);
	if (ret) {
		if (iopf_on)
			iommufd_fault_iopf_disable(idev);
		return ret;
	}

	if (iopf_off)
		iommufd_fault_iopf_disable(idev);

	return 0;
}

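/* Flush all faults still held by a fault object that is being destroyed. */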
void iommufd_fault_destroy(struct iommufd_object *obj)
{
	struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
	struct iopf_group *group, *next;
	unsigned long index;

	/*
	 * The iommufd object's reference count is zero at this point.
	 * We can be confident that no other threads are currently
	 * accessing this pointer. Therefore, acquiring the mutex here
	 * is unnecessary.
	 */
	list_for_each_entry_safe(group, next, &fault->deliver, node) {
		list_del(&group->node);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}
	/*
	 * Groups already read by userspace but never responded to would
	 * otherwise leak: auto-respond and free them as well.
	 */
	xa_for_each(&fault->response, index, group) {
		xa_erase(&fault->response, index);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}
}

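/* Translate a kernel struct iommu_fault into the uAPI struct iommu_hwpt_pgfault. */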
static void iommufd_compose_fault_message(struct iommu_fault *fault,
					  struct iommu_hwpt_pgfault *hwpt_fault,
					  struct iommufd_device *idev,
					  u32 cookie)
{
	hwpt_fault->flags = fault->prm.flags;
	hwpt_fault->dev_id = idev->obj.id;
	hwpt_fault->pasid = fault->prm.pasid;
	hwpt_fault->grpid = fault->prm.grpid;
	hwpt_fault->perm = fault->prm.perm;
	hwpt_fault->addr = fault->prm.addr;
	hwpt_fault->length = 0;
	hwpt_fault->cookie = cookie;
}

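/*
 * Deliver queued faults to userspace. Reads must be a multiple of
 * struct iommu_hwpt_pgfault. A group is copied out whole, then parked
 * in the response xarray under a newly allocated cookie until userspace
 * writes back a response carrying that cookie.
 */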
static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
				       size_t count, loff_t *ppos)
{
	size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
	struct iommufd_fault *fault = filep->private_data;
	struct iommu_hwpt_pgfault data;
	struct iommufd_device *idev;
	struct iopf_group *group;
	struct iopf_fault *iopf;
	size_t done = 0;
	int rc = 0;

	if (*ppos || count % fault_size)
		return -ESPIPE;

	mutex_lock(&fault->mutex);
	while (!list_empty(&fault->deliver) && count > done) {
		group = list_first_entry(&fault->deliver,
					 struct iopf_group, node);

		if (group->fault_count * fault_size > count - done)
			break;

		rc = xa_alloc(&fault->response, &group->cookie, group,
			      xa_limit_32b, GFP_KERNEL);
		if (rc)
			break;

		idev = to_iommufd_handle(group->attach_handle)->idev;
		list_for_each_entry(iopf, &group->faults, list) {
			iommufd_compose_fault_message(&iopf->fault,
						      &data, idev,
						      group->cookie);
			if (copy_to_user(buf + done, &data, fault_size)) {
				xa_erase(&fault->response, group->cookie);
				rc = -EFAULT;
				break;
			}
			done += fault_size;
		}

		list_del(&group->node);
	}
	mutex_unlock(&fault->mutex);

	return done == 0 ? rc : done;
}

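/*
 * Accept fault responses from userspace as an array of struct
 * iommu_hwpt_page_response. Each cookie selects a parked iopf_group,
 * which is completed with the given response code and freed.
 */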
static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf,
					size_t count, loff_t *ppos)
{
	size_t response_size = sizeof(struct iommu_hwpt_page_response);
	struct iommufd_fault *fault = filep->private_data;
	struct iommu_hwpt_page_response response;
	struct iopf_group *group;
	size_t done = 0;
	int rc = 0;

	if (*ppos || count % response_size)
		return -ESPIPE;

	mutex_lock(&fault->mutex);
	while (count > done) {
		/*
		 * copy_from_user() returns the number of bytes it could not
		 * copy, not an errno; map any shortfall to -EFAULT so that
		 * a failed first iteration is not returned as a byte count.
		 */
		if (copy_from_user(&response, buf + done, response_size)) {
			rc = -EFAULT;
			break;
		}

		static_assert((int)IOMMUFD_PAGE_RESP_SUCCESS ==
			      (int)IOMMU_PAGE_RESP_SUCCESS);
		static_assert((int)IOMMUFD_PAGE_RESP_INVALID ==
			      (int)IOMMU_PAGE_RESP_INVALID);
		if (response.code != IOMMUFD_PAGE_RESP_SUCCESS &&
		    response.code != IOMMUFD_PAGE_RESP_INVALID) {
			rc = -EINVAL;
			break;
		}

		group = xa_erase(&fault->response, response.cookie);
		if (!group) {
			rc = -EINVAL;
			break;
		}

		iopf_group_response(group, response.code);
		iopf_free_group(group);
		done += response_size;
	}
	mutex_unlock(&fault->mutex);

	return done == 0 ? rc : done;
}

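/* The fd is always writable; it becomes readable when faults are queued. */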
static __poll_t iommufd_fault_fops_poll(struct file *filep,
					struct poll_table_struct *wait)
{
	struct iommufd_fault *fault = filep->private_data;
	__poll_t pollflags = EPOLLOUT;

	poll_wait(filep, &fault->wait_queue, wait);
	mutex_lock(&fault->mutex);
	if (!list_empty(&fault->deliver))
		pollflags |= EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&fault->mutex);

	return pollflags;
}

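/* Closing the fd drops the references the fd took at allocation time. */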
static int iommufd_fault_fops_release(struct inode *inode, struct file *filep)
{
	struct iommufd_fault *fault = filep->private_data;

	refcount_dec(&fault->obj.users);
	iommufd_ctx_put(fault->ictx);
	return 0;
}

static const struct file_operations iommufd_fault_fops = {
	.owner		= THIS_MODULE,
	.open		= nonseekable_open,
	.read		= iommufd_fault_fops_read,
	.write		= iommufd_fault_fops_write,
	.poll		= iommufd_fault_fops_poll,
	.release	= iommufd_fault_fops_release,
};

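/*
 * Allocate a fault object (struct iommu_fault_alloc) along with an
 * anonymous fd through which userspace reads faults and writes
 * responses. The fd holds a reference on both the fault object and the
 * iommufd context until it is closed.
 */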
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_fault_alloc *cmd = ucmd->cmd;
	struct iommufd_fault *fault;
	struct file *filep;
	int fdno;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	fault = iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT);
	if (IS_ERR(fault))
		return PTR_ERR(fault);

	fault->ictx = ucmd->ictx;
	INIT_LIST_HEAD(&fault->deliver);
	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
	mutex_init(&fault->mutex);
	init_waitqueue_head(&fault->wait_queue);

	filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
				   fault, O_RDWR);
	if (IS_ERR(filep)) {
		rc = PTR_ERR(filep);
		goto out_abort;
	}

	refcount_inc(&fault->obj.users);
	iommufd_ctx_get(fault->ictx);
	fault->filep = filep;

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = fdno;
		goto out_fput;
	}

	cmd->out_fault_id = fault->obj.id;
	cmd->out_fault_fd = fdno;

	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put_fdno;
	iommufd_object_finalize(ucmd->ictx, &fault->obj);

	fd_install(fdno, fault->filep);

	return 0;
out_put_fdno:
	put_unused_fd(fdno);
out_fput:
	fput(filep);
out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);

	return rc;
}

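/*
 * IOPF entry point from the IOMMU layer: queue the group on the fault
 * object's deliver list and wake up any pollers.
 */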
int iommufd_fault_iopf_handler(struct iopf_group *group)
{
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_fault *fault;

	hwpt = group->attach_handle->domain->fault_data;
	fault = hwpt->fault;

	mutex_lock(&fault->mutex);
	list_add_tail(&group->node, &fault->deliver);
	mutex_unlock(&fault->mutex);

	wake_up_interruptible(&fault->wait_queue);

	return 0;
}
445