xref: /linux/drivers/iommu/iommufd/fault.c (revision 9557b4376d02088a33e5f4116bcc324d35a3b64c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2024 Intel Corporation
3  */
4 #define pr_fmt(fmt) "iommufd: " fmt
5 
6 #include <linux/file.h>
7 #include <linux/fs.h>
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/iommufd.h>
11 #include <linux/pci.h>
12 #include <linux/poll.h>
13 #include <linux/anon_inodes.h>
14 #include <uapi/linux/iommufd.h>
15 
16 #include "../iommu-priv.h"
17 #include "iommufd_private.h"
18 
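/*
 * IOPF (I/O page fault) enabling is refcounted per device: idev->iopf_enabled
 * counts the fault-capable attachments and is protected by idev->iopf_lock.
 * The IOMMU_DEV_FEAT_IOPF feature is only toggled on the 0 <-> 1 transitions.
 */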
19 static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
20 {
21 	struct device *dev = idev->dev;
22 	int ret;
23 
24 	/*
25 	 * PRI is a resource shared between a PF and its VFs, and there is no
26 	 * coordination for this shared capability. A response failure code
27 	 * from a VF should not be forwarded to the hardware, and recovery
28 	 * would have to wait for a vPRI reset, so refuse IOPF on VFs for now.
29 	 */
30 	if (dev_is_pci(dev) && to_pci_dev(dev)->is_virtfn)
31 		return -EINVAL;
32 
33 	mutex_lock(&idev->iopf_lock);
34 	/* Device iopf has already been enabled. */
35 	if (++idev->iopf_enabled > 1) {
36 		mutex_unlock(&idev->iopf_lock);
37 		return 0;
38 	}
39 
40 	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
41 	if (ret)
42 		--idev->iopf_enabled;
43 	mutex_unlock(&idev->iopf_lock);
44 
45 	return ret;
46 }
47 
48 static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
49 {
50 	mutex_lock(&idev->iopf_lock);
51 	if (!WARN_ON(idev->iopf_enabled == 0)) {
52 		if (--idev->iopf_enabled == 0)
53 			iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
54 	}
55 	mutex_unlock(&idev->iopf_lock);
56 }
57 
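/*
 * Attach a device to a fault-capable domain. An iommufd_attach_handle is
 * allocated and handed to the IOMMU core so that faults raised on this
 * attachment can later be routed back to the right device (see
 * iommufd_device_get_attach_handle()).
 */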
58 static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
59 				     struct iommufd_device *idev)
60 {
61 	struct iommufd_attach_handle *handle;
62 	int ret;
63 
64 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
65 	if (!handle)
66 		return -ENOMEM;
67 
68 	handle->idev = idev;
69 	ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
70 					&handle->handle);
71 	if (ret)
72 		kfree(handle);
73 
74 	return ret;
75 }
76 
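/*
 * Attach path for hardware page tables that carry a fault object. IOPF is
 * enabled on the device first and rolled back if the attach itself fails.
 */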
77 int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
78 				    struct iommufd_device *idev)
79 {
80 	int ret;
81 
82 	if (!hwpt->fault)
83 		return -EINVAL;
84 
85 	ret = iommufd_fault_iopf_enable(idev);
86 	if (ret)
87 		return ret;
88 
89 	ret = __fault_domain_attach_dev(hwpt, idev);
90 	if (ret)
91 		iommufd_fault_iopf_disable(idev);
92 
93 	return ret;
94 }
95 
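/*
 * Respond IOMMU_PAGE_RESP_INVALID to every fault still pending for this
 * attach handle: faults not yet delivered to userspace (on the deliver list)
 * as well as faults delivered but not yet answered (in the response xarray).
 * Called on detach and replace so that no fault is left unanswered.
 */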
96 static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
97 					 struct iommufd_attach_handle *handle)
98 {
99 	struct iommufd_fault *fault = hwpt->fault;
100 	struct iopf_group *group, *next;
101 	unsigned long index;
102 
103 	if (!fault)
104 		return;
105 
106 	mutex_lock(&fault->mutex);
107 	list_for_each_entry_safe(group, next, &fault->deliver, node) {
108 		if (group->attach_handle != &handle->handle)
109 			continue;
110 		list_del(&group->node);
111 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
112 		iopf_free_group(group);
113 	}
114 
115 	xa_for_each(&fault->response, index, group) {
116 		if (group->attach_handle != &handle->handle)
117 			continue;
118 		xa_erase(&fault->response, index);
119 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
120 		iopf_free_group(group);
121 	}
122 	mutex_unlock(&fault->mutex);
123 }
124 
125 static struct iommufd_attach_handle *
126 iommufd_device_get_attach_handle(struct iommufd_device *idev)
127 {
128 	struct iommu_attach_handle *handle;
129 
130 	handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
131 	if (IS_ERR(handle))
132 		return NULL;
133 
134 	return to_iommufd_handle(handle);
135 }
136 
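/*
 * Detach from a fault-capable domain: tear down the attachment, auto-respond
 * to any faults still pending on it, drop the device's IOPF enable and free
 * the attach handle.
 */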
137 void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
138 				     struct iommufd_device *idev)
139 {
140 	struct iommufd_attach_handle *handle;
141 
142 	handle = iommufd_device_get_attach_handle(idev);
143 	iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
144 	iommufd_auto_response_faults(hwpt, handle);
145 	iommufd_fault_iopf_disable(idev);
146 	kfree(handle);
147 }
148 
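/*
 * Swap the attach handle across a domain replacement. A new handle is only
 * needed when the new hwpt is fault capable; the old handle's pending faults
 * are auto-responded and the handle freed only once the replace succeeded.
 */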
149 static int __fault_domain_replace_dev(struct iommufd_device *idev,
150 				      struct iommufd_hw_pagetable *hwpt,
151 				      struct iommufd_hw_pagetable *old)
152 {
153 	struct iommufd_attach_handle *handle, *curr = NULL;
154 	int ret;
155 
156 	if (old->fault)
157 		curr = iommufd_device_get_attach_handle(idev);
158 
159 	if (hwpt->fault) {
160 		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
161 		if (!handle)
162 			return -ENOMEM;
163 
164 		handle->handle.domain = hwpt->domain;
165 		handle->idev = idev;
166 		ret = iommu_replace_group_handle(idev->igroup->group,
167 						 hwpt->domain, &handle->handle);
168 	} else {
169 		ret = iommu_replace_group_handle(idev->igroup->group,
170 						 hwpt->domain, NULL);
171 	}
172 
173 	if (!ret && curr) {
174 		iommufd_auto_response_faults(old, curr);
175 		kfree(curr);
176 	}
177 
178 	return ret;
179 }
180 
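/*
 * Replace path when the old and/or the new hardware page table carries a
 * fault object. The device's IOPF enable follows the fault capability: it is
 * turned on before switching to a fault-capable hwpt and turned off after
 * switching away from one, and undone again if the replacement fails.
 */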
181 int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
182 				     struct iommufd_hw_pagetable *hwpt,
183 				     struct iommufd_hw_pagetable *old)
184 {
185 	bool iopf_off = !hwpt->fault && old->fault;
186 	bool iopf_on = hwpt->fault && !old->fault;
187 	int ret;
188 
189 	if (iopf_on) {
190 		ret = iommufd_fault_iopf_enable(idev);
191 		if (ret)
192 			return ret;
193 	}
194 
195 	ret = __fault_domain_replace_dev(idev, hwpt, old);
196 	if (ret) {
197 		if (iopf_on)
198 			iommufd_fault_iopf_disable(idev);
199 		return ret;
200 	}
201 
202 	if (iopf_off)
203 		iommufd_fault_iopf_disable(idev);
204 
205 	return 0;
206 }
207 
208 void iommufd_fault_destroy(struct iommufd_object *obj)
209 {
210 	struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
211 	struct iopf_group *group, *next;
212 
213 	/*
214 	 * The iommufd object's reference count is zero at this point, so no
215 	 * other thread can still be accessing this fault object. Acquiring
216 	 * the mutex is therefore unnecessary; just drain the deliver list
217 	 * and respond to any fault groups still queued on it.
218 	 */
219 	list_for_each_entry_safe(group, next, &fault->deliver, node) {
220 		list_del(&group->node);
221 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
222 		iopf_free_group(group);
223 	}
224 }
225 
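/*
 * Translate a kernel struct iommu_fault into the struct iommu_hwpt_pgfault
 * uAPI record that read() hands to userspace. The cookie ties the record to
 * its iopf_group so a later page response can be matched back to it.
 */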
226 static void iommufd_compose_fault_message(struct iommu_fault *fault,
227 					  struct iommu_hwpt_pgfault *hwpt_fault,
228 					  struct iommufd_device *idev,
229 					  u32 cookie)
230 {
231 	hwpt_fault->flags = fault->prm.flags;
232 	hwpt_fault->dev_id = idev->obj.id;
233 	hwpt_fault->pasid = fault->prm.pasid;
234 	hwpt_fault->grpid = fault->prm.grpid;
235 	hwpt_fault->perm = fault->prm.perm;
236 	hwpt_fault->addr = fault->prm.addr;
237 	hwpt_fault->length = 0;
238 	hwpt_fault->cookie = cookie;
239 }
240 
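/*
 * read() protocol: the user buffer must be a multiple of struct
 * iommu_hwpt_pgfault in size and faults are delivered one whole group at a
 * time; a group that does not fit in the remaining space stays queued.
 * Delivered groups move from the deliver list to the response xarray, keyed
 * by a freshly allocated cookie, where they wait for a write() response.
 */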
241 static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
242 				       size_t count, loff_t *ppos)
243 {
244 	size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
245 	struct iommufd_fault *fault = filep->private_data;
246 	struct iommu_hwpt_pgfault data;
247 	struct iommufd_device *idev;
248 	struct iopf_group *group;
249 	struct iopf_fault *iopf;
250 	size_t done = 0;
251 	int rc = 0;
252 
253 	if (*ppos || count % fault_size)
254 		return -ESPIPE;
255 
256 	mutex_lock(&fault->mutex);
257 	while (!list_empty(&fault->deliver) && count > done) {
258 		group = list_first_entry(&fault->deliver,
259 					 struct iopf_group, node);
260 
261 		if (group->fault_count * fault_size > count - done)
262 			break;
263 
264 		rc = xa_alloc(&fault->response, &group->cookie, group,
265 			      xa_limit_32b, GFP_KERNEL);
266 		if (rc)
267 			break;
268 
269 		idev = to_iommufd_handle(group->attach_handle)->idev;
270 		list_for_each_entry(iopf, &group->faults, list) {
271 			iommufd_compose_fault_message(&iopf->fault,
272 						      &data, idev,
273 						      group->cookie);
274 			if (copy_to_user(buf + done, &data, fault_size)) {
275 				xa_erase(&fault->response, group->cookie);
276 				rc = -EFAULT;
277 				break;
278 			}
279 			done += fault_size;
280 		}
281 
282 		list_del(&group->node);
283 	}
284 	mutex_unlock(&fault->mutex);
285 
286 	return done == 0 ? rc : done;
287 }
288 
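/*
 * write() protocol: the buffer must be a multiple of struct
 * iommu_hwpt_page_response in size. Each record is matched to a delivered
 * group by its cookie; only the SUCCESS and INVALID codes are accepted. The
 * response is forwarded to the IOMMU layer and the group is freed.
 */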
289 static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf,
290 					size_t count, loff_t *ppos)
291 {
292 	size_t response_size = sizeof(struct iommu_hwpt_page_response);
293 	struct iommufd_fault *fault = filep->private_data;
294 	struct iommu_hwpt_page_response response;
295 	struct iopf_group *group;
296 	size_t done = 0;
297 	int rc = 0;
298 
299 	if (*ppos || count % response_size)
300 		return -ESPIPE;
301 
302 	mutex_lock(&fault->mutex);
303 	while (count > done) {
304 		rc = copy_from_user(&response, buf + done, response_size);
305 		if (rc)
306 			break;
307 
308 		static_assert((int)IOMMUFD_PAGE_RESP_SUCCESS ==
309 			      (int)IOMMU_PAGE_RESP_SUCCESS);
310 		static_assert((int)IOMMUFD_PAGE_RESP_INVALID ==
311 			      (int)IOMMU_PAGE_RESP_INVALID);
312 		if (response.code != IOMMUFD_PAGE_RESP_SUCCESS &&
313 		    response.code != IOMMUFD_PAGE_RESP_INVALID) {
314 			rc = -EINVAL;
315 			break;
316 		}
317 
318 		group = xa_erase(&fault->response, response.cookie);
319 		if (!group) {
320 			rc = -EINVAL;
321 			break;
322 		}
323 
324 		iopf_group_response(group, response.code);
325 		iopf_free_group(group);
326 		done += response_size;
327 	}
328 	mutex_unlock(&fault->mutex);
329 
330 	return done == 0 ? rc : done;
331 }
332 
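/*
 * The fault fd is always writable; it becomes readable whenever fault groups
 * are waiting on the deliver list.
 */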
333 static __poll_t iommufd_fault_fops_poll(struct file *filep,
334 					struct poll_table_struct *wait)
335 {
336 	struct iommufd_fault *fault = filep->private_data;
337 	__poll_t pollflags = EPOLLOUT;
338 
339 	poll_wait(filep, &fault->wait_queue, wait);
340 	mutex_lock(&fault->mutex);
341 	if (!list_empty(&fault->deliver))
342 		pollflags |= EPOLLIN | EPOLLRDNORM;
343 	mutex_unlock(&fault->mutex);
344 
345 	return pollflags;
346 }
347 
348 static int iommufd_fault_fops_release(struct inode *inode, struct file *filep)
349 {
350 	struct iommufd_fault *fault = filep->private_data;
351 
352 	refcount_dec(&fault->obj.users);
353 	iommufd_ctx_put(fault->ictx);
354 	return 0;
355 }
356 
357 static const struct file_operations iommufd_fault_fops = {
358 	.owner		= THIS_MODULE,
359 	.open		= nonseekable_open,
360 	.read		= iommufd_fault_fops_read,
361 	.write		= iommufd_fault_fops_write,
362 	.poll		= iommufd_fault_fops_poll,
363 	.release	= iommufd_fault_fops_release,
364 	.llseek		= no_llseek,
365 };
366 
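/*
 * Handler for the IOMMU_FAULT_QUEUE_ALLOC ioctl: allocate a fault object and
 * export it to userspace as both an object ID and an anonymous-inode file
 * descriptor. The fd holds a users reference on the object and a reference
 * on the iommufd context until it is closed.
 *
 * A minimal userspace sketch of the expected flow (illustrative only; error
 * handling and the fault-capable HWPT allocation that consumes out_fault_id
 * are omitted, and single-fault groups are assumed):
 *
 *	struct iommu_fault_alloc cmd = { .size = sizeof(cmd) };
 *	struct iommu_hwpt_pgfault pgfault;
 *	struct iommu_hwpt_page_response resp = {};
 *
 *	ioctl(iommufd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
 *
 *	for (;;) {
 *		// poll(cmd.out_fault_fd) until readable, then:
 *		read(cmd.out_fault_fd, &pgfault, sizeof(pgfault));
 *		// ... resolve the fault, then acknowledge the group:
 *		resp.cookie = pgfault.cookie;
 *		resp.code = IOMMUFD_PAGE_RESP_SUCCESS;
 *		write(cmd.out_fault_fd, &resp, sizeof(resp));
 *	}
 */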
367 int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
368 {
369 	struct iommu_fault_alloc *cmd = ucmd->cmd;
370 	struct iommufd_fault *fault;
371 	struct file *filep;
372 	int fdno;
373 	int rc;
374 
375 	if (cmd->flags)
376 		return -EOPNOTSUPP;
377 
378 	fault = iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT);
379 	if (IS_ERR(fault))
380 		return PTR_ERR(fault);
381 
382 	fault->ictx = ucmd->ictx;
383 	INIT_LIST_HEAD(&fault->deliver);
384 	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
385 	mutex_init(&fault->mutex);
386 	init_waitqueue_head(&fault->wait_queue);
387 
388 	filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
389 				   fault, O_RDWR);
390 	if (IS_ERR(filep)) {
391 		rc = PTR_ERR(filep);
392 		goto out_abort;
393 	}
394 
395 	refcount_inc(&fault->obj.users);
396 	iommufd_ctx_get(fault->ictx);
397 	fault->filep = filep;
398 
399 	fdno = get_unused_fd_flags(O_CLOEXEC);
400 	if (fdno < 0) {
401 		rc = fdno;
402 		goto out_fput;
403 	}
404 
405 	cmd->out_fault_id = fault->obj.id;
406 	cmd->out_fault_fd = fdno;
407 
408 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
409 	if (rc)
410 		goto out_put_fdno;
411 	iommufd_object_finalize(ucmd->ictx, &fault->obj);
412 
413 	fd_install(fdno, fault->filep);
414 
415 	return 0;
416 out_put_fdno:
417 	put_unused_fd(fdno);
418 out_fput:
419 	fput(filep);
420 	refcount_dec(&fault->obj.users);
421 	iommufd_ctx_put(fault->ictx);
422 out_abort:
423 	iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);
424 
425 	return rc;
426 }
427 
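/*
 * Deliver an incoming I/O page fault group: look up the fault object of the
 * hwpt the faulting device is attached to, queue the group on its deliver
 * list and wake up any poll()/read() waiters.
 */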
428 int iommufd_fault_iopf_handler(struct iopf_group *group)
429 {
430 	struct iommufd_hw_pagetable *hwpt;
431 	struct iommufd_fault *fault;
432 
433 	hwpt = group->attach_handle->domain->fault_data;
434 	fault = hwpt->fault;
435 
436 	mutex_lock(&fault->mutex);
437 	list_add_tail(&group->node, &fault->deliver);
438 	mutex_unlock(&fault->mutex);
439 
440 	wake_up_interruptible(&fault->wait_queue);
441 
442 	return 0;
443 }
444