// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation
 */
#define pr_fmt(fmt) "iommufd: " fmt

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "iommufd_private.h"

/* IOMMUFD_OBJ_FAULT Functions */
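/*
 * Auto-respond to every page fault group that is still pending for @handle,
 * whether it is queued on the deliver list or parked in the response xarray
 * awaiting a userspace reply. Each group is completed with
 * IOMMU_PAGE_RESP_INVALID and freed, so a detaching device does not leave
 * faults stranded in the queue.
 */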
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle)
{
	struct iommufd_fault *fault = hwpt->fault;
	struct iopf_group *group, *next;
	struct list_head free_list;
	unsigned long index;

	if (!fault || !handle)
		return;
	INIT_LIST_HEAD(&free_list);

	mutex_lock(&fault->mutex);
	spin_lock(&fault->common.lock);
	list_for_each_entry_safe(group, next, &fault->common.deliver, node) {
		if (group->attach_handle != &handle->handle)
			continue;
		list_move(&group->node, &free_list);
	}
	spin_unlock(&fault->common.lock);

	list_for_each_entry_safe(group, next, &free_list, node) {
		list_del(&group->node);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}

	xa_for_each(&fault->response, index, group) {
		if (group->attach_handle != &handle->handle)
			continue;
		xa_erase(&fault->response, index);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}
	mutex_unlock(&fault->mutex);
}

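/*
 * Object destroy callback for IOMMUFD_OBJ_FAULT: respond to and free anything
 * still sitting on the deliver list or in the response xarray, then release
 * the xarray and mutex.
 */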
void iommufd_fault_destroy(struct iommufd_object *obj)
{
	struct iommufd_eventq *eventq =
		container_of(obj, struct iommufd_eventq, obj);
	struct iommufd_fault *fault = eventq_to_fault(eventq);
	struct iopf_group *group, *next;
	unsigned long index;

	/*
	 * The iommufd object's reference count is zero at this point.
	 * We can be confident that no other threads are currently
	 * accessing this pointer. Therefore, acquiring the mutex here
	 * is unnecessary.
	 */
	list_for_each_entry_safe(group, next, &fault->common.deliver, node) {
		list_del(&group->node);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}
	xa_for_each(&fault->response, index, group) {
		xa_erase(&fault->response, index);
		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
		iopf_free_group(group);
	}
	xa_destroy(&fault->response);
	mutex_destroy(&fault->mutex);
}

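/*
 * Translate a kernel struct iommu_fault into the uAPI struct
 * iommu_hwpt_pgfault handed to userspace, tagging it with the device object
 * ID and the response cookie of its fault group.
 */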
static void iommufd_compose_fault_message(struct iommu_fault *fault,
					  struct iommu_hwpt_pgfault *hwpt_fault,
					  struct iommufd_device *idev,
					  u32 cookie)
{
	hwpt_fault->flags = fault->prm.flags;
	hwpt_fault->dev_id = idev->obj.id;
	hwpt_fault->pasid = fault->prm.pasid;
	hwpt_fault->grpid = fault->prm.grpid;
	hwpt_fault->perm = fault->prm.perm;
	hwpt_fault->addr = fault->prm.addr;
	hwpt_fault->length = 0;
	hwpt_fault->cookie = cookie;
}

/* Fetch the first node out of the fault->deliver list */
static struct iopf_group *
iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
{
	struct list_head *list = &fault->common.deliver;
	struct iopf_group *group = NULL;

	spin_lock(&fault->common.lock);
	if (!list_empty(list)) {
		group = list_first_entry(list, struct iopf_group, node);
		list_del(&group->node);
	}
	spin_unlock(&fault->common.lock);
	return group;
}

/* Restore a node back to the head of the fault->deliver list */
static void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
					  struct iopf_group *group)
{
	spin_lock(&fault->common.lock);
	list_add(&group->node, &fault->common.deliver);
	spin_unlock(&fault->common.lock);
}

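/*
 * read() on a fault queue FD returns whole struct iommu_hwpt_pgfault records,
 * one per iopf_fault in a delivered group. A group is only pulled off the
 * deliver list if all of its faults fit in the remaining buffer; before its
 * records are copied out it is parked in the response xarray under a freshly
 * allocated cookie, where it waits for userspace to write a response back.
 */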
static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
				       size_t count, loff_t *ppos)
{
	size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
	struct iommufd_eventq *eventq = filep->private_data;
	struct iommufd_fault *fault = eventq_to_fault(eventq);
	struct iommu_hwpt_pgfault data = {};
	struct iommufd_device *idev;
	struct iopf_group *group;
	struct iopf_fault *iopf;
	size_t done = 0;
	int rc = 0;

	if (*ppos || count % fault_size)
		return -ESPIPE;

	mutex_lock(&fault->mutex);
	while ((group = iommufd_fault_deliver_fetch(fault))) {
		if (done >= count ||
		    group->fault_count * fault_size > count - done) {
			iommufd_fault_deliver_restore(fault, group);
			break;
		}

		rc = xa_alloc(&fault->response, &group->cookie, group,
			      xa_limit_32b, GFP_KERNEL);
		if (rc) {
			iommufd_fault_deliver_restore(fault, group);
			break;
		}

		idev = to_iommufd_handle(group->attach_handle)->idev;
		list_for_each_entry(iopf, &group->faults, list) {
			iommufd_compose_fault_message(&iopf->fault,
						      &data, idev,
						      group->cookie);
			if (copy_to_user(buf + done, &data, fault_size)) {
				xa_erase(&fault->response, group->cookie);
				iommufd_fault_deliver_restore(fault, group);
				rc = -EFAULT;
				break;
			}
			done += fault_size;
		}
	}
	mutex_unlock(&fault->mutex);

	return done == 0 ? rc : done;
}

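/*
 * write() on a fault queue FD consumes whole struct iommu_hwpt_page_response
 * records. Each record's cookie looks up the fault group stashed in the
 * response xarray by a prior read(); the group is then completed with the
 * given response code and freed.
 */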
static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf,
					size_t count, loff_t *ppos)
{
	size_t response_size = sizeof(struct iommu_hwpt_page_response);
	struct iommufd_eventq *eventq = filep->private_data;
	struct iommufd_fault *fault = eventq_to_fault(eventq);
	struct iommu_hwpt_page_response response;
	struct iopf_group *group;
	size_t done = 0;
	int rc = 0;

	if (*ppos || count % response_size)
		return -ESPIPE;

	mutex_lock(&fault->mutex);
	while (count > done) {
		rc = copy_from_user(&response, buf + done, response_size);
		if (rc)
			break;

		static_assert((int)IOMMUFD_PAGE_RESP_SUCCESS ==
			      (int)IOMMU_PAGE_RESP_SUCCESS);
		static_assert((int)IOMMUFD_PAGE_RESP_INVALID ==
			      (int)IOMMU_PAGE_RESP_INVALID);
		if (response.code != IOMMUFD_PAGE_RESP_SUCCESS &&
		    response.code != IOMMUFD_PAGE_RESP_INVALID) {
			rc = -EINVAL;
			break;
		}

		group = xa_erase(&fault->response, response.cookie);
		if (!group) {
			rc = -EINVAL;
			break;
		}

		iopf_group_response(group, response.code);
		iopf_free_group(group);
		done += response_size;
	}
	mutex_unlock(&fault->mutex);

	return done == 0 ? rc : done;
}

/* IOMMUFD_OBJ_VEVENTQ Functions */

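/*
 * Drain every queued vEVENT and unlink the queue from its vIOMMU. The
 * preallocated lost_events_header is embedded in the veventq and therefore
 * must not be kfree()d. Callers must hold viommu->veventqs_rwsem for write.
 */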
void iommufd_veventq_abort(struct iommufd_object *obj)
{
	struct iommufd_eventq *eventq =
		container_of(obj, struct iommufd_eventq, obj);
	struct iommufd_veventq *veventq = eventq_to_veventq(eventq);
	struct iommufd_viommu *viommu = veventq->viommu;
	struct iommufd_vevent *cur, *next;

	lockdep_assert_held_write(&viommu->veventqs_rwsem);

	list_for_each_entry_safe(cur, next, &eventq->deliver, node) {
		list_del(&cur->node);
		if (cur != &veventq->lost_events_header)
			kfree(cur);
	}

	refcount_dec(&viommu->obj.users);
	list_del(&veventq->node);
}

void iommufd_veventq_destroy(struct iommufd_object *obj)
{
	struct iommufd_veventq *veventq = eventq_to_veventq(
		container_of(obj, struct iommufd_eventq, obj));

	down_write(&veventq->viommu->veventqs_rwsem);
	iommufd_veventq_abort(obj);
	up_write(&veventq->viommu->veventqs_rwsem);
}

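/*
 * Pop the first vEVENT off the deliver list. The lost_events_header is a
 * singleton embedded in the veventq, so when it sits at the head a heap copy
 * is handed out instead, letting the caller copy_to_user() and kfree() it
 * like any other event.
 */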
static struct iommufd_vevent *
iommufd_veventq_deliver_fetch(struct iommufd_veventq *veventq)
{
	struct iommufd_eventq *eventq = &veventq->common;
	struct list_head *list = &eventq->deliver;
	struct iommufd_vevent *vevent = NULL;

	spin_lock(&eventq->lock);
	if (!list_empty(list)) {
		struct iommufd_vevent *next;

		next = list_first_entry(list, struct iommufd_vevent, node);
		/* Make a copy of the lost_events_header for copy_to_user */
		if (next == &veventq->lost_events_header) {
			vevent = kzalloc(sizeof(*vevent), GFP_ATOMIC);
			if (!vevent)
				goto out_unlock;
		}
		list_del(&next->node);
		if (vevent)
			memcpy(vevent, next, sizeof(*vevent));
		else
			vevent = next;
	}
out_unlock:
	spin_unlock(&eventq->lock);
	return vevent;
}

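/*
 * Put a fetched vEVENT back at the head of the deliver list. A copy of the
 * lost_events_header is freed; the embedded header itself is only re-added if
 * the list would otherwise be left empty.
 */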
static void iommufd_veventq_deliver_restore(struct iommufd_veventq *veventq,
					    struct iommufd_vevent *vevent)
{
	struct iommufd_eventq *eventq = &veventq->common;
	struct list_head *list = &eventq->deliver;

	spin_lock(&eventq->lock);
	if (vevent_for_lost_events_header(vevent)) {
		/* Remove the copy of the lost_events_header */
		kfree(vevent);
		vevent = NULL;
		/* An empty list needs the lost_events_header back */
		if (list_empty(list))
			vevent = &veventq->lost_events_header;
	}
	if (vevent)
		list_add(&vevent->node, list);
	spin_unlock(&eventq->lock);
}

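/*
 * read() on a vEVENTQ FD returns a stream of records, each a struct
 * iommufd_vevent_header optionally followed by data_len bytes of event data.
 * A lost-events header carries no payload. A record is emitted only if it
 * fits completely in the remaining buffer; otherwise it is restored so a
 * later read() can retry.
 */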
static ssize_t iommufd_veventq_fops_read(struct file *filep, char __user *buf,
					 size_t count, loff_t *ppos)
{
	struct iommufd_eventq *eventq = filep->private_data;
	struct iommufd_veventq *veventq = eventq_to_veventq(eventq);
	struct iommufd_vevent_header *hdr;
	struct iommufd_vevent *cur;
	size_t done = 0;
	int rc = 0;

	if (*ppos)
		return -ESPIPE;

	while ((cur = iommufd_veventq_deliver_fetch(veventq))) {
		/* Validate the remaining bytes against the header size */
		if (done >= count || sizeof(*hdr) > count - done) {
			iommufd_veventq_deliver_restore(veventq, cur);
			break;
		}
		hdr = &cur->header;

		/* For a normal vEVENT, also validate against the full size */
		if (!vevent_for_lost_events_header(cur) &&
		    sizeof(*hdr) + cur->data_len > count - done) {
			iommufd_veventq_deliver_restore(veventq, cur);
			break;
		}

		if (copy_to_user(buf + done, hdr, sizeof(*hdr))) {
			iommufd_veventq_deliver_restore(veventq, cur);
			rc = -EFAULT;
			break;
		}
		done += sizeof(*hdr);

		if (cur->data_len &&
		    copy_to_user(buf + done, cur->event_data, cur->data_len)) {
			iommufd_veventq_deliver_restore(veventq, cur);
			rc = -EFAULT;
			break;
		}
		spin_lock(&eventq->lock);
		if (!vevent_for_lost_events_header(cur))
			veventq->num_events--;
		spin_unlock(&eventq->lock);
		done += cur->data_len;
		kfree(cur);
	}

	return done == 0 ? rc : done;
}

/* Common Event Queue Functions */

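/*
 * poll() support shared by both queue types: EPOLLIN when the deliver list
 * has something to read, plus EPOLLOUT for fault queues since they also
 * accept written responses.
 */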
static __poll_t iommufd_eventq_fops_poll(struct file *filep,
					 struct poll_table_struct *wait)
{
	struct iommufd_eventq *eventq = filep->private_data;
	__poll_t pollflags = 0;

	if (eventq->obj.type == IOMMUFD_OBJ_FAULT)
		pollflags |= EPOLLOUT;

	poll_wait(filep, &eventq->wait_queue, wait);
	spin_lock(&eventq->lock);
	if (!list_empty(&eventq->deliver))
		pollflags |= EPOLLIN | EPOLLRDNORM;
	spin_unlock(&eventq->lock);

	return pollflags;
}

static int iommufd_eventq_fops_release(struct inode *inode, struct file *filep)
{
	struct iommufd_eventq *eventq = filep->private_data;

	refcount_dec(&eventq->obj.users);
	iommufd_ctx_put(eventq->ictx);
	return 0;
}

#define INIT_EVENTQ_FOPS(read_op, write_op)                                    \
	((const struct file_operations){                                       \
		.owner = THIS_MODULE,                                          \
		.open = nonseekable_open,                                      \
		.read = read_op,                                               \
		.write = write_op,                                             \
		.poll = iommufd_eventq_fops_poll,                              \
		.release = iommufd_eventq_fops_release,                        \
	})

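/*
 * Common setup for a new event queue: initialize the lock, deliver list and
 * wait queue, back the queue with an anonymous inode file, and reserve an fd
 * number for it. The file holds a user reference on the object and on the
 * iommufd context until release.
 */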
static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
			       struct iommufd_ctx *ictx,
			       const struct file_operations *fops)
{
	struct file *filep;
	int fdno;

	spin_lock_init(&eventq->lock);
	INIT_LIST_HEAD(&eventq->deliver);
	init_waitqueue_head(&eventq->wait_queue);

	filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
	if (IS_ERR(filep))
		return PTR_ERR(filep);

	eventq->ictx = ictx;
	iommufd_ctx_get(eventq->ictx);
	eventq->filep = filep;
	refcount_inc(&eventq->obj.users);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0)
		fput(filep);
	return fdno;
}

static const struct file_operations iommufd_fault_fops =
	INIT_EVENTQ_FOPS(iommufd_fault_fops_read, iommufd_fault_fops_write);

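/*
 * Allocate an IOMMUFD_OBJ_FAULT object for the fault queue allocation ioctl,
 * returning both the object ID and a read/write file descriptor to userspace.
 * The fd is only installed after the ioctl reply has been delivered, so a
 * failed reply can still unwind cleanly.
 */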
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_fault_alloc *cmd = ucmd->cmd;
	struct iommufd_fault *fault;
	int fdno;
	int rc;

	if (cmd->flags)
		return -EOPNOTSUPP;

	fault = __iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT,
				       common.obj);
	if (IS_ERR(fault))
		return PTR_ERR(fault);

	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
	mutex_init(&fault->mutex);

	fdno = iommufd_eventq_init(&fault->common, "[iommufd-pgfault]",
				   ucmd->ictx, &iommufd_fault_fops);
	if (fdno < 0) {
		rc = fdno;
		goto out_abort;
	}

	cmd->out_fault_id = fault->common.obj.id;
	cmd->out_fault_fd = fdno;

	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put_fdno;
	iommufd_object_finalize(ucmd->ictx, &fault->common.obj);

	fd_install(fdno, fault->common.filep);

	return 0;
out_put_fdno:
	put_unused_fd(fdno);
	fput(fault->common.filep);
out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &fault->common.obj);

	return rc;
}

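/*
 * IOPF handler for a fault-capable hw_pagetable: queue the incoming fault
 * group on the deliver list and wake up pollers of the fault queue FD.
 */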
int iommufd_fault_iopf_handler(struct iopf_group *group)
{
	struct iommufd_hw_pagetable *hwpt;
	struct iommufd_fault *fault;

	hwpt = group->attach_handle->domain->iommufd_hwpt;
	fault = hwpt->fault;

	spin_lock(&fault->common.lock);
	list_add_tail(&group->node, &fault->common.deliver);
	spin_unlock(&fault->common.lock);

	wake_up_interruptible(&fault->common.wait_queue);

	return 0;
}

static const struct file_operations iommufd_veventq_fops =
	INIT_EVENTQ_FOPS(iommufd_veventq_fops_read, NULL);

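/*
 * Allocate an IOMMUFD_OBJ_VEVENTQ bound to a vIOMMU for the vEVENTQ
 * allocation ioctl. Only one queue per event type may exist on a vIOMMU. The
 * embedded lost_events_header is pre-marked with
 * IOMMU_VEVENTQ_FLAG_LOST_EVENTS so it can later be delivered to signal
 * dropped events.
 */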
int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
{
	struct iommu_veventq_alloc *cmd = ucmd->cmd;
	struct iommufd_veventq *veventq;
	struct iommufd_viommu *viommu;
	int fdno;
	int rc;

	if (cmd->flags || cmd->__reserved ||
	    cmd->type == IOMMU_VEVENTQ_TYPE_DEFAULT)
		return -EOPNOTSUPP;
	if (!cmd->veventq_depth)
		return -EINVAL;

	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
	if (IS_ERR(viommu))
		return PTR_ERR(viommu);

	down_write(&viommu->veventqs_rwsem);

	if (iommufd_viommu_find_veventq(viommu, cmd->type)) {
		rc = -EEXIST;
		goto out_unlock_veventqs;
	}

	veventq = __iommufd_object_alloc(ucmd->ictx, veventq,
					 IOMMUFD_OBJ_VEVENTQ, common.obj);
	if (IS_ERR(veventq)) {
		rc = PTR_ERR(veventq);
		goto out_unlock_veventqs;
	}

	veventq->type = cmd->type;
	veventq->viommu = viommu;
	refcount_inc(&viommu->obj.users);
	veventq->depth = cmd->veventq_depth;
	list_add_tail(&veventq->node, &viommu->veventqs);
	veventq->lost_events_header.header.flags =
		IOMMU_VEVENTQ_FLAG_LOST_EVENTS;

	fdno = iommufd_eventq_init(&veventq->common, "[iommufd-viommu-event]",
				   ucmd->ictx, &iommufd_veventq_fops);
	if (fdno < 0) {
		rc = fdno;
		goto out_abort;
	}

	cmd->out_veventq_id = veventq->common.obj.id;
	cmd->out_veventq_fd = fdno;

	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_put_fdno;

	iommufd_object_finalize(ucmd->ictx, &veventq->common.obj);
	fd_install(fdno, veventq->common.filep);
	goto out_unlock_veventqs;

out_put_fdno:
	put_unused_fd(fdno);
	fput(veventq->common.filep);
out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
out_unlock_veventqs:
	up_write(&viommu->veventqs_rwsem);
	iommufd_put_object(ucmd->ictx, &viommu->obj);
	return rc;
}