/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H

#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"

struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;
struct dma_buf_attachment;

struct iommufd_sw_msi_map {
	struct list_head sw_msi_item;
	phys_addr_t sw_msi_start;
	phys_addr_t msi_addr;
	unsigned int pgoff;
	unsigned int id;
};

/* Bitmap of struct iommufd_sw_msi_map::id */
struct iommufd_sw_msi_maps {
	DECLARE_BITMAP(bitmap, 64);
};

#ifdef CONFIG_IRQ_MSI_IOMMU
int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map);
#endif

struct iommufd_ctx {
	struct file *file;
	struct xarray objects;
	struct xarray groups;
	wait_queue_head_t destroy_wait;
	struct rw_semaphore ioas_creation_lock;
	struct maple_tree mt_mmap;

	struct mutex sw_msi_lock;
	struct list_head sw_msi_list;
	unsigned int sw_msi_id;

	u8 account_mode;
	/* Compatibility with VFIO no iommu */
	u8 no_iommu_mode;
	struct iommufd_ioas *vfio_ioas;
};

/* Entry for iommufd_ctx::mt_mmap */
struct iommufd_mmap {
	struct iommufd_object *owner;

	/* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
	unsigned long vm_pgoff;

	/* Physical range for io_remap_pfn_range() */
	phys_addr_t mmio_addr;
	size_t length;
};

/*
 * The IOVA to PFN map. The map automatically copies the PFNs into multiple
 * domains and permits sharing of PFNs between io_pagetable instances. This
 * supports both a design where IOAS's are 1:1 with a domain (eg because the
 * domain is HW customized) and a design where the IOAS is 1:N with multiple
 * generic domains. The io_pagetable holds an interval tree of iopt_areas which
 * point to shared iopt_pages which hold the pfns mapped to the page table.
 *
 * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex
 */
struct io_pagetable {
	struct rw_semaphore domains_rwsem;
	struct xarray domains;
	struct xarray access_list;
	unsigned int next_domain_id;

	struct rw_semaphore iova_rwsem;
	struct rb_root_cached area_itree;
	/* IOVA that cannot become reserved, struct iopt_allowed */
	struct rb_root_cached allowed_itree;
	/* IOVA that cannot be allocated, struct iopt_reserved */
	struct rb_root_cached reserved_itree;
	u8 disable_large_pages;
	unsigned long iova_alignment;
};
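
/*
 * Illustrative sketch (not part of this header) of the locking order stated
 * above, for a hypothetical walker that touches both the domain list and the
 * IOVA tree. The iopt_pages type and its mutex live in io_pagetable.h; the
 * example_walk() name is an assumption made only for the example.
 *
 *	static void example_walk(struct io_pagetable *iopt)
 *	{
 *		down_read(&iopt->domains_rwsem);
 *		down_read(&iopt->iova_rwsem);
 *		// ... walk area_itree, and only then take a pages->mutex ...
 *		up_read(&iopt->iova_rwsem);
 *		up_read(&iopt->domains_rwsem);
 *	}
 */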
98 
99 void iopt_init_table(struct io_pagetable *iopt);
100 void iopt_destroy_table(struct io_pagetable *iopt);
101 int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
102 		   unsigned long length, struct list_head *pages_list);
103 void iopt_free_pages_list(struct list_head *pages_list);
104 enum {
105 	IOPT_ALLOC_IOVA = 1 << 0,
106 };
107 int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
108 			unsigned long *iova, void __user *uptr,
109 			unsigned long length, int iommu_prot,
110 			unsigned int flags);
111 int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
112 			unsigned long *iova, int fd,
113 			unsigned long start, unsigned long length,
114 			int iommu_prot, unsigned int flags);
115 int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
116 		   unsigned long length, unsigned long *dst_iova,
117 		   int iommu_prot, unsigned int flags);
118 int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
119 		    unsigned long length, unsigned long *unmapped);
120 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
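
/*
 * Illustrative sketch (not compiled here): mapping user memory and unmapping
 * it again, assuming IOPT_ALLOC_IOVA asks the io_pagetable to pick the IOVA.
 * The surrounding ictx/iopt/uptr/length variables, the prot value and the
 * error handling are assumptions made only for the example.
 *
 *	unsigned long iova = 0;
 *	unsigned long unmapped;
 *	int rc;
 *
 *	rc = iopt_map_user_pages(ictx, iopt, &iova, uptr, length,
 *				 IOMMU_READ | IOMMU_WRITE, IOPT_ALLOC_IOVA);
 *	if (rc)
 *		return rc;
 *	// ... use the mapping, then tear it down ...
 *	rc = iopt_unmap_iova(iopt, iova, length, &unmapped);
 */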

int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap);
int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable);

void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner);
void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner);
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas);
void iopt_enable_large_pages(struct io_pagetable *iopt);
int iopt_disable_large_pages(struct io_pagetable *iopt);

struct iommufd_ucmd {
	struct iommufd_ctx *ictx;
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
	struct iommufd_object *new_obj;
};

int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
		       unsigned long arg);

/* Copy the response in ucmd->cmd back to userspace. */
static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
				       size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}
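
/*
 * Illustrative sketch (not compiled here) of the usual ioctl handler shape:
 * the core has already copied the user structure into ucmd->cmd, the handler
 * fills in the output fields and calls iommufd_ucmd_respond() to copy the
 * result back. iommufd_example_ioctl() and struct iommu_example_cmd are
 * hypothetical names used only for the example.
 *
 *	int iommufd_example_ioctl(struct iommufd_ucmd *ucmd)
 *	{
 *		struct iommu_example_cmd *cmd = ucmd->cmd;
 *
 *		if (cmd->flags)
 *			return -EOPNOTSUPP;
 *		cmd->out_value = 42;
 *		return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 *	}
 */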

static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
	if (!refcount_inc_not_zero(&obj->users))
		return false;
	if (!refcount_inc_not_zero(&obj->wait_cnt)) {
		/*
		 * If the caller doesn't already have a ref on obj this must be
		 * called under the xa_lock. Otherwise the caller is holding a
		 * ref on users. Thus it cannot be one before this decrement.
		 */
		refcount_dec(&obj->users);
		return false;
	}
	return true;
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type);
static inline void iommufd_put_object(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	/*
	 * Users first, then wait_cnt so that REMOVE_WAIT never sees a spurious
	 * !0 users with a 0 wait_cnt.
	 */
	refcount_dec(&obj->users);
	if (refcount_dec_and_test(&obj->wait_cnt))
		wake_up_interruptible_all(&ictx->destroy_wait);
}
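
/*
 * Illustrative sketch (not compiled here): the common lookup/use/put pattern
 * built on iommufd_get_object()/iommufd_put_object(). The typed getters below
 * (eg iommufd_get_ioas()) wrap the same pattern with container_of().
 *
 *	struct iommufd_object *obj;
 *
 *	obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	// ... the object cannot be destroyed while the reference is held ...
 *	iommufd_put_object(ictx, obj);
 */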

void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj);

enum {
	REMOVE_WAIT		= BIT(0),
	REMOVE_OBJ_TOMBSTONE	= BIT(1),
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags);

/*
 * The caller holds a users refcount and wants to destroy the object. At this
 * point the caller has no wait_cnt reference and at least the xarray will be
 * holding one.
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * Similar to iommufd_object_destroy_user(), except that the object ID is left
 * reserved/tombstoned.
 */
static inline void iommufd_object_tombstone_user(struct iommufd_ctx *ictx,
						 struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id,
				    REMOVE_WAIT | REMOVE_OBJ_TOMBSTONE);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * The HWPT allocated by autodomains is used in possibly many devices and
 * is automatically destroyed when its refcount reaches zero.
 *
 * If userspace uses the HWPT manually, even for a short term, then it will
 * disrupt this refcounting and the auto-free in the kernel will not work.
 * Userspace that tries to use the automatically allocated HWPT must be careful
 * to ensure that it is consistently destroyed, eg by not racing accesses
 * and by not attaching an automatic HWPT to a device manually.
 */
static inline void
iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj)
{
	iommufd_object_remove(ictx, obj, obj->id, 0);
}

/*
 * Callers of these normal object allocators must call iommufd_object_finalize()
 * to finalize the object, or call iommufd_object_abort_and_destroy() to revert
 * the allocation.
 */
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);

#define __iommufd_object_alloc(ictx, ptr, type, obj)                           \
	container_of(_iommufd_object_alloc(                                    \
			     ictx,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc(ictx, ptr, type) \
	__iommufd_object_alloc(ictx, ptr, type, obj)
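
/*
 * Illustrative sketch (not compiled here) of the allocate/finalize lifecycle
 * these macros imply: the new object is not visible to other threads until
 * iommufd_object_finalize() publishes it, and a failed setup path must undo
 * the allocation with iommufd_object_abort_and_destroy(). struct
 * iommufd_example, example_setup() and IOMMUFD_OBJ_EXAMPLE are hypothetical
 * names used only for the example.
 *
 *	struct iommufd_example {
 *		struct iommufd_object obj;
 *		int value;
 *	};
 *
 *	struct iommufd_example *ex;
 *	int rc;
 *
 *	ex = iommufd_object_alloc(ictx, ex, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(ex))
 *		return PTR_ERR(ex);
 *	rc = example_setup(ex);
 *	if (rc) {
 *		iommufd_object_abort_and_destroy(ictx, &ex->obj);
 *		return rc;
 *	}
 *	iommufd_object_finalize(ictx, &ex->obj);
 */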

/*
 * Callers of these _ucmd allocators should not call iommufd_object_finalize()
 * or iommufd_object_abort_and_destroy(), as the core automatically does that.
 */
struct iommufd_object *
_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
			   enum iommufd_object_type type);

#define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)                      \
	container_of(_iommufd_object_alloc_ucmd(                               \
			     ucmd,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc_ucmd(ucmd, ptr, type) \
	__iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)
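
/*
 * Illustrative sketch (not compiled here): inside an ioctl handler the _ucmd
 * variant is simpler because, per the comment above, the core finalizes or
 * aborts the object itself based on the handler's return value; the handler
 * only allocates and fills it in. The example type, enum value and command
 * fields are hypothetical.
 *
 *	struct iommufd_example *ex;
 *
 *	ex = iommufd_object_alloc_ucmd(ucmd, ex, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(ex))
 *		return PTR_ERR(ex);
 *	ex->value = cmd->value;
 *	cmd->out_example_id = ex->obj.id;
 *	return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 */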

/*
 * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
 * io_pagetable object. It is a user-controlled mapping of IOVA -> PFNs. The
 * mapping is copied into all of the associated domains and made available to
 * in-kernel users.
 *
 * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
 * object. When we go to attach a device to an IOAS we need to get an
 * iommu_domain and its wrapping iommufd_hw_pagetable.
 *
 * An iommu_domain & iommufd_hw_pagetable will be automatically selected
 * for a device based on the hwpt_list. If no suitable iommu_domain
 * is found a new iommu_domain will be created.
 */
struct iommufd_ioas {
	struct iommufd_object obj;
	struct io_pagetable iopt;
	struct mutex mutex;
	struct list_head hwpt_list;
};
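
/*
 * Illustrative sketch (not compiled here), assuming iommufd_ioas::mutex is
 * what protects hwpt_list (the list head that iommufd_hwpt_paging::hwpt_item
 * below chains onto): walking the auto-domain candidates of an IOAS.
 *
 *	struct iommufd_hwpt_paging *hwpt_paging;
 *
 *	mutex_lock(&ioas->mutex);
 *	list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item) {
 *		// ... test whether this domain suits the device ...
 *	}
 *	mutex_unlock(&ioas->mutex);
 */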

static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
						    u32 id)
{
	return container_of(iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS),
			    struct iommufd_ioas, obj);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx);
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_ioas_destroy(struct iommufd_object *obj);
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx);

int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap);

/*
 * A HW pagetable is called an iommu_domain inside the kernel. This user object
 * allows directly creating and inspecting the domains. Domains that have kernel
 * owned page tables will be associated with an iommufd_ioas that provides the
 * IOVA to PFN map.
 */
struct iommufd_hw_pagetable {
	struct iommufd_object obj;
	struct iommu_domain *domain;
	struct iommufd_fault *fault;
	bool pasid_compat : 1;
};

struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;
	/* Head at iommufd_ioas::hwpt_list */
	struct list_head hwpt_item;
	struct iommufd_sw_msi_maps present_sw_msi;
};

struct iommufd_hwpt_nested {
	struct iommufd_hw_pagetable common;
	struct iommufd_hwpt_paging *parent;
	struct iommufd_viommu *viommu;
};

static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING;
}

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}

static inline struct iommufd_hwpt_nested *
to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_nested, common);
}

static inline struct iommufd_hwpt_paging *
find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	switch (hwpt->obj.type) {
	case IOMMUFD_OBJ_HWPT_PAGING:
		return to_hwpt_paging(hwpt);
	case IOMMUFD_OBJ_HWPT_NESTED:
		return to_hwpt_nested(hwpt)->parent;
	default:
		return NULL;
	}
}
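
/*
 * Illustrative sketch (not compiled here): whether a HWPT is a paging or a
 * nested one, find_hwpt_paging() resolves the paging HWPT that ultimately
 * backs it, which is the one carrying the IOAS.
 *
 *	struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
 *	struct iommufd_ioas *ioas = NULL;
 *
 *	if (hwpt_paging)
 *		ioas = hwpt_paging->ioas;
 *	// ... other object types yield NULL ...
 */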

static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_PAGING),
			    struct iommufd_hwpt_paging, common.obj);
}

static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_NESTED),
			    struct iommufd_hw_pagetable, obj);
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);

struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, ioasid_t pasid,
			  u32 flags, bool immediate_attach,
			  const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);

static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
					    struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);

		if (hwpt_paging->auto_domain) {
			lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
			iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
			return;
		}
	}
	refcount_dec(&hwpt->obj.users);
}

struct iommufd_attach;

struct iommufd_group {
	struct kref ref;
	struct mutex lock;
	struct iommufd_ctx *ictx;
	struct iommu_group *group;
	struct xarray pasid_attach;
	struct iommufd_sw_msi_maps required_sw_msi;
	phys_addr_t sw_msi_start;
};

/*
 * An iommufd_device object represents the binding relationship between a
 * consuming driver and the iommufd. These objects are created/destroyed by
 * external drivers, not by userspace.
 */
struct iommufd_device {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_group *igroup;
	struct list_head group_item;
	/* always the physical device */
	struct device *dev;
	bool enforce_cache_coherency;
	struct iommufd_vdevice *vdev;
	bool destroying;
};

static inline struct iommufd_device *
iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_DEVICE),
			    struct iommufd_device, obj);
}

void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

struct device *iommufd_global_device(void);

struct iommufd_access {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_ioas *ioas;
	struct iommufd_ioas *ioas_unpin;
	struct mutex ioas_lock;
	const struct iommufd_access_ops *ops;
	void *data;
	unsigned long iova_alignment;
	u32 iopt_access_list_id;
};

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

/* iommufd_access for internal use */
static inline bool iommufd_access_is_internal(struct iommufd_access *access)
{
	return !access->ictx;
}

struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);

static inline void
iommufd_access_destroy_internal(struct iommufd_ctx *ictx,
				struct iommufd_access *access)
{
	iommufd_object_destroy_user(ictx, &access->obj);
}

int iommufd_access_attach_internal(struct iommufd_access *access,
				   struct iommufd_ioas *ioas);

static inline void iommufd_access_detach_internal(struct iommufd_access *access)
{
	iommufd_access_detach(access);
}
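
/*
 * Illustrative sketch (not compiled here) of the internal-access lifecycle
 * these helpers give in-kernel users, assuming ictx and ioas are already held
 * by the caller and that the usual ERR_PTR convention applies:
 *
 *	struct iommufd_access *access;
 *	int rc;
 *
 *	access = iommufd_access_create_internal(ictx);
 *	if (IS_ERR(access))
 *		return PTR_ERR(access);
 *	rc = iommufd_access_attach_internal(access, ioas);
 *	if (rc) {
 *		iommufd_access_destroy_internal(ictx, access);
 *		return rc;
 *	}
 *	// ... pin/unpin through the access, then tear it down ...
 *	iommufd_access_detach_internal(access);
 *	iommufd_access_destroy_internal(ictx, access);
 */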

struct iommufd_eventq {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct file *filep;

	spinlock_t lock; /* protects the deliver list */
	struct list_head deliver;

	struct wait_queue_head wait_queue;
};

struct iommufd_attach_handle {
	struct iommu_attach_handle handle;
	struct iommufd_device *idev;
};

/* Convert an iommu attach handle to an iommufd handle. */
#define to_iommufd_handle(hdl)	container_of(hdl, struct iommufd_attach_handle, handle)

/*
 * An iommufd_fault object represents an interface to deliver I/O page faults
 * to user space. These objects are created/destroyed by user space and
 * associated with hardware page table objects during page-table allocation.
 */
struct iommufd_fault {
	struct iommufd_eventq common;
	struct mutex mutex; /* serializes response flows */
	struct xarray response;
};

static inline struct iommufd_fault *
eventq_to_fault(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_fault, common);
}

static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_FAULT),
			    struct iommufd_fault, common.obj);
}

int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle);

/* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
struct iommufd_vevent {
	struct iommufd_vevent_header header;
	struct list_head node; /* for iommufd_eventq::deliver */
	ssize_t data_len;
	u64 event_data[] __counted_by(data_len);
};

#define vevent_for_lost_events_header(vevent) \
	(vevent->header.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)

/*
 * An iommufd_veventq object represents an interface to deliver vIOMMU events
 * to user space. It is created/destroyed by user space and associated with a
 * vIOMMU object during allocation.
 */
struct iommufd_veventq {
	struct iommufd_eventq common;
	struct iommufd_viommu *viommu;
	struct list_head node; /* for iommufd_viommu::veventqs */

	enum iommu_veventq_type type;
	unsigned int depth;

	/* Use common.lock for protection */
	u32 num_events;
	u32 sequence;

	/* Must be last as it ends in a flexible-array member. */
	struct iommufd_vevent lost_events_header;
};

static inline struct iommufd_veventq *
eventq_to_veventq(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_veventq, common);
}

static inline struct iommufd_veventq *
iommufd_get_veventq(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VEVENTQ),
			    struct iommufd_veventq, common.obj);
}

int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd);
void iommufd_veventq_destroy(struct iommufd_object *obj);
void iommufd_veventq_abort(struct iommufd_object *obj);

static inline void iommufd_vevent_handler(struct iommufd_veventq *veventq,
					  struct iommufd_vevent *vevent)
{
	struct iommufd_eventq *eventq = &veventq->common;

	lockdep_assert_held(&eventq->lock);

	/*
	 * Remove the lost_events_header and add the new node at the same time.
	 * Note the new node can be lost_events_header, for a sequence update.
	 */
	if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
		list_del(&veventq->lost_events_header.node);
	list_add_tail(&vevent->node, &eventq->deliver);
	vevent->header.sequence = veventq->sequence;
	veventq->sequence = (veventq->sequence + 1) & INT_MAX;

	wake_up_interruptible(&eventq->wait_queue);
}
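
/*
 * Illustrative sketch (not compiled here): iommufd_vevent_handler() expects
 * the caller to already hold the eventq spinlock, as the lockdep assertion
 * above enforces. A hypothetical producer would look roughly like:
 *
 *	spin_lock(&veventq->common.lock);
 *	iommufd_vevent_handler(veventq, vevent);
 *	spin_unlock(&veventq->common.lock);
 */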

static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VIOMMU),
			    struct iommufd_viommu, obj);
}

static inline struct iommufd_veventq *
iommufd_viommu_find_veventq(struct iommufd_viommu *viommu,
			    enum iommu_veventq_type type)
{
	struct iommufd_veventq *veventq, *next;

	lockdep_assert_held(&viommu->veventqs_rwsem);

	list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
		if (veventq->type == type)
			return veventq;
	}
	return NULL;
}
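
/*
 * Illustrative sketch (not compiled here): looking up a queue by type while
 * holding iommufd_viommu::veventqs_rwsem, which the lockdep assertion above
 * requires. Read vs. write locking depends on whether the caller may modify
 * the veventqs list; a read-side lookup is assumed here.
 *
 *	struct iommufd_veventq *veventq;
 *
 *	down_read(&viommu->veventqs_rwsem);
 *	veventq = iommufd_viommu_find_veventq(viommu, type);
 *	// ... use veventq while the rwsem is held ...
 *	up_read(&viommu->veventqs_rwsem);
 */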

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);
void iommufd_vdevice_abort(struct iommufd_object *obj);
int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_hw_queue_destroy(struct iommufd_object *obj);

static inline struct iommufd_vdevice *
iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id)
{
	return container_of(iommufd_get_object(ictx, id,
					       IOMMUFD_OBJ_VDEVICE),
			    struct iommufd_vdevice, obj);
}

#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				     struct phys_vec *phys);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
						 unsigned int ioas_id,
						 u64 *iova, u32 *flags)
{
}
static inline bool iommufd_should_fail(void)
{
	return false;
}
static inline int __init iommufd_test_init(void)
{
	return 0;
}
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return false;
}
static inline int
iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				 struct phys_vec *phys)
{
	return -EOPNOTSUPP;
}
#endif
#endif