/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H

#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"

struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;

struct iommufd_sw_msi_map {
	struct list_head sw_msi_item;
	phys_addr_t sw_msi_start;
	phys_addr_t msi_addr;
	unsigned int pgoff;
	unsigned int id;
};

/* Bitmap of struct iommufd_sw_msi_map::id */
struct iommufd_sw_msi_maps {
	DECLARE_BITMAP(bitmap, 64);
};

int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
		   phys_addr_t msi_addr);

struct iommufd_ctx {
	struct file *file;
	struct xarray objects;
	struct xarray groups;
	wait_queue_head_t destroy_wait;
	struct rw_semaphore ioas_creation_lock;

	struct mutex sw_msi_lock;
	struct list_head sw_msi_list;
	unsigned int sw_msi_id;

	u8 account_mode;
	/* Compatibility with VFIO no iommu */
	u8 no_iommu_mode;
	struct iommufd_ioas *vfio_ioas;
};

/*
 * The IOVA to PFN map. The map automatically copies the PFNs into multiple
 * domains and permits sharing of PFNs between io_pagetable instances. This
 * supports both a design where IOASes are 1:1 with a domain (eg because the
 * domain is HW customized), or where the IOAS is 1:N with multiple generic
 * domains.  The io_pagetable holds an interval tree of iopt_areas which point
 * to shared iopt_pages which hold the pfns mapped to the page table.
 *
 * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex
 */
struct io_pagetable {
	struct rw_semaphore domains_rwsem;
	struct xarray domains;
	struct xarray access_list;
	unsigned int next_domain_id;

	struct rw_semaphore iova_rwsem;
	struct rb_root_cached area_itree;
	/* IOVA that cannot become reserved, struct iopt_allowed */
	struct rb_root_cached allowed_itree;
	/* IOVA that cannot be allocated, struct iopt_reserved */
	struct rb_root_cached reserved_itree;
	u8 disable_large_pages;
	unsigned long iova_alignment;
};

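/*
 * Illustrative sketch: the comment above documents the locking order
 * domains_rwsem -> iova_rwsem -> pages::mutex. A reader that walks the IOVA
 * areas while keeping the domain list stable would therefore nest the locks
 * like this; iopt_walk_areas() is a hypothetical helper used only for
 * illustration.
 *
 *	static void example_walk(struct io_pagetable *iopt)
 *	{
 *		down_read(&iopt->domains_rwsem);
 *		down_read(&iopt->iova_rwsem);
 *		iopt_walk_areas(iopt);	// hypothetical; pages::mutex, if
 *					// needed, is taken last
 *		up_read(&iopt->iova_rwsem);
 *		up_read(&iopt->domains_rwsem);
 *	}
 */
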
void iopt_init_table(struct io_pagetable *iopt);
void iopt_destroy_table(struct io_pagetable *iopt);
int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
		   unsigned long length, struct list_head *pages_list);
void iopt_free_pages_list(struct list_head *pages_list);
enum {
	IOPT_ALLOC_IOVA = 1 << 0,
};
int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, void __user *uptr,
			unsigned long length, int iommu_prot,
			unsigned int flags);
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, struct file *file,
			unsigned long start, unsigned long length,
			int iommu_prot, unsigned int flags);
int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
		   unsigned long length, unsigned long *dst_iova,
		   int iommu_prot, unsigned int flags);
int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
		    unsigned long length, unsigned long *unmapped);
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);

int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap);
int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable);

void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner);
void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner);
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas);
void iopt_enable_large_pages(struct io_pagetable *iopt);
int iopt_disable_large_pages(struct io_pagetable *iopt);

struct iommufd_ucmd {
	struct iommufd_ctx *ictx;
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
};

int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
		       unsigned long arg);

/* Copy the response in ucmd->cmd back to userspace. */
static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
				       size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}

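/*
 * Illustrative sketch: ioctl handlers receive the user's command structure
 * already copied into ucmd->cmd, write any output fields in place and then
 * call iommufd_ucmd_respond() to copy the result back. struct
 * iommu_example_cmd and its out_value field are hypothetical stand-ins for a
 * real uAPI structure.
 *
 *	static int iommufd_example_ioctl(struct iommufd_ucmd *ucmd)
 *	{
 *		struct iommu_example_cmd *cmd = ucmd->cmd;
 *
 *		cmd->out_value = 42;	// fill in the response fields
 *		return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 *	}
 */
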
static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
	if (!refcount_inc_not_zero(&obj->users))
		return false;
	if (!refcount_inc_not_zero(&obj->shortterm_users)) {
		/*
		 * If the caller doesn't already have a ref on obj this must be
		 * called under the xa_lock. Otherwise the caller is holding a
		 * ref on users. Thus it cannot be 0 before this decrement.
		 */
		refcount_dec(&obj->users);
		return false;
	}
	return true;
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type);
static inline void iommufd_put_object(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	/*
	 * Users first, then shortterm so that REMOVE_WAIT_SHORTTERM never sees
	 * a spurious !0 users with a 0 shortterm_users.
	 */
	refcount_dec(&obj->users);
	if (refcount_dec_and_test(&obj->shortterm_users))
		wake_up_interruptible_all(&ictx->destroy_wait);
}

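/*
 * Illustrative sketch: iommufd_get_object() and iommufd_put_object() bracket
 * temporary use of an object looked up by ID. The returned pointer is an
 * ERR_PTR on failure, and the put must pair with the get so the
 * users/shortterm_users counts documented above stay balanced.
 * IOMMUFD_OBJ_IOAS is just one example of an object type.
 *
 *	static int example_use_object(struct iommufd_ctx *ictx, u32 id)
 *	{
 *		struct iommufd_object *obj;
 *
 *		obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS);
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *		// ... use the object while the references are held ...
 *		iommufd_put_object(ictx, obj);
 *		return 0;
 *	}
 */
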
void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj);

enum {
	REMOVE_WAIT_SHORTTERM = 1,
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags);

/*
 * The caller holds a users refcount and wants to destroy the object. At this
 * point the caller has no shortterm_users reference and at least the xarray
 * will be holding one.
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT_SHORTTERM);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * An HWPT allocated by autodomains may be used by many devices and is
 * automatically destroyed when its refcount reaches zero.
 *
 * If userspace uses the HWPT manually, even for a short time, then it will
 * disrupt this refcounting and the auto-free in the kernel will not work.
 * Userspace that tries to use the automatically allocated HWPT must be careful
 * to ensure that it is consistently destroyed, eg by not racing accesses
 * and by not attaching an automatic HWPT to a device manually.
 */
static inline void
iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj)
{
	iommufd_object_remove(ictx, obj, obj->id, 0);
}

#define __iommufd_object_alloc(ictx, ptr, type, obj)                           \
	container_of(_iommufd_object_alloc(                                    \
			     ictx,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc(ictx, ptr, type) \
	__iommufd_object_alloc(ictx, ptr, type, obj)

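/*
 * Illustrative sketch: the usual allocation lifecycle pairs
 * iommufd_object_alloc() with iommufd_object_finalize() on success, or
 * iommufd_object_abort_and_destroy() if initialization fails after the
 * allocation. The macro requires the embedded iommufd_object member to be
 * named obj and placed first. struct iommufd_example, IOMMUFD_OBJ_EXAMPLE and
 * example_setup() are hypothetical names used only for illustration.
 *
 *	struct iommufd_example *example;
 *	int rc;
 *
 *	example = iommufd_object_alloc(ictx, example, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(example))
 *		return PTR_ERR(example);
 *	rc = example_setup(example);	// hypothetical initialization
 *	if (rc) {
 *		iommufd_object_abort_and_destroy(ictx, &example->obj);
 *		return rc;
 *	}
 *	iommufd_object_finalize(ictx, &example->obj);	// visible to lookups
 */
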
/*
 * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
 * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
 * mapping is copied into all of the associated domains and made available to
 * in-kernel users.
 *
 * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
 * object. When we go to attach a device to an IOAS we need to get an
 * iommu_domain and a wrapping iommufd_hw_pagetable for it.
 *
 * An iommu_domain & iommufd_hw_pagetable will be automatically selected
 * for a device based on the hwpt_list. If no suitable iommu_domain
 * is found a new iommu_domain will be created.
 */
struct iommufd_ioas {
	struct iommufd_object obj;
	struct io_pagetable iopt;
	struct mutex mutex;
	struct list_head hwpt_list;
};

static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
						    u32 id)
{
	return container_of(iommufd_get_object(ictx, id,
					       IOMMUFD_OBJ_IOAS),
			    struct iommufd_ioas, obj);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx);
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_ioas_destroy(struct iommufd_object *obj);
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx);

int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap);

/*
 * A HW pagetable is called an iommu_domain inside the kernel. This user object
 * allows directly creating and inspecting the domains. Domains that have kernel
 * owned page tables will be associated with an iommufd_ioas that provides the
 * IOVA to PFN map.
 */
struct iommufd_hw_pagetable {
	struct iommufd_object obj;
	struct iommu_domain *domain;
	struct iommufd_fault *fault;
};

struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;
	/* Head at iommufd_ioas::hwpt_list */
	struct list_head hwpt_item;
	struct iommufd_sw_msi_maps present_sw_msi;
};

struct iommufd_hwpt_nested {
	struct iommufd_hw_pagetable common;
	struct iommufd_hwpt_paging *parent;
	struct iommufd_viommu *viommu;
};

static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING;
}

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}

static inline struct iommufd_hwpt_nested *
to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_nested, common);
}

static inline struct iommufd_hwpt_paging *
find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	switch (hwpt->obj.type) {
	case IOMMUFD_OBJ_HWPT_PAGING:
		return to_hwpt_paging(hwpt);
	case IOMMUFD_OBJ_HWPT_NESTED:
		return to_hwpt_nested(hwpt)->parent;
	default:
		return NULL;
	}
}

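/*
 * Illustrative sketch: find_hwpt_paging() lets a caller reach the paging HWPT
 * (and through it the backing IOAS) from either HWPT flavor, since a nested
 * HWPT ultimately translates through its parent.
 *
 *	struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
 *
 *	if (!hwpt_paging)
 *		return -EOPNOTSUPP;	// no paging HWPT backs this object
 *	// hwpt_paging->ioas->iopt is the IOVA -> PFN map behind hwpt
 */
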
static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_PAGING),
			    struct iommufd_hwpt_paging, common.obj);
}

static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_NESTED),
			    struct iommufd_hw_pagetable, obj);
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);

struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, u32 flags,
			  bool immediate_attach,
			  const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev);
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);

static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
					    struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);

		lockdep_assert_not_held(&hwpt_paging->ioas->mutex);

		if (hwpt_paging->auto_domain) {
			iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
			return;
		}
	}
	refcount_dec(&hwpt->obj.users);
}

struct iommufd_group {
	struct kref ref;
	struct mutex lock;
	struct iommufd_ctx *ictx;
	struct iommu_group *group;
	struct iommufd_hw_pagetable *hwpt;
	struct list_head device_list;
	struct iommufd_sw_msi_maps required_sw_msi;
	phys_addr_t sw_msi_start;
};

/*
 * An iommufd_device object represents the binding relationship between a
 * consuming driver and the iommufd. These objects are created/destroyed by
 * external drivers, not by userspace.
 */
struct iommufd_device {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_group *igroup;
	struct list_head group_item;
	/* always the physical device */
	struct device *dev;
	bool enforce_cache_coherency;
	/* protect iopf_enabled counter */
	struct mutex iopf_lock;
	unsigned int iopf_enabled;
};

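/*
 * Illustrative sketch: consuming drivers such as VFIO create these objects
 * through the public entry points in <linux/iommufd.h> rather than through
 * the ioctl path; the sketch below assumes the iommufd_device_bind() /
 * iommufd_device_unbind() pair exported there.
 *
 *	struct iommufd_device *idev;
 *	u32 dev_id;
 *
 *	idev = iommufd_device_bind(ictx, dev, &dev_id);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	// ... attach to an IOAS/HWPT, do DMA, detach ...
 *	iommufd_device_unbind(idev);
 */
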
static inline struct iommufd_device *
iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_DEVICE),
			    struct iommufd_device, obj);
}

void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

struct iommufd_access {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_ioas *ioas;
	struct iommufd_ioas *ioas_unpin;
	struct mutex ioas_lock;
	const struct iommufd_access_ops *ops;
	void *data;
	unsigned long iova_alignment;
	u32 iopt_access_list_id;
};

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access,
			u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

/*
 * An iommufd_fault object represents an interface to deliver I/O page faults
 * to userspace. These objects are created/destroyed by userspace and
 * associated with hardware page table objects during page-table allocation.
 */
struct iommufd_fault {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct file *filep;

	spinlock_t lock; /* protects the deliver list */
	struct list_head deliver;
	struct mutex mutex; /* serializes response flows */
	struct xarray response;

	struct wait_queue_head wait_queue;
};

/* Fetch the first node out of the fault->deliver list */
static inline struct iopf_group *
iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
{
	struct list_head *list = &fault->deliver;
	struct iopf_group *group = NULL;

	spin_lock(&fault->lock);
	if (!list_empty(list)) {
		group = list_first_entry(list, struct iopf_group, node);
		list_del(&group->node);
	}
	spin_unlock(&fault->lock);
	return group;
}

/* Restore a node back to the head of the fault->deliver list */
static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
						 struct iopf_group *group)
{
	spin_lock(&fault->lock);
	list_add(&group->node, &fault->deliver);
	spin_unlock(&fault->lock);
}

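/*
 * Illustrative sketch: a consumer takes one fault group off the deliver list
 * with iommufd_fault_deliver_fetch() and, if it cannot report it to
 * userspace, puts it back with iommufd_fault_deliver_restore() so the fault
 * is not lost. report_to_user() is a hypothetical stand-in for the copy-out
 * step.
 *
 *	struct iopf_group *group;
 *
 *	group = iommufd_fault_deliver_fetch(fault);
 *	if (!group)
 *		return 0;		// nothing pending
 *	if (report_to_user(group)) {	// hypothetical copy-out step
 *		iommufd_fault_deliver_restore(fault, group);
 *		return -EFAULT;
 *	}
 */
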
struct iommufd_attach_handle {
	struct iommu_attach_handle handle;
	struct iommufd_device *idev;
};

/* Convert an iommu attach handle to iommufd handle. */
#define to_iommufd_handle(hdl)	container_of(hdl, struct iommufd_attach_handle, handle)

static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_FAULT),
			    struct iommufd_fault, obj);
}

int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);

int iommufd_fault_iopf_enable(struct iommufd_device *idev);
void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle);

static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VIOMMU),
			    struct iommufd_viommu, obj);
}

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);

struct iommufd_vdevice {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_viommu *viommu;
	struct device *dev;
	u64 id; /* per-vIOMMU virtual ID */
};

#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
						 unsigned int ioas_id,
						 u64 *iova, u32 *flags)
{
}
static inline bool iommufd_should_fail(void)
{
	return false;
}
static inline int __init iommufd_test_init(void)
{
	return 0;
}
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return false;
}
#endif
#endif
579