xref: /linux/drivers/iommu/iommufd/iommufd_private.h (revision d4c7fccfa784da0583fed9c8f98ed78236c030fb)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H

#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"

struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;

struct iommufd_sw_msi_map {
	struct list_head sw_msi_item;
	phys_addr_t sw_msi_start;
	phys_addr_t msi_addr;
	unsigned int pgoff;
	unsigned int id;
};

/* Bitmap of struct iommufd_sw_msi_map::id */
struct iommufd_sw_msi_maps {
	DECLARE_BITMAP(bitmap, 64);
};

#ifdef CONFIG_IRQ_MSI_IOMMU
int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map);
#endif
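
/*
 * Example (sketch only, not kernel code): iommufd_sw_msi_maps is a bitmap
 * keyed by iommufd_sw_msi_map::id, i.e. a set of "which MSI windows are
 * already present in this HWPT". A user such as iommufd_sw_msi_install()
 * could test and record membership with the usual bitmap helpers; the
 * actual mapping step is omitted here:
 *
 *	if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
 *		...map msi_map->msi_addr into the paging domain...
 *		__set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
 *	}
 */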

struct iommufd_ctx {
	struct file *file;
	struct xarray objects;
	struct xarray groups;
	wait_queue_head_t destroy_wait;
	struct rw_semaphore ioas_creation_lock;
	struct maple_tree mt_mmap;

	struct mutex sw_msi_lock;
	struct list_head sw_msi_list;
	unsigned int sw_msi_id;

	u8 account_mode;
	/* Compatibility with VFIO no iommu */
	u8 no_iommu_mode;
	struct iommufd_ioas *vfio_ioas;
};

/* Entry for iommufd_ctx::mt_mmap */
struct iommufd_mmap {
	struct iommufd_object *owner;

	/* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
	unsigned long vm_pgoff;

	/* Physical range for io_remap_pfn_range() */
	phys_addr_t mmio_addr;
	size_t length;
};

/*
 * The IOVA to PFN map. The map automatically copies the PFNs into multiple
 * domains and permits sharing of PFNs between io_pagetable instances. This
 * supports either a design where IOASes are 1:1 with a domain (e.g. because
 * the domain is HW customized), or one where the IOAS is 1:N with multiple
 * generic domains. The io_pagetable holds an interval tree of iopt_areas
 * which point to shared iopt_pages which hold the pfns mapped to the page
 * table.
 *
 * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex
 */
struct io_pagetable {
	struct rw_semaphore domains_rwsem;
	struct xarray domains;
	struct xarray access_list;
	unsigned int next_domain_id;

	struct rw_semaphore iova_rwsem;
	struct rb_root_cached area_itree;
	/* IOVA that cannot become reserved, struct iopt_allowed */
	struct rb_root_cached allowed_itree;
	/* IOVA that cannot be allocated, struct iopt_reserved */
	struct rb_root_cached reserved_itree;
	u8 disable_large_pages;
	unsigned long iova_alignment;
};
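
/*
 * Example (sketch only): the comment above fixes the lock order as
 * domains_rwsem -> iova_rwsem -> pages::mutex. A reader that walks the
 * area tree while keeping the domain list stable would follow that order:
 *
 *	down_read(&iopt->domains_rwsem);
 *	down_read(&iopt->iova_rwsem);
 *	...walk iopt->area_itree...
 *	up_read(&iopt->iova_rwsem);
 *	up_read(&iopt->domains_rwsem);
 */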

void iopt_init_table(struct io_pagetable *iopt);
void iopt_destroy_table(struct io_pagetable *iopt);
int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
		   unsigned long length, struct list_head *pages_list);
void iopt_free_pages_list(struct list_head *pages_list);
enum {
	IOPT_ALLOC_IOVA = 1 << 0,
};
int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, void __user *uptr,
			unsigned long length, int iommu_prot,
			unsigned int flags);
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, struct file *file,
			unsigned long start, unsigned long length,
			int iommu_prot, unsigned int flags);
int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
		   unsigned long length, unsigned long *dst_iova,
		   int iommu_prot, unsigned int flags);
int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
		    unsigned long length, unsigned long *unmapped);
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);
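
/*
 * Example (sketch, error handling elided; ictx, iopt, uptr and length are
 * assumed to come from the caller): mapping user memory when no fixed IOVA
 * is supplied. Passing IOPT_ALLOC_IOVA asks the io_pagetable to pick a
 * suitable IOVA and return it through *iova:
 *
 *	unsigned long iova = 0;
 *	int rc;
 *
 *	rc = iopt_map_user_pages(ictx, iopt, &iova, uptr, length,
 *				 IOMMU_READ | IOMMU_WRITE, IOPT_ALLOC_IOVA);
 *	if (rc)
 *		return rc;
 */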

int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap);
int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable);

void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner);
void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner);
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas);
void iopt_enable_large_pages(struct io_pagetable *iopt);
int iopt_disable_large_pages(struct io_pagetable *iopt);

struct iommufd_ucmd {
	struct iommufd_ctx *ictx;
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
	struct iommufd_object *new_obj;
};

int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
		       unsigned long arg);

/* Copy the response in ucmd->cmd back to userspace. */
static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
				       size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}
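
/*
 * Example (sketch): a typical ioctl handler fills in the output fields of
 * the already-copied-in command structure in ucmd->cmd and then echoes it
 * back with iommufd_ucmd_respond(). iommufd_foo_ioctl, struct iommu_foo and
 * out_value are placeholders, not real iommufd commands:
 *
 *	int iommufd_foo_ioctl(struct iommufd_ucmd *ucmd)
 *	{
 *		struct iommu_foo *cmd = ucmd->cmd;
 *
 *		cmd->out_value = 42;
 *		return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 *	}
 */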

static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
	if (!refcount_inc_not_zero(&obj->users))
		return false;
	if (!refcount_inc_not_zero(&obj->wait_cnt)) {
		/*
		 * If the caller doesn't already have a ref on obj this must be
		 * called under the xa_lock. Otherwise the caller is holding a
		 * ref on users. Thus it cannot be one before this decrement.
		 */
		refcount_dec(&obj->users);
		return false;
	}
	return true;
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type);
static inline void iommufd_put_object(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	/*
	 * Users first, then wait_cnt so that REMOVE_WAIT never sees a spurious
	 * !0 users with a 0 wait_cnt.
	 */
	refcount_dec(&obj->users);
	if (refcount_dec_and_test(&obj->wait_cnt))
		wake_up_interruptible_all(&ictx->destroy_wait);
}
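
/*
 * Example (sketch): iommufd_get_object() returns the object with both
 * refcounts elevated (see iommufd_lock_obj()) or an ERR_PTR; a caller drops
 * both again with iommufd_put_object() when done:
 *
 *	struct iommufd_object *obj;
 *
 *	obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...use the object...
 *	iommufd_put_object(ictx, obj);
 */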

void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj);

enum {
	REMOVE_WAIT		= BIT(0),
	REMOVE_OBJ_TOMBSTONE	= BIT(1),
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags);

/*
 * The caller holds a users refcount and wants to destroy the object. At this
 * point the caller has no wait_cnt reference and at least the xarray will be
 * holding one.
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT);

	/*
	 * If there is a bug and we couldn't destroy the object then we put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * Similar to iommufd_object_destroy_user(), except that the object ID is left
 * reserved/tombstoned.
 */
static inline void iommufd_object_tombstone_user(struct iommufd_ctx *ictx,
						 struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id,
				    REMOVE_WAIT | REMOVE_OBJ_TOMBSTONE);

	/*
	 * If there is a bug and we couldn't destroy the object then we put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * The HWPT allocated by autodomains is possibly used by many devices and
 * is automatically destroyed when its refcount reaches zero.
 *
 * If userspace uses the HWPT manually, even for a short time, then it will
 * disrupt this refcounting and the auto-free in the kernel will not work.
 * Userspace that tries to use the automatically allocated HWPT must be careful
 * to ensure that it is consistently destroyed, e.g. by not racing accesses
 * and by not attaching an automatic HWPT to a device manually.
 */
static inline void
iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj)
{
	iommufd_object_remove(ictx, obj, obj->id, 0);
}

/*
 * Callers of these normal object allocators must call iommufd_object_finalize()
 * to finalize the object, or call iommufd_object_abort_and_destroy() to revert
 * the allocation.
 */
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);

#define __iommufd_object_alloc(ictx, ptr, type, obj)                           \
	container_of(_iommufd_object_alloc(                                    \
			     ictx,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc(ictx, ptr, type) \
	__iommufd_object_alloc(ictx, ptr, type, obj)
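
/*
 * Example (sketch; struct iommufd_foo, IOMMUFD_OBJ_FOO and foo_setup() are
 * placeholders, and struct iommufd_foo is assumed to embed struct
 * iommufd_object as its first member named obj): the normal allocator
 * returns an object that userspace cannot look up yet; the caller either
 * finalizes it on success or aborts it on error:
 *
 *	struct iommufd_foo *foo;
 *	int rc;
 *
 *	foo = iommufd_object_alloc(ictx, foo, IOMMUFD_OBJ_FOO);
 *	if (IS_ERR(foo))
 *		return PTR_ERR(foo);
 *	rc = foo_setup(foo);
 *	if (rc) {
 *		iommufd_object_abort_and_destroy(ictx, &foo->obj);
 *		return rc;
 *	}
 *	iommufd_object_finalize(ictx, &foo->obj);
 */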

/*
 * Callers of these _ucmd allocators should not call iommufd_object_finalize()
 * or iommufd_object_abort_and_destroy(), as the core automatically does that.
 */
struct iommufd_object *
_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
			   enum iommufd_object_type type);

#define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)                      \
	container_of(_iommufd_object_alloc_ucmd(                               \
			     ucmd,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc_ucmd(ucmd, ptr, type) \
	__iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)
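
/*
 * Example (sketch, same placeholder names as above): with the _ucmd
 * allocator the core finalizes or aborts the object for the ioctl handler,
 * so the handler only propagates an error code:
 *
 *	struct iommufd_foo *foo;
 *
 *	foo = iommufd_object_alloc_ucmd(ucmd, foo, IOMMUFD_OBJ_FOO);
 *	if (IS_ERR(foo))
 *		return PTR_ERR(foo);
 *	return foo_setup(foo);
 */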

/*
 * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
 * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
 * mapping is copied into all of the associated domains and made available to
 * in-kernel users.
 *
 * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
 * object. When we go to attach a device to an IOAS we need to get an
 * iommu_domain and a wrapping iommufd_hw_pagetable for it.
 *
 * An iommu_domain & iommufd_hw_pagetable will be automatically selected
 * for a device based on the hwpt_list. If no suitable iommu_domain
 * is found a new iommu_domain will be created.
 */
struct iommufd_ioas {
	struct iommufd_object obj;
	struct io_pagetable iopt;
	struct mutex mutex;
	struct list_head hwpt_list;
};
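
/*
 * Example (sketch, assuming iommufd_ioas::mutex is what keeps hwpt_list
 * stable, as its placement next to the list suggests): walking the paging
 * HWPTs attached to an IOAS looks roughly like this:
 *
 *	struct iommufd_hwpt_paging *hwpt_paging;
 *
 *	mutex_lock(&ioas->mutex);
 *	list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item)
 *		...inspect hwpt_paging->common.domain...
 *	mutex_unlock(&ioas->mutex);
 */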

static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
						    u32 id)
{
	return container_of(iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS),
			    struct iommufd_ioas, obj);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx);
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_ioas_destroy(struct iommufd_object *obj);
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx);

int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap);

/*
 * A HW pagetable is called an iommu_domain inside the kernel. This user object
 * allows directly creating and inspecting the domains. Domains that have kernel
 * owned page tables will be associated with an iommufd_ioas that provides the
 * IOVA to PFN map.
 */
struct iommufd_hw_pagetable {
	struct iommufd_object obj;
	struct iommu_domain *domain;
	struct iommufd_fault *fault;
	bool pasid_compat : 1;
};

struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;
	/* Head at iommufd_ioas::hwpt_list */
	struct list_head hwpt_item;
	struct iommufd_sw_msi_maps present_sw_msi;
};

struct iommufd_hwpt_nested {
	struct iommufd_hw_pagetable common;
	struct iommufd_hwpt_paging *parent;
	struct iommufd_viommu *viommu;
};

static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING;
}

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}

static inline struct iommufd_hwpt_nested *
to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_nested, common);
}

static inline struct iommufd_hwpt_paging *
find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	switch (hwpt->obj.type) {
	case IOMMUFD_OBJ_HWPT_PAGING:
		return to_hwpt_paging(hwpt);
	case IOMMUFD_OBJ_HWPT_NESTED:
		return to_hwpt_nested(hwpt)->parent;
	default:
		return NULL;
	}
}
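
/*
 * Example (sketch): code that needs the backing IOAS regardless of whether
 * it was handed a paging or a nested HWPT can resolve it through
 * find_hwpt_paging(), which returns NULL for other HWPT flavours:
 *
 *	struct iommufd_hwpt_paging *hwpt_paging = find_hwpt_paging(hwpt);
 *
 *	if (!hwpt_paging)
 *		return -EINVAL;
 *	...use hwpt_paging->ioas->iopt...
 */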

static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_PAGING),
			    struct iommufd_hwpt_paging, common.obj);
}

static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_NESTED),
			    struct iommufd_hw_pagetable, obj);
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);

struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, ioasid_t pasid,
			  u32 flags, bool immediate_attach,
			  const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);

static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
					    struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);

		if (hwpt_paging->auto_domain) {
			lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
			iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
			return;
		}
	}
	refcount_dec(&hwpt->obj.users);
}

struct iommufd_attach;

struct iommufd_group {
	struct kref ref;
	struct mutex lock;
	struct iommufd_ctx *ictx;
	struct iommu_group *group;
	struct xarray pasid_attach;
	struct iommufd_sw_msi_maps required_sw_msi;
	phys_addr_t sw_msi_start;
};

/*
 * An iommufd_device object represents the binding relationship between a
 * consuming driver and the iommufd. These objects are created/destroyed by
 * external drivers, not by userspace.
 */
struct iommufd_device {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_group *igroup;
	struct list_head group_item;
	/* always the physical device */
	struct device *dev;
	bool enforce_cache_coherency;
	struct iommufd_vdevice *vdev;
	bool destroying;
};

static inline struct iommufd_device *
iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_DEVICE),
			    struct iommufd_device, obj);
}

void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

struct iommufd_access {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_ioas *ioas;
	struct iommufd_ioas *ioas_unpin;
	struct mutex ioas_lock;
	const struct iommufd_access_ops *ops;
	void *data;
	unsigned long iova_alignment;
	u32 iopt_access_list_id;
};

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

/* iommufd_access for internal use */
static inline bool iommufd_access_is_internal(struct iommufd_access *access)
{
	return !access->ictx;
}

struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);

static inline void
iommufd_access_destroy_internal(struct iommufd_ctx *ictx,
				struct iommufd_access *access)
{
	iommufd_object_destroy_user(ictx, &access->obj);
}

int iommufd_access_attach_internal(struct iommufd_access *access,
				   struct iommufd_ioas *ioas);

static inline void iommufd_access_detach_internal(struct iommufd_access *access)
{
	iommufd_access_detach(access);
}
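
/*
 * Example (sketch of a lifecycle built only from the helpers declared
 * above; the ERR_PTR convention for iommufd_access_create_internal() is an
 * assumption): an in-kernel consumer creates an internal access, attaches
 * it to an IOAS, and tears it down in the reverse order:
 *
 *	struct iommufd_access *access;
 *	int rc;
 *
 *	access = iommufd_access_create_internal(ictx);
 *	if (IS_ERR(access))
 *		return PTR_ERR(access);
 *	rc = iommufd_access_attach_internal(access, ioas);
 *	if (rc) {
 *		iommufd_access_destroy_internal(ictx, access);
 *		return rc;
 *	}
 *	...
 *	iommufd_access_detach_internal(access);
 *	iommufd_access_destroy_internal(ictx, access);
 */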

struct iommufd_eventq {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct file *filep;

	spinlock_t lock; /* protects the deliver list */
	struct list_head deliver;

	struct wait_queue_head wait_queue;
};

struct iommufd_attach_handle {
	struct iommu_attach_handle handle;
	struct iommufd_device *idev;
};

/* Convert an iommu attach handle to an iommufd handle. */
#define to_iommufd_handle(hdl)	container_of(hdl, struct iommufd_attach_handle, handle)

/*
 * An iommufd_fault object represents an interface to deliver I/O page faults
 * to user space. These objects are created/destroyed by user space and
 * associated with hardware page table objects during page-table allocation.
 */
struct iommufd_fault {
	struct iommufd_eventq common;
	struct mutex mutex; /* serializes response flows */
	struct xarray response;
};

static inline struct iommufd_fault *
eventq_to_fault(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_fault, common);
}

static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_FAULT),
			    struct iommufd_fault, common.obj);
}

int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle);

/* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
struct iommufd_vevent {
	struct iommufd_vevent_header header;
	struct list_head node; /* for iommufd_eventq::deliver */
	ssize_t data_len;
	u64 event_data[] __counted_by(data_len);
};

#define vevent_for_lost_events_header(vevent) \
	(vevent->header.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)

/*
 * An iommufd_veventq object represents an interface to deliver vIOMMU events
 * to user space. It is created/destroyed by user space and associated with a
 * vIOMMU object during allocation.
 */
struct iommufd_veventq {
	struct iommufd_eventq common;
	struct iommufd_viommu *viommu;
	struct list_head node; /* for iommufd_viommu::veventqs */
	struct iommufd_vevent lost_events_header;

	enum iommu_veventq_type type;
	unsigned int depth;

	/* Use common.lock for protection */
	u32 num_events;
	u32 sequence;
};

static inline struct iommufd_veventq *
eventq_to_veventq(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_veventq, common);
}

static inline struct iommufd_veventq *
iommufd_get_veventq(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VEVENTQ),
			    struct iommufd_veventq, common.obj);
}

int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd);
void iommufd_veventq_destroy(struct iommufd_object *obj);
void iommufd_veventq_abort(struct iommufd_object *obj);

static inline void iommufd_vevent_handler(struct iommufd_veventq *veventq,
					  struct iommufd_vevent *vevent)
{
	struct iommufd_eventq *eventq = &veventq->common;

	lockdep_assert_held(&eventq->lock);

	/*
	 * Remove the lost_events_header and add the new node at the same time.
	 * Note the new node can be lost_events_header, for a sequence update.
	 */
	if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
		list_del(&veventq->lost_events_header.node);
	list_add_tail(&vevent->node, &eventq->deliver);
	vevent->header.sequence = veventq->sequence;
	veventq->sequence = (veventq->sequence + 1) & INT_MAX;

	wake_up_interruptible(&eventq->wait_queue);
}
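
/*
 * Example (sketch): iommufd_vevent_handler() must run under
 * iommufd_eventq::lock, per the lockdep assertion above, so a producer
 * could queue an event roughly like this (IRQ-safe locking may be needed
 * depending on the calling context):
 *
 *	spin_lock(&veventq->common.lock);
 *	iommufd_vevent_handler(veventq, vevent);
 *	spin_unlock(&veventq->common.lock);
 */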

static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VIOMMU),
			    struct iommufd_viommu, obj);
}

static inline struct iommufd_veventq *
iommufd_viommu_find_veventq(struct iommufd_viommu *viommu,
			    enum iommu_veventq_type type)
{
	struct iommufd_veventq *veventq, *next;

	lockdep_assert_held(&viommu->veventqs_rwsem);

	list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
		if (veventq->type == type)
			return veventq;
	}
	return NULL;
}

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);
void iommufd_vdevice_abort(struct iommufd_object *obj);
int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_hw_queue_destroy(struct iommufd_object *obj);

static inline struct iommufd_vdevice *
iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id)
{
	return container_of(iommufd_get_object(ictx, id,
					       IOMMUFD_OBJ_VDEVICE),
			    struct iommufd_vdevice, obj);
}

#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
						 unsigned int ioas_id,
						 u64 *iova, u32 *flags)
{
}
static inline bool iommufd_should_fail(void)
{
	return false;
}
static inline int __init iommufd_test_init(void)
{
	return 0;
}
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return false;
}
#endif
#endif