/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#ifndef __IOMMUFD_PRIVATE_H
#define __IOMMUFD_PRIVATE_H

#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/iova_bitmap.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"

struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;
struct dma_buf_attachment;
struct dma_buf_phys_vec;

struct iommufd_sw_msi_map {
	struct list_head sw_msi_item;
	phys_addr_t sw_msi_start;
	phys_addr_t msi_addr;
	unsigned int pgoff;
	unsigned int id;
};

/* Bitmap of struct iommufd_sw_msi_map::id */
struct iommufd_sw_msi_maps {
	DECLARE_BITMAP(bitmap, 64);
};

#ifdef CONFIG_IRQ_MSI_IOMMU
int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
			   struct iommufd_hwpt_paging *hwpt_paging,
			   struct iommufd_sw_msi_map *msi_map);
#endif

struct iommufd_ctx {
	struct file *file;
	struct xarray objects;
	struct xarray groups;
	wait_queue_head_t destroy_wait;
	struct rw_semaphore ioas_creation_lock;
	struct maple_tree mt_mmap;

	struct mutex sw_msi_lock;
	struct list_head sw_msi_list;
	unsigned int sw_msi_id;

	u8 account_mode;
	/* Compatibility with VFIO no iommu */
	u8 no_iommu_mode;
	struct iommufd_ioas *vfio_ioas;
};

/* Entry for iommufd_ctx::mt_mmap */
struct iommufd_mmap {
	struct iommufd_object *owner;

	/* Page-shifted start position in mt_mmap to validate vma->vm_pgoff */
	unsigned long vm_pgoff;

	/* Physical range for io_remap_pfn_range() */
	phys_addr_t mmio_addr;
	size_t length;
};
/*
 * The IOVA to PFN map. The map automatically copies the PFNs into multiple
 * domains and permits sharing of PFNs between io_pagetable instances. This
 * supports either a design where IOASs are 1:1 with a domain (eg because the
 * domain is HW customized), or one where the IOAS is 1:N with multiple generic
 * domains. The io_pagetable holds an interval tree of iopt_areas which point
 * to shared iopt_pages which hold the pfns mapped to the page table.
 *
 * The locking order is domains_rwsem -> iova_rwsem -> pages::mutex
 */
struct io_pagetable {
	struct rw_semaphore domains_rwsem;
	struct xarray domains;
	struct xarray access_list;
	unsigned int next_domain_id;

	struct rw_semaphore iova_rwsem;
	struct rb_root_cached area_itree;
	/* IOVA that cannot become reserved, struct iopt_allowed */
	struct rb_root_cached allowed_itree;
	/* IOVA that cannot be allocated, struct iopt_reserved */
	struct rb_root_cached reserved_itree;
	u8 disable_large_pages;
	unsigned long iova_alignment;
};
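
/*
 * A minimal illustrative sketch (not lifted from a real caller) of the
 * documented lock order above, for code that needs both the domain list and
 * the area tree to stay stable:
 *
 *	down_read(&iopt->domains_rwsem);
 *	down_read(&iopt->iova_rwsem);
 *	... walk iopt->area_itree, taking each area's pages->mutex as needed ...
 *	up_read(&iopt->iova_rwsem);
 *	up_read(&iopt->domains_rwsem);
 */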

void iopt_init_table(struct io_pagetable *iopt);
void iopt_destroy_table(struct io_pagetable *iopt);
int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
		   unsigned long length, struct list_head *pages_list);
void iopt_free_pages_list(struct list_head *pages_list);
enum {
	IOPT_ALLOC_IOVA = 1 << 0,
};
int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, void __user *uptr,
			unsigned long length, int iommu_prot,
			unsigned int flags);
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
			unsigned long *iova, int fd,
			unsigned long start, unsigned long length,
			int iommu_prot, unsigned int flags);
int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
		   unsigned long length, unsigned long *dst_iova,
		   int iommu_prot, unsigned int flags);
int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
		    unsigned long length, unsigned long *unmapped);
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped);

int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt,
				   struct iommu_domain *domain,
				   unsigned long flags,
				   struct iommu_hwpt_get_dirty_bitmap *bitmap);
int iopt_set_dirty_tracking(struct io_pagetable *iopt,
			    struct iommu_domain *domain, bool enable);

void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length);
int iopt_table_add_domain(struct io_pagetable *iopt,
			  struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt,
			      struct iommu_domain *domain);
int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
					struct device *dev,
					phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt,
			struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
		      unsigned long last, void *owner);
void iopt_remove_reserved_iova(struct io_pagetable *iopt, void *owner);
int iopt_cut_iova(struct io_pagetable *iopt, unsigned long *iovas,
		  size_t num_iovas);
void iopt_enable_large_pages(struct io_pagetable *iopt);
int iopt_disable_large_pages(struct io_pagetable *iopt);

struct iommufd_ucmd {
	struct iommufd_ctx *ictx;
	void __user *ubuffer;
	u32 user_size;
	void *cmd;
	struct iommufd_object *new_obj;
};

int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
		       unsigned long arg);

/* Copy the response in ucmd->cmd back to userspace. */
static inline int iommufd_ucmd_respond(struct iommufd_ucmd *ucmd,
				       size_t cmd_len)
{
	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
			 min_t(size_t, ucmd->user_size, cmd_len)))
		return -EFAULT;
	return 0;
}
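
/*
 * Illustrative only: the common shape of an ioctl handler that returns output
 * data. iommufd_example_ioctl() and struct iommu_example are hypothetical
 * names, not part of this header; the point is that a handler fills ucmd->cmd
 * and then calls iommufd_ucmd_respond() to copy it back to userspace.
 *
 *	static int iommufd_example_ioctl(struct iommufd_ucmd *ucmd)
 *	{
 *		struct iommu_example *cmd = ucmd->cmd;
 *
 *		cmd->out_value = 0;
 *		return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 *	}
 */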

static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
	if (!refcount_inc_not_zero(&obj->users))
		return false;
	if (!refcount_inc_not_zero(&obj->wait_cnt)) {
		/*
		 * If the caller doesn't already have a ref on obj this must be
		 * called under the xa_lock. Otherwise the caller is holding a
		 * ref on users. Thus it cannot be one before this decrement.
		 */
		refcount_dec(&obj->users);
		return false;
	}
	return true;
}

struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
					  enum iommufd_object_type type);
static inline void iommufd_put_object(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj)
{
	/*
	 * Users first, then wait_cnt so that REMOVE_WAIT never sees a spurious
	 * !0 users with a 0 wait_cnt.
	 */
	refcount_dec(&obj->users);
	if (refcount_dec_and_test(&obj->wait_cnt))
		wake_up_interruptible_all(&ictx->destroy_wait);
}
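
/*
 * Illustrative lookup/release pairing (a sketch, not copied from a caller):
 * every successful iommufd_get_object() must be balanced by
 * iommufd_put_object(). IOMMUFD_OBJ_EXAMPLE is a placeholder type.
 *
 *	obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	... use obj ...
 *	iommufd_put_object(ictx, obj);
 */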

void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
				      struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
			     struct iommufd_object *obj);

enum {
	REMOVE_WAIT		= BIT(0),
	REMOVE_OBJ_TOMBSTONE	= BIT(1),
};
int iommufd_object_remove(struct iommufd_ctx *ictx,
			  struct iommufd_object *to_destroy, u32 id,
			  unsigned int flags);

/*
 * The caller holds a users refcount and wants to destroy the object. At this
 * point the caller has no wait_cnt reference and at least the xarray will be
 * holding one.
 */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
					       struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * Similar to iommufd_object_destroy_user(), except that the object ID is left
 * reserved/tombstoned.
 */
static inline void iommufd_object_tombstone_user(struct iommufd_ctx *ictx,
						 struct iommufd_object *obj)
{
	int ret;

	ret = iommufd_object_remove(ictx, obj, obj->id,
				    REMOVE_WAIT | REMOVE_OBJ_TOMBSTONE);

	/*
	 * If there is a bug and we couldn't destroy the object then we did put
	 * back the caller's users refcount and will eventually try to free it
	 * again during close.
	 */
	WARN_ON(ret);
}

/*
 * The HWPT allocated by autodomains may be used by many devices and is
 * automatically destroyed when its refcount reaches zero.
 *
 * If userspace uses the HWPT manually, even for a short term, it will disrupt
 * this refcounting and the auto-free in the kernel will not work. Userspace
 * that uses an automatically allocated HWPT must be careful to ensure that it
 * is consistently destroyed, e.g. by not racing accesses and by not manually
 * attaching an automatic HWPT to a device.
 */
static inline void
iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
				   struct iommufd_object *obj)
{
	iommufd_object_remove(ictx, obj, obj->id, 0);
}

/*
 * Callers of these normal object allocators must call iommufd_object_finalize()
 * to finalize the object, or call iommufd_object_abort_and_destroy() to revert
 * the allocation.
 */
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
					     size_t size,
					     enum iommufd_object_type type);

#define __iommufd_object_alloc(ictx, ptr, type, obj)                           \
	container_of(_iommufd_object_alloc(                                    \
			     ictx,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc(ictx, ptr, type) \
	__iommufd_object_alloc(ictx, ptr, type, obj)
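
/*
 * Illustrative allocation flow (a sketch; struct iommufd_example,
 * IOMMUFD_OBJ_EXAMPLE and example_setup() are placeholders, not part of this
 * header):
 *
 *	struct iommufd_example *ex;
 *	int rc;
 *
 *	ex = iommufd_object_alloc(ictx, ex, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(ex))
 *		return PTR_ERR(ex);
 *	rc = example_setup(ex);
 *	if (rc) {
 *		iommufd_object_abort_and_destroy(ictx, &ex->obj);
 *		return rc;
 *	}
 *	iommufd_object_finalize(ictx, &ex->obj);
 *	return 0;
 */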

/*
 * Callers of these _ucmd allocators should not call iommufd_object_finalize()
 * or iommufd_object_abort_and_destroy(), as the core automatically does that.
 */
struct iommufd_object *
_iommufd_object_alloc_ucmd(struct iommufd_ucmd *ucmd, size_t size,
			   enum iommufd_object_type type);

#define __iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)                      \
	container_of(_iommufd_object_alloc_ucmd(                               \
			     ucmd,                                             \
			     sizeof(*(ptr)) + BUILD_BUG_ON_ZERO(               \
						      offsetof(typeof(*(ptr)), \
							       obj) != 0),     \
			     type),                                            \
		     typeof(*(ptr)), obj)

#define iommufd_object_alloc_ucmd(ucmd, ptr, type) \
	__iommufd_object_alloc_ucmd(ucmd, ptr, type, obj)
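
/*
 * Illustrative _ucmd flow (a sketch with placeholder names): the ioctl handler
 * only allocates and reports the new ID; the core takes care of finalizing
 * ucmd->new_obj on success and aborting/destroying it on failure.
 *
 *	ex = iommufd_object_alloc_ucmd(ucmd, ex, IOMMUFD_OBJ_EXAMPLE);
 *	if (IS_ERR(ex))
 *		return PTR_ERR(ex);
 *	cmd->out_example_id = ex->obj.id;
 *	return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
 */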

/*
 * The IO Address Space (IOAS) pagetable is a virtual page table backed by the
 * io_pagetable object. It is a user controlled mapping of IOVA -> PFNs. The
 * mapping is copied into all of the associated domains and made available to
 * in-kernel users.
 *
 * Every iommu_domain that is created is wrapped in an iommufd_hw_pagetable
 * object. When we go to attach a device to an IOAS we need to get an
 * iommu_domain and its wrapping iommufd_hw_pagetable.
 *
 * An iommu_domain & iommufd_hw_pagetable will be automatically selected
 * for a device based on the hwpt_list. If no suitable iommu_domain
 * is found a new iommu_domain will be created.
 */
struct iommufd_ioas {
	struct iommufd_object obj;
	struct io_pagetable iopt;
	struct mutex mutex;
	struct list_head hwpt_list;
};

static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx,
						    u32 id)
{
	return container_of(iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS),
			    struct iommufd_ioas, obj);
}

struct iommufd_ioas *iommufd_ioas_alloc(struct iommufd_ctx *ictx);
int iommufd_ioas_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_ioas_destroy(struct iommufd_object *obj);
int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd);
int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map(struct iommufd_ucmd *ucmd);
int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd);
int iommufd_ioas_change_process(struct iommufd_ucmd *ucmd);
int iommufd_ioas_copy(struct iommufd_ucmd *ucmd);
int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd);
int iommufd_ioas_option(struct iommufd_ucmd *ucmd);
int iommufd_option_rlimit_mode(struct iommu_option *cmd,
			       struct iommufd_ctx *ictx);

int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd);
int iommufd_check_iova_range(struct io_pagetable *iopt,
			     struct iommu_hwpt_get_dirty_bitmap *bitmap);

/*
 * A HW pagetable is called an iommu_domain inside the kernel. This user object
 * allows directly creating and inspecting the domains. Domains that have kernel
 * owned page tables will be associated with an iommufd_ioas that provides the
 * IOVA to PFN map.
 */
struct iommufd_hw_pagetable {
	struct iommufd_object obj;
	struct iommu_domain *domain;
	struct iommufd_fault *fault;
	bool pasid_compat : 1;
};

struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool enforce_cache_coherency : 1;
	bool nest_parent : 1;
	/* Head at iommufd_ioas::hwpt_list */
	struct list_head hwpt_item;
	struct iommufd_sw_msi_maps present_sw_msi;
};

struct iommufd_hwpt_nested {
	struct iommufd_hw_pagetable common;
	struct iommufd_hwpt_paging *parent;
	struct iommufd_viommu *viommu;
};

static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING;
}

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}

static inline struct iommufd_hwpt_nested *
to_hwpt_nested(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_nested, common);
}

static inline struct iommufd_hwpt_paging *
find_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	switch (hwpt->obj.type) {
	case IOMMUFD_OBJ_HWPT_PAGING:
		return to_hwpt_paging(hwpt);
	case IOMMUFD_OBJ_HWPT_NESTED:
		return to_hwpt_nested(hwpt)->parent;
	default:
		return NULL;
	}
}

static inline struct iommufd_hwpt_paging *
iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_PAGING),
			    struct iommufd_hwpt_paging, common.obj);
}

static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_HWPT_NESTED),
			    struct iommufd_hw_pagetable, obj);
}

int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);

struct iommufd_hwpt_paging *
iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
			  struct iommufd_device *idev, ioasid_t pasid,
			  u32 flags, bool immediate_attach,
			  const struct iommu_user_data *user_data);
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid);
void iommufd_hwpt_paging_destroy(struct iommufd_object *obj);
void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);

static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
					    struct iommufd_hw_pagetable *hwpt)
{
	if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
		struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);

		if (hwpt_paging->auto_domain) {
			lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
			iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
			return;
		}
	}
	refcount_dec(&hwpt->obj.users);
}
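
/*
 * Illustrative pairing (a sketch, not copied from a caller): the hwpt returned
 * by iommufd_hw_pagetable_detach() is dropped with iommufd_hw_pagetable_put(),
 * which frees an unused auto domain instead of merely dropping a reference.
 * The NULL check is just defensive in this sketch.
 *
 *	hwpt = iommufd_hw_pagetable_detach(idev, pasid);
 *	if (hwpt)
 *		iommufd_hw_pagetable_put(ictx, hwpt);
 */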

struct iommufd_attach;

struct iommufd_group {
	struct kref ref;
	struct mutex lock;
	struct iommufd_ctx *ictx;
	struct iommu_group *group;
	struct xarray pasid_attach;
	struct iommufd_sw_msi_maps required_sw_msi;
	phys_addr_t sw_msi_start;
};

/*
 * An iommufd_device object represents the binding relationship between a
 * consuming driver and the iommufd. These objects are created/destroyed by
 * external drivers, not by userspace.
 */
struct iommufd_device {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_group *igroup;
	struct list_head group_item;
	/* always the physical device */
	struct device *dev;
	bool enforce_cache_coherency;
	struct iommufd_vdevice *vdev;
	bool destroying;
};

static inline struct iommufd_device *
iommufd_get_device(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_DEVICE),
			    struct iommufd_device, obj);
}

void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);

struct device *iommufd_global_device(void);

struct iommufd_access {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct iommufd_ioas *ioas;
	struct iommufd_ioas *ioas_unpin;
	struct mutex ioas_lock;
	const struct iommufd_access_ops *ops;
	void *data;
	unsigned long iova_alignment;
	u32 iopt_access_list_id;
};

int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access);
void iopt_remove_access(struct io_pagetable *iopt,
			struct iommufd_access *access, u32 iopt_access_list_id);
void iommufd_access_destroy_object(struct iommufd_object *obj);

/* iommufd_access for internal use */
static inline bool iommufd_access_is_internal(struct iommufd_access *access)
{
	return !access->ictx;
}

struct iommufd_access *iommufd_access_create_internal(struct iommufd_ctx *ictx);

static inline void
iommufd_access_destroy_internal(struct iommufd_ctx *ictx,
				struct iommufd_access *access)
{
	iommufd_object_destroy_user(ictx, &access->obj);
}

int iommufd_access_attach_internal(struct iommufd_access *access,
				   struct iommufd_ioas *ioas);

static inline void iommufd_access_detach_internal(struct iommufd_access *access)
{
	iommufd_access_detach(access);
}

struct iommufd_eventq {
	struct iommufd_object obj;
	struct iommufd_ctx *ictx;
	struct file *filep;

	spinlock_t lock; /* protects the deliver list */
	struct list_head deliver;

	struct wait_queue_head wait_queue;
};

struct iommufd_attach_handle {
	struct iommu_attach_handle handle;
	struct iommufd_device *idev;
};

/* Convert an iommu attach handle to an iommufd handle. */
#define to_iommufd_handle(hdl)	container_of(hdl, struct iommufd_attach_handle, handle)

/*
 * An iommufd_fault object represents an interface to deliver I/O page faults
 * to user space. These objects are created/destroyed by user space and
 * associated with hardware page table objects during page-table allocation.
 */
struct iommufd_fault {
	struct iommufd_eventq common;
	struct mutex mutex; /* serializes response flows */
	struct xarray response;
};

static inline struct iommufd_fault *
eventq_to_fault(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_fault, common);
}

static inline struct iommufd_fault *
iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_FAULT),
			    struct iommufd_fault, common.obj);
}

int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
				  struct iommufd_attach_handle *handle);

/* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
struct iommufd_vevent {
	struct iommufd_vevent_header header;
	struct list_head node; /* for iommufd_eventq::deliver */
	ssize_t data_len;
	u64 event_data[] __counted_by(data_len);
};

#define vevent_for_lost_events_header(vevent) \
	(vevent->header.flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS)

/*
 * An iommufd_veventq object represents an interface to deliver vIOMMU events
 * to user space. It is created/destroyed by user space and associated with a
 * vIOMMU object at allocation time.
 */
struct iommufd_veventq {
	struct iommufd_eventq common;
	struct iommufd_viommu *viommu;
	struct list_head node; /* for iommufd_viommu::veventqs */

	enum iommu_veventq_type type;
	unsigned int depth;

	/* Use common.lock for protection */
	u32 num_events;
	u32 sequence;

	/* Must be last as it ends in a flexible-array member. */
	struct iommufd_vevent lost_events_header;
};

static inline struct iommufd_veventq *
eventq_to_veventq(struct iommufd_eventq *eventq)
{
	return container_of(eventq, struct iommufd_veventq, common);
}

static inline struct iommufd_veventq *
iommufd_get_veventq(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VEVENTQ),
			    struct iommufd_veventq, common.obj);
}

int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd);
void iommufd_veventq_destroy(struct iommufd_object *obj);
void iommufd_veventq_abort(struct iommufd_object *obj);

static inline void iommufd_vevent_handler(struct iommufd_veventq *veventq,
					  struct iommufd_vevent *vevent)
{
	struct iommufd_eventq *eventq = &veventq->common;

	lockdep_assert_held(&eventq->lock);

	/*
	 * Remove the lost_events_header and add the new node at the same time.
	 * Note the new node can be lost_events_header, for a sequence update.
	 */
	if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver))
		list_del(&veventq->lost_events_header.node);
	list_add_tail(&vevent->node, &eventq->deliver);
	vevent->header.sequence = veventq->sequence;
	veventq->sequence = (veventq->sequence + 1) & INT_MAX;

	wake_up_interruptible(&eventq->wait_queue);
}
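
/*
 * Illustrative producer sketch (not copied from a caller): the report path
 * allocates and fills a vevent, then holds common.lock across delivery, as the
 * lockdep assertion above requires.
 *
 *	spin_lock(&veventq->common.lock);
 *	iommufd_vevent_handler(veventq, vevent);
 *	spin_unlock(&veventq->common.lock);
 */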

static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
{
	return container_of(iommufd_get_object(ucmd->ictx, id,
					       IOMMUFD_OBJ_VIOMMU),
			    struct iommufd_viommu, obj);
}

static inline struct iommufd_veventq *
iommufd_viommu_find_veventq(struct iommufd_viommu *viommu,
			    enum iommu_veventq_type type)
{
	struct iommufd_veventq *veventq, *next;

	lockdep_assert_held(&viommu->veventqs_rwsem);

	list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {
		if (veventq->type == type)
			return veventq;
	}
	return NULL;
}

int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_viommu_destroy(struct iommufd_object *obj);
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_vdevice_destroy(struct iommufd_object *obj);
void iommufd_vdevice_abort(struct iommufd_object *obj);
int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
void iommufd_hw_queue_destroy(struct iommufd_object *obj);

static inline struct iommufd_vdevice *
iommufd_get_vdevice(struct iommufd_ctx *ictx, u32 id)
{
	return container_of(iommufd_get_object(ictx, id,
					       IOMMUFD_OBJ_VDEVICE),
			    struct iommufd_vdevice, obj);
}

#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
void iommufd_selftest_destroy(struct iommufd_object *obj);
extern size_t iommufd_test_memory_limit;
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags);
bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				     struct dma_buf_phys_vec *phys);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
						 unsigned int ioas_id,
						 u64 *iova, u32 *flags)
{
}
static inline bool iommufd_should_fail(void)
{
	return false;
}
static inline int __init iommufd_test_init(void)
{
	return 0;
}
static inline void iommufd_test_exit(void)
{
}
static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return false;
}
static inline int
iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				 struct dma_buf_phys_vec *phys)
{
	return -EOPNOTSUPP;
}
#endif
#endif