xref: /linux/drivers/iommu/iommufd/selftest.c (revision 056daec2925dc200b22c30419bc7b9e01f7843c4)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3  *
4  * Kernel side components to support tools/testing/selftests/iommu
5  */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/dma-buf.h>
9 #include <linux/dma-resv.h>
10 #include <linux/fault-inject.h>
11 #include <linux/file.h>
12 #include <linux/iommu.h>
13 #include <linux/platform_device.h>
14 #include <linux/slab.h>
15 #include <linux/xarray.h>
16 #include <uapi/linux/iommufd.h>
17 #include <linux/generic_pt/iommu.h>
18 #include "../iommu-pages.h"
19 
20 #include "../iommu-priv.h"
21 #include "io_pagetable.h"
22 #include "iommufd_private.h"
23 #include "iommufd_test.h"
24 
25 static DECLARE_FAULT_ATTR(fail_iommufd);
26 static struct dentry *dbgfs_root;
27 static struct platform_device *selftest_iommu_dev;
28 static const struct iommu_ops mock_ops;
29 static struct iommu_domain_ops domain_nested_ops;
30 
31 size_t iommufd_test_memory_limit = 65536;
32 
33 struct mock_bus_type {
34 	struct bus_type bus;
35 	struct notifier_block nb;
36 };
37 
38 static struct mock_bus_type iommufd_mock_bus_type = {
39 	.bus = {
40 		.name = "iommufd_mock",
41 	},
42 };
43 
44 static DEFINE_IDA(mock_dev_ida);
45 
46 enum {
47 	MOCK_DIRTY_TRACK = 1,
48 };
49 
50 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
51 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
52 
53 /*
54  * Syzkaller has trouble randomizing the correct iova to use since it is linked
54  * to the map ioctl's output, and it has no idea about that. So, simplify things.
56  * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
57  * value. This has a much smaller randomization space and syzkaller can hit it.
58  */
59 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
60 						  u64 *iova)
61 {
62 	struct syz_layout {
63 		__u32 nth_area;
64 		__u32 offset;
65 	};
66 	struct syz_layout *syz = (void *)iova;
67 	unsigned int nth = syz->nth_area;
68 	struct iopt_area *area;
69 
70 	down_read(&iopt->iova_rwsem);
71 	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
72 	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
73 		if (nth == 0) {
74 			up_read(&iopt->iova_rwsem);
75 			return iopt_area_iova(area) + syz->offset;
76 		}
77 		nth--;
78 	}
79 	up_read(&iopt->iova_rwsem);
80 
81 	return 0;
82 }
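
/*
 * Editor's sketch (not part of the driver): with MOCK_FLAGS_ACCESS_SYZ set,
 * the 64-bit "iova" coming from userspace is reinterpreted as the struct
 * syz_layout above. On a little-endian host, a hypothetical helper building
 * such a value would look like:
 */
static inline u64 __maybe_unused iommufd_test_syz_pack_iova(u32 nth_area,
							    u32 offset)
{
	/* nth_area lands in the low 32 bits, offset in the high 32 bits */
	return (u64)nth_area | ((u64)offset << 32);
}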
83 
84 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
85 						u64 *iova)
86 {
87 	unsigned long ret;
88 
89 	mutex_lock(&access->ioas_lock);
90 	if (!access->ioas) {
91 		mutex_unlock(&access->ioas_lock);
92 		return 0;
93 	}
94 	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
95 	mutex_unlock(&access->ioas_lock);
96 	return ret;
97 }
98 
99 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
100 				   unsigned int ioas_id, u64 *iova, u32 *flags)
101 {
102 	struct iommufd_ioas *ioas;
103 
104 	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
105 		return;
106 	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
107 
108 	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
109 	if (IS_ERR(ioas))
110 		return;
111 	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
112 	iommufd_put_object(ucmd->ictx, &ioas->obj);
113 }
114 
115 struct mock_iommu_domain {
116 	union {
117 		struct iommu_domain domain;
118 		struct pt_iommu iommu;
119 		struct pt_iommu_amdv1 amdv1;
120 	};
121 	unsigned long flags;
122 };
123 PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, iommu, domain);
124 PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, amdv1.iommu, domain);
125 
126 static inline struct mock_iommu_domain *
127 to_mock_domain(struct iommu_domain *domain)
128 {
129 	return container_of(domain, struct mock_iommu_domain, domain);
130 }
131 
132 struct mock_iommu_domain_nested {
133 	struct iommu_domain domain;
134 	struct mock_viommu *mock_viommu;
135 	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
136 };
137 
138 static inline struct mock_iommu_domain_nested *
139 to_mock_nested(struct iommu_domain *domain)
140 {
141 	return container_of(domain, struct mock_iommu_domain_nested, domain);
142 }
143 
144 struct mock_viommu {
145 	struct iommufd_viommu core;
146 	struct mock_iommu_domain *s2_parent;
147 	struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
148 	struct mutex queue_mutex;
149 
150 	unsigned long mmap_offset;
151 	u32 *page; /* Mmap page to test u32 type of in_data */
152 };
153 
154 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
155 {
156 	return container_of(viommu, struct mock_viommu, core);
157 }
158 
159 struct mock_hw_queue {
160 	struct iommufd_hw_queue core;
161 	struct mock_viommu *mock_viommu;
162 	struct mock_hw_queue *prev;
163 	u16 index;
164 };
165 
166 static inline struct mock_hw_queue *
167 to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
168 {
169 	return container_of(hw_queue, struct mock_hw_queue, core);
170 }
171 
172 enum selftest_obj_type {
173 	TYPE_IDEV,
174 };
175 
176 struct mock_dev {
177 	struct device dev;
178 	struct mock_viommu *viommu;
179 	struct rw_semaphore viommu_rwsem;
180 	unsigned long flags;
181 	unsigned long vdev_id;
182 	int id;
183 	u32 cache[MOCK_DEV_CACHE_NUM];
184 	atomic_t pasid_1024_fake_error;
185 	unsigned int iopf_refcount;
186 	struct iommu_domain *domain;
187 };
188 
189 static inline struct mock_dev *to_mock_dev(struct device *dev)
190 {
191 	return container_of(dev, struct mock_dev, dev);
192 }
193 
194 struct selftest_obj {
195 	struct iommufd_object obj;
196 	enum selftest_obj_type type;
197 
198 	union {
199 		struct {
200 			struct iommufd_device *idev;
201 			struct iommufd_ctx *ictx;
202 			struct mock_dev *mock_dev;
203 		} idev;
204 	};
205 };
206 
207 static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
208 {
209 	return container_of(obj, struct selftest_obj, obj);
210 }
211 
212 static int mock_domain_nop_attach(struct iommu_domain *domain,
213 				  struct device *dev, struct iommu_domain *old)
214 {
215 	struct mock_dev *mdev = to_mock_dev(dev);
216 	struct mock_viommu *new_viommu = NULL;
217 	unsigned long vdev_id = 0;
218 	int rc;
219 
220 	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
221 		return -EINVAL;
222 
223 	iommu_group_mutex_assert(dev);
224 	if (domain->type == IOMMU_DOMAIN_NESTED) {
225 		new_viommu = to_mock_nested(domain)->mock_viommu;
226 		if (new_viommu) {
227 			rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
228 							&vdev_id);
229 			if (rc)
230 				return rc;
231 		}
232 	}
233 	if (new_viommu != mdev->viommu) {
234 		down_write(&mdev->viommu_rwsem);
235 		mdev->viommu = new_viommu;
236 		mdev->vdev_id = vdev_id;
237 		up_write(&mdev->viommu_rwsem);
238 	}
239 
240 	rc = mock_dev_enable_iopf(dev, domain);
241 	if (rc)
242 		return rc;
243 
244 	mock_dev_disable_iopf(dev, mdev->domain);
245 	mdev->domain = domain;
246 
247 	return 0;
248 }
249 
250 static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
251 					 struct device *dev, ioasid_t pasid,
252 					 struct iommu_domain *old)
253 {
254 	struct mock_dev *mdev = to_mock_dev(dev);
255 	int rc;
256 
257 	/*
258 	 * On the first attach with pasid 1024, set
259 	 * mdev->pasid_1024_fake_error so that the second call of this op
260 	 * fakes an error, to validate the error path of the core. This is
261 	 * helpful for testing the case in which the iommu core needs to
262 	 * roll back to the old domain due to a driver failure, e.g. on
263 	 * replace. Note that a third call of this op shall succeed, since
264 	 * mdev->pasid_1024_fake_error is cleared again by the second
265 	 * call.
266 	 */
267 	if (pasid == 1024) {
268 		if (domain->type == IOMMU_DOMAIN_BLOCKED) {
269 			atomic_set(&mdev->pasid_1024_fake_error, 0);
270 		} else if (atomic_read(&mdev->pasid_1024_fake_error)) {
271 			/*
272 			 * Clear the flag, and fake an error to fail the
273 			 * replacement.
274 			 */
275 			atomic_set(&mdev->pasid_1024_fake_error, 0);
276 			return -ENOMEM;
277 		} else {
278 			/* Set the flag to fake an error in next call */
279 			atomic_set(&mdev->pasid_1024_fake_error, 1);
280 		}
281 	}
282 
283 	rc = mock_dev_enable_iopf(dev, domain);
284 	if (rc)
285 		return rc;
286 
287 	mock_dev_disable_iopf(dev, old);
288 
289 	return 0;
290 }
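
/*
 * Editor's sketch of the fake-error sequence above (hypothetical test code,
 * not part of the driver): with a non-blocking domain, the first call for
 * pasid 1024 arms the error, the second consumes it, and the third succeeds
 * while re-arming the flag.
 */
static void __maybe_unused mock_pasid_1024_example(struct iommu_domain *domain,
						   struct device *dev,
						   struct iommu_domain *old)
{
	WARN_ON(mock_domain_set_dev_pasid_nop(domain, dev, 1024, old));
	WARN_ON(mock_domain_set_dev_pasid_nop(domain, dev, 1024, old) !=
		-ENOMEM);
	WARN_ON(mock_domain_set_dev_pasid_nop(domain, dev, 1024, old));
}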
291 
292 static const struct iommu_domain_ops mock_blocking_ops = {
293 	.attach_dev = mock_domain_nop_attach,
294 	.set_dev_pasid = mock_domain_set_dev_pasid_nop
295 };
296 
297 static struct iommu_domain mock_blocking_domain = {
298 	.type = IOMMU_DOMAIN_BLOCKED,
299 	.ops = &mock_blocking_ops,
300 };
301 
302 static void *mock_domain_hw_info(struct device *dev, u32 *length,
303 				 enum iommu_hw_info_type *type)
304 {
305 	struct iommu_test_hw_info *info;
306 
307 	if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
308 	    *type != IOMMU_HW_INFO_TYPE_SELFTEST)
309 		return ERR_PTR(-EOPNOTSUPP);
310 
311 	info = kzalloc(sizeof(*info), GFP_KERNEL);
312 	if (!info)
313 		return ERR_PTR(-ENOMEM);
314 
315 	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
316 	*length = sizeof(*info);
317 	*type = IOMMU_HW_INFO_TYPE_SELFTEST;
318 
319 	return info;
320 }
321 
322 static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
323 					  bool enable)
324 {
325 	struct mock_iommu_domain *mock = to_mock_domain(domain);
326 	unsigned long flags = mock->flags;
327 
328 	if (enable && !domain->dirty_ops)
329 		return -EINVAL;
330 
331 	/* No change? */
332 	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
333 		return 0;
334 
335 	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
336 
337 	mock->flags = flags;
338 	return 0;
339 }
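
/*
 * Editor's note on the "No change?" test above: the XOR expression is a
 * bitwise spelling of a plain equality check, as in this minimal equivalent:
 */
static inline bool __maybe_unused
mock_dirty_track_unchanged(bool enable, unsigned long flags)
{
	/* true when the requested state already matches MOCK_DIRTY_TRACK */
	return enable == !!(flags & MOCK_DIRTY_TRACK);
}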
340 
341 static struct mock_iommu_domain_nested *
342 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
343 {
344 	struct mock_iommu_domain_nested *mock_nested;
345 	struct iommu_hwpt_selftest user_cfg;
346 	int rc, i;
347 
348 	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
349 		return ERR_PTR(-EOPNOTSUPP);
350 
351 	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
352 					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
353 	if (rc)
354 		return ERR_PTR(rc);
355 
356 	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
357 	if (!mock_nested)
358 		return ERR_PTR(-ENOMEM);
359 	mock_nested->domain.ops = &domain_nested_ops;
360 	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
361 	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
362 		mock_nested->iotlb[i] = user_cfg.iotlb;
363 	return mock_nested;
364 }
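
/*
 * Editor's sketch of the userspace payload consumed above (hypothetical
 * value): the single iotlb field of struct iommu_hwpt_selftest seeds every
 * mock IOTLB slot of the new nested domain.
 */
#if 0
	struct iommu_hwpt_selftest data = { .iotlb = 0xdead };
	/* passed as IOMMU_HWPT_DATA_SELFTEST via struct iommu_hwpt_alloc */
#endif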
365 
366 static struct iommu_domain *
367 mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
368 			 u32 flags, const struct iommu_user_data *user_data)
369 {
370 	struct mock_iommu_domain_nested *mock_nested;
371 	struct mock_iommu_domain *mock_parent;
372 
373 	if (flags & ~IOMMU_HWPT_ALLOC_PASID)
374 		return ERR_PTR(-EOPNOTSUPP);
375 	if (!parent || !(parent->type & __IOMMU_DOMAIN_PAGING))
376 		return ERR_PTR(-EINVAL);
377 
378 	mock_parent = to_mock_domain(parent);
379 	if (!mock_parent)
380 		return ERR_PTR(-EINVAL);
381 
382 	mock_nested = __mock_domain_alloc_nested(user_data);
383 	if (IS_ERR(mock_nested))
384 		return ERR_CAST(mock_nested);
385 	return &mock_nested->domain;
386 }
387 
388 static void mock_domain_free(struct iommu_domain *domain)
389 {
390 	struct mock_iommu_domain *mock = to_mock_domain(domain);
391 
392 	pt_iommu_deinit(&mock->iommu);
393 	kfree(mock);
394 }
395 
396 static void mock_iotlb_sync(struct iommu_domain *domain,
397 				struct iommu_iotlb_gather *gather)
398 {
399 	iommu_put_pages_list(&gather->freelist);
400 }
401 
402 static const struct iommu_domain_ops amdv1_mock_ops = {
403 	IOMMU_PT_DOMAIN_OPS(amdv1_mock),
404 	.free = mock_domain_free,
405 	.attach_dev = mock_domain_nop_attach,
406 	.set_dev_pasid = mock_domain_set_dev_pasid_nop,
407 	.iotlb_sync = &mock_iotlb_sync,
408 };
409 
410 static const struct iommu_domain_ops amdv1_mock_huge_ops = {
411 	IOMMU_PT_DOMAIN_OPS(amdv1_mock),
412 	.free = mock_domain_free,
413 	.attach_dev = mock_domain_nop_attach,
414 	.set_dev_pasid = mock_domain_set_dev_pasid_nop,
415 	.iotlb_sync = &mock_iotlb_sync,
416 };
417 #undef pt_iommu_amdv1_mock_map_pages
418 
419 static const struct iommu_dirty_ops amdv1_mock_dirty_ops = {
420 	IOMMU_PT_DIRTY_OPS(amdv1_mock),
421 	.set_dirty_tracking = mock_domain_set_dirty_tracking,
422 };
423 
424 static const struct iommu_domain_ops amdv1_ops = {
425 	IOMMU_PT_DOMAIN_OPS(amdv1),
426 	.free = mock_domain_free,
427 	.attach_dev = mock_domain_nop_attach,
428 	.set_dev_pasid = mock_domain_set_dev_pasid_nop,
429 	.iotlb_sync = &mock_iotlb_sync,
430 };
431 
432 static const struct iommu_dirty_ops amdv1_dirty_ops = {
433 	IOMMU_PT_DIRTY_OPS(amdv1),
434 	.set_dirty_tracking = mock_domain_set_dirty_tracking,
435 };
436 
437 static struct mock_iommu_domain *
438 mock_domain_alloc_pgtable(struct device *dev,
439 			  const struct iommu_hwpt_selftest *user_cfg, u32 flags)
440 {
441 	struct mock_iommu_domain *mock;
442 	int rc;
443 
444 	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
445 	if (!mock)
446 		return ERR_PTR(-ENOMEM);
447 	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
448 
449 	mock->amdv1.iommu.nid = NUMA_NO_NODE;
450 
451 	switch (user_cfg->pagetable_type) {
452 	case MOCK_IOMMUPT_DEFAULT:
453 	case MOCK_IOMMUPT_HUGE: {
454 		struct pt_iommu_amdv1_cfg cfg = {};
455 
456 		/* The mock version has a 2k page size */
457 		cfg.common.hw_max_vasz_lg2 = 56;
458 		cfg.common.hw_max_oasz_lg2 = 51;
459 		cfg.starting_level = 2;
460 		if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
461 			mock->domain.ops = &amdv1_mock_huge_ops;
462 		else
463 			mock->domain.ops = &amdv1_mock_ops;
464 		rc = pt_iommu_amdv1_mock_init(&mock->amdv1, &cfg, GFP_KERNEL);
465 		if (rc)
466 			goto err_free;
467 
468 		/*
469 		 * In huge mode userspace should only provide huge pages, but we
470 		 * have to include PAGE_SIZE for the domain to be accepted by
471 		 * iommufd.
472 		 */
473 		if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
474 			mock->domain.pgsize_bitmap = MOCK_HUGE_PAGE_SIZE |
475 						     PAGE_SIZE;
476 		if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
477 			mock->domain.dirty_ops = &amdv1_mock_dirty_ops;
478 		break;
479 	}
480 
481 	case MOCK_IOMMUPT_AMDV1: {
482 		struct pt_iommu_amdv1_cfg cfg = {};
483 
484 		cfg.common.hw_max_vasz_lg2 = 64;
485 		cfg.common.hw_max_oasz_lg2 = 52;
486 		cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
487 				      BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
488 				      BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
489 		cfg.starting_level = 2;
490 		mock->domain.ops = &amdv1_ops;
491 		rc = pt_iommu_amdv1_init(&mock->amdv1, &cfg, GFP_KERNEL);
492 		if (rc)
493 			goto err_free;
494 		if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
495 			mock->domain.dirty_ops = &amdv1_dirty_ops;
496 		break;
497 	}
498 	default:
499 		rc = -EOPNOTSUPP;
500 		goto err_free;
501 	}
502 
503 	/*
504 	 * Override the real aperture to the MOCK aperture for test purposes.
505 	 */
506 	if (user_cfg->pagetable_type == MOCK_IOMMUPT_DEFAULT) {
507 		WARN_ON(mock->domain.geometry.aperture_start != 0);
508 		WARN_ON(mock->domain.geometry.aperture_end < MOCK_APERTURE_LAST);
509 
510 		mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
511 		mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
512 	}
513 
514 	return mock;
515 err_free:
516 	kfree(mock);
517 	return ERR_PTR(rc);
518 }
519 
520 static struct iommu_domain *
521 mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
522 			       const struct iommu_user_data *user_data)
523 {
524 	bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
525 	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
526 				 IOMMU_HWPT_ALLOC_NEST_PARENT |
527 				 IOMMU_HWPT_ALLOC_PASID;
528 	struct mock_dev *mdev = to_mock_dev(dev);
529 	bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
530 	struct iommu_hwpt_selftest user_cfg = {};
531 	struct mock_iommu_domain *mock;
532 	int rc;
533 
534 	if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
535 		return ERR_PTR(-EOPNOTSUPP);
536 
537 	if (user_data && (user_data->type != IOMMU_HWPT_DATA_SELFTEST &&
538 			  user_data->type != IOMMU_HWPT_DATA_NONE))
539 		return ERR_PTR(-EOPNOTSUPP);
540 
541 	if (user_data) {
542 		rc = iommu_copy_struct_from_user(
543 			&user_cfg, user_data, IOMMU_HWPT_DATA_SELFTEST, iotlb);
544 		if (rc)
545 			return ERR_PTR(rc);
546 	}
547 
548 	mock = mock_domain_alloc_pgtable(dev, &user_cfg, flags);
549 	if (IS_ERR(mock))
550 		return ERR_CAST(mock);
551 	return &mock->domain;
552 }
553 
554 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
555 {
556 	struct mock_dev *mdev = to_mock_dev(dev);
557 
558 	switch (cap) {
559 	case IOMMU_CAP_CACHE_COHERENCY:
560 		return true;
561 	case IOMMU_CAP_DIRTY_TRACKING:
562 		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
563 	default:
564 		break;
565 	}
566 
567 	return false;
568 }
569 
570 static struct iopf_queue *mock_iommu_iopf_queue;
571 
572 static struct mock_iommu_device {
573 	struct iommu_device iommu_dev;
574 	struct completion complete;
575 	refcount_t users;
576 } mock_iommu;
577 
578 static struct iommu_device *mock_probe_device(struct device *dev)
579 {
580 	if (dev->bus != &iommufd_mock_bus_type.bus)
581 		return ERR_PTR(-ENODEV);
582 	return &mock_iommu.iommu_dev;
583 }
584 
585 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
586 				      struct iommu_page_response *msg)
587 {
588 }
589 
590 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
591 {
592 	struct mock_dev *mdev = to_mock_dev(dev);
593 	int ret;
594 
595 	if (!domain || !domain->iopf_handler)
596 		return 0;
597 
598 	if (!mock_iommu_iopf_queue)
599 		return -ENODEV;
600 
601 	if (mdev->iopf_refcount) {
602 		mdev->iopf_refcount++;
603 		return 0;
604 	}
605 
606 	ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
607 	if (ret)
608 		return ret;
609 
610 	mdev->iopf_refcount = 1;
611 
612 	return 0;
613 }
614 
615 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
616 {
617 	struct mock_dev *mdev = to_mock_dev(dev);
618 
619 	if (!domain || !domain->iopf_handler)
620 		return;
621 
622 	if (--mdev->iopf_refcount)
623 		return;
624 
625 	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
626 }
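
/*
 * Editor's sketch (hypothetical, not driver code): the enable/disable pair
 * above is refcounted per device, and the device is only added to or removed
 * from the IOPF queue on the 0<->1 transitions. With a domain that has no
 * iopf_handler both calls are no-ops, so they still pair up.
 */
static void __maybe_unused mock_iopf_refcount_example(struct device *dev,
						      struct iommu_domain *d)
{
	if (mock_dev_enable_iopf(dev, d))	/* 0 -> 1, adds to queue */
		return;
	WARN_ON(mock_dev_enable_iopf(dev, d));	/* 1 -> 2 */
	mock_dev_disable_iopf(dev, d);		/* 2 -> 1 */
	mock_dev_disable_iopf(dev, d);		/* 1 -> 0, removes from queue */
}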
627 
628 static void mock_viommu_destroy(struct iommufd_viommu *viommu)
629 {
630 	struct mock_iommu_device *mock_iommu = container_of(
631 		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
632 	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
633 
634 	if (refcount_dec_and_test(&mock_iommu->users))
635 		complete(&mock_iommu->complete);
636 	if (mock_viommu->mmap_offset)
637 		iommufd_viommu_destroy_mmap(&mock_viommu->core,
638 					    mock_viommu->mmap_offset);
639 	free_page((unsigned long)mock_viommu->page);
640 	mutex_destroy(&mock_viommu->queue_mutex);
641 
642 	/* iommufd core frees mock_viommu and viommu */
643 }
644 
645 static struct iommu_domain *
646 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
647 				const struct iommu_user_data *user_data)
648 {
649 	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
650 	struct mock_iommu_domain_nested *mock_nested;
651 
652 	if (flags & ~IOMMU_HWPT_ALLOC_PASID)
653 		return ERR_PTR(-EOPNOTSUPP);
654 
655 	mock_nested = __mock_domain_alloc_nested(user_data);
656 	if (IS_ERR(mock_nested))
657 		return ERR_CAST(mock_nested);
658 	mock_nested->mock_viommu = mock_viommu;
659 	return &mock_nested->domain;
660 }
661 
662 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
663 					struct iommu_user_data_array *array)
664 {
665 	struct iommu_viommu_invalidate_selftest *cmds;
666 	struct iommu_viommu_invalidate_selftest *cur;
667 	struct iommu_viommu_invalidate_selftest *end;
668 	int rc;
669 
670 	/* A zero-length array is allowed to validate the array type */
671 	if (array->entry_num == 0 &&
672 	    array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
673 		array->entry_num = 0;
674 		return 0;
675 	}
676 
677 	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
678 	if (!cmds)
679 		return -ENOMEM;
680 	cur = cmds;
681 	end = cmds + array->entry_num;
682 
683 	static_assert(sizeof(*cmds) == 3 * sizeof(u32));
684 	rc = iommu_copy_struct_from_full_user_array(
685 		cmds, sizeof(*cmds), array,
686 		IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
687 	if (rc)
688 		goto out;
689 
690 	while (cur != end) {
691 		struct mock_dev *mdev;
692 		struct device *dev;
693 		int i;
694 
695 		if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
696 			rc = -EOPNOTSUPP;
697 			goto out;
698 		}
699 
700 		if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
701 			rc = -EINVAL;
702 			goto out;
703 		}
704 
705 		xa_lock(&viommu->vdevs);
706 		dev = iommufd_viommu_find_dev(viommu,
707 					      (unsigned long)cur->vdev_id);
708 		if (!dev) {
709 			xa_unlock(&viommu->vdevs);
710 			rc = -EINVAL;
711 			goto out;
712 		}
713 		mdev = container_of(dev, struct mock_dev, dev);
714 
715 		if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
716 			/* Invalidate all cache entries and ignore cache_id */
717 			for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
718 				mdev->cache[i] = 0;
719 		} else {
720 			mdev->cache[cur->cache_id] = 0;
721 		}
722 		xa_unlock(&viommu->vdevs);
723 
724 		cur++;
725 	}
726 out:
727 	array->entry_num = cur - cmds;
728 	kfree(cmds);
729 	return rc;
730 }
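
/*
 * Editor's sketch of the userspace-visible contract above (hypothetical
 * values): entry_num holds the requested count on input and is rewritten to
 * the number of fully processed entries on output, even when an entry fails.
 */
#if 0
	struct iommu_viommu_invalidate_selftest cmds[2] = {
		{ .vdev_id = vdev_id, .cache_id = 0 },
		{ .vdev_id = vdev_id, .cache_id = MOCK_DEV_CACHE_ID_MAX + 1 },
	};
	/* The second entry fails with -EINVAL; entry_num reads back as 1 */
#endif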
731 
732 static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
733 					    enum iommu_hw_queue_type queue_type)
734 {
735 	if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
736 		return 0;
737 	return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
738 }
739 
740 static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
741 {
742 	struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
743 	struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
744 
745 	mutex_lock(&mock_viommu->queue_mutex);
746 	mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
747 	if (mock_hw_queue->prev)
748 		iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
749 					  core);
750 	mutex_unlock(&mock_viommu->queue_mutex);
751 }
752 
753 /* Test iommufd_hw_queue_depend/undepend() */
754 static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
755 				   phys_addr_t base_addr_pa)
756 {
757 	struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
758 	struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
759 	struct mock_hw_queue *prev = NULL;
760 	int rc = 0;
761 
762 	if (index >= IOMMU_TEST_HW_QUEUE_MAX)
763 		return -EINVAL;
764 
765 	mutex_lock(&mock_viommu->queue_mutex);
766 
767 	if (mock_viommu->hw_queue[index]) {
768 		rc = -EEXIST;
769 		goto unlock;
770 	}
771 
772 	if (index) {
773 		prev = mock_viommu->hw_queue[index - 1];
774 		if (!prev) {
775 			rc = -EIO;
776 			goto unlock;
777 		}
778 	}
779 
780 	/*
781 	 * Test to catch a kernel bug if the core converted the physical address
782 	 * incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
783 	 */
784 	if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
785 					       hw_queue->base_addr)) {
786 		rc = -EFAULT;
787 		goto unlock;
788 	}
789 
790 	if (prev) {
791 		rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
792 		if (rc)
793 			goto unlock;
794 	}
795 
796 	mock_hw_queue->prev = prev;
797 	mock_hw_queue->mock_viommu = mock_viommu;
798 	mock_viommu->hw_queue[index] = mock_hw_queue;
799 
800 	hw_queue->destroy = &mock_hw_queue_destroy;
801 unlock:
802 	mutex_unlock(&mock_viommu->queue_mutex);
803 	return rc;
804 }
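
/*
 * Editor's sketch of the ordering rule enforced above (hypothetical queue
 * variables): queues must be created in index order, and each non-zero index
 * depends on its predecessor, so teardown can only proceed in reverse.
 */
#if 0
	mock_hw_queue_init_phys(hw_queue1, 1, pa1);	/* -EIO, no index 0 yet */
	mock_hw_queue_init_phys(hw_queue0, 0, pa0);	/* 0 */
	mock_hw_queue_init_phys(hw_queue1, 1, pa1);	/* 0, depends on queue 0 */
#endif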
805 
806 static struct iommufd_viommu_ops mock_viommu_ops = {
807 	.destroy = mock_viommu_destroy,
808 	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
809 	.cache_invalidate = mock_viommu_cache_invalidate,
810 	.get_hw_queue_size = mock_viommu_get_hw_queue_size,
811 	.hw_queue_init_phys = mock_hw_queue_init_phys,
812 };
813 
814 static size_t mock_get_viommu_size(struct device *dev,
815 				   enum iommu_viommu_type viommu_type)
816 {
817 	if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
818 		return 0;
819 	return VIOMMU_STRUCT_SIZE(struct mock_viommu, core);
820 }
821 
822 static int mock_viommu_init(struct iommufd_viommu *viommu,
823 			    struct iommu_domain *parent_domain,
824 			    const struct iommu_user_data *user_data)
825 {
826 	struct mock_iommu_device *mock_iommu = container_of(
827 		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
828 	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
829 	struct iommu_viommu_selftest data;
830 	int rc;
831 
832 	if (user_data) {
833 		rc = iommu_copy_struct_from_user(
834 			&data, user_data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
835 		if (rc)
836 			return rc;
837 
838 		/* Allocate two pages */
839 		mock_viommu->page =
840 			(u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
841 		if (!mock_viommu->page)
842 			return -ENOMEM;
843 
844 		rc = iommufd_viommu_alloc_mmap(&mock_viommu->core,
845 					       __pa(mock_viommu->page),
846 					       PAGE_SIZE * 2,
847 					       &mock_viommu->mmap_offset);
848 		if (rc)
849 			goto err_free_page;
850 
851 		/* For loopback tests on both the page and out_data */
852 		*mock_viommu->page = data.in_data;
853 		data.out_data = data.in_data;
854 		data.out_mmap_length = PAGE_SIZE * 2;
855 		data.out_mmap_offset = mock_viommu->mmap_offset;
856 		rc = iommu_copy_struct_to_user(
857 			user_data, &data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
858 		if (rc)
859 			goto err_destroy_mmap;
860 	}
861 
862 	refcount_inc(&mock_iommu->users);
863 	mutex_init(&mock_viommu->queue_mutex);
864 	mock_viommu->s2_parent = to_mock_domain(parent_domain);
865 
866 	viommu->ops = &mock_viommu_ops;
867 	return 0;
868 
869 err_destroy_mmap:
870 	iommufd_viommu_destroy_mmap(&mock_viommu->core,
871 				    mock_viommu->mmap_offset);
872 err_free_page:
873 	free_page((unsigned long)mock_viommu->page);
874 	return rc;
875 }
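
/*
 * Editor's sketch of the userspace side of the loopback above (hypothetical
 * fd and variable names): the caller mmaps out_mmap_offset on the iommufd
 * and reads in_data back from both the shared page and out_data.
 */
#if 0
	struct iommu_viommu_selftest data = { .in_data = 0x12345678 };
	/* ... IOMMU_VIOMMU_ALLOC of type IOMMU_VIOMMU_TYPE_SELFTEST ... */
	u32 *page = mmap(NULL, data.out_mmap_length, PROT_READ | PROT_WRITE,
			 MAP_SHARED, iommufd, data.out_mmap_offset);
	assert(*page == data.in_data && data.out_data == data.in_data);
#endif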
876 
877 static const struct iommu_ops mock_ops = {
878 	/*
879 	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
880 	 * because it is zero.
881 	 */
882 	.default_domain = &mock_blocking_domain,
883 	.blocked_domain = &mock_blocking_domain,
884 	.owner = THIS_MODULE,
885 	.hw_info = mock_domain_hw_info,
886 	.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
887 	.domain_alloc_nested = mock_domain_alloc_nested,
888 	.capable = mock_domain_capable,
889 	.device_group = generic_device_group,
890 	.probe_device = mock_probe_device,
891 	.page_response = mock_domain_page_response,
892 	.user_pasid_table = true,
893 	.get_viommu_size = mock_get_viommu_size,
894 	.viommu_init = mock_viommu_init,
895 };
896 
897 static void mock_domain_free_nested(struct iommu_domain *domain)
898 {
899 	kfree(to_mock_nested(domain));
900 }
901 
902 static int
903 mock_domain_cache_invalidate_user(struct iommu_domain *domain,
904 				  struct iommu_user_data_array *array)
905 {
906 	struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
907 	struct iommu_hwpt_invalidate_selftest inv;
908 	u32 processed = 0;
909 	int i = 0, j;
910 	int rc = 0;
911 
912 	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
913 		rc = -EINVAL;
914 		goto out;
915 	}
916 
917 	for ( ; i < array->entry_num; i++) {
918 		rc = iommu_copy_struct_from_user_array(&inv, array,
919 						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
920 						       i, iotlb_id);
921 		if (rc)
922 			break;
923 
924 		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
925 			rc = -EOPNOTSUPP;
926 			break;
927 		}
928 
929 		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
930 			rc = -EINVAL;
931 			break;
932 		}
933 
934 		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
935 			/* Invalidate all mock iotlb entries and ignore iotlb_id */
936 			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
937 				mock_nested->iotlb[j] = 0;
938 		} else {
939 			mock_nested->iotlb[inv.iotlb_id] = 0;
940 		}
941 
942 		processed++;
943 	}
944 
945 out:
946 	array->entry_num = processed;
947 	return rc;
948 }
949 
950 static struct iommu_domain_ops domain_nested_ops = {
951 	.free = mock_domain_free_nested,
952 	.attach_dev = mock_domain_nop_attach,
953 	.cache_invalidate_user = mock_domain_cache_invalidate_user,
954 	.set_dev_pasid = mock_domain_set_dev_pasid_nop,
955 };
956 
957 static inline struct iommufd_hw_pagetable *
958 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
959 {
960 	struct iommufd_object *obj;
961 
962 	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
963 	if (IS_ERR(obj))
964 		return ERR_CAST(obj);
965 	return container_of(obj, struct iommufd_hw_pagetable, obj);
966 }
967 
968 static inline struct iommufd_hw_pagetable *
969 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
970 		 struct mock_iommu_domain **mock)
971 {
972 	struct iommufd_hw_pagetable *hwpt;
973 
974 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
975 	if (IS_ERR(hwpt))
976 		return hwpt;
977 	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
978 	    hwpt->domain->owner != &mock_ops) {
979 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
980 		return ERR_PTR(-EINVAL);
981 	}
982 	*mock = to_mock_domain(hwpt->domain);
983 	return hwpt;
984 }
985 
986 static inline struct iommufd_hw_pagetable *
987 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
988 			struct mock_iommu_domain_nested **mock_nested)
989 {
990 	struct iommufd_hw_pagetable *hwpt;
991 
992 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
993 	if (IS_ERR(hwpt))
994 		return hwpt;
995 	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
996 	    hwpt->domain->ops != &domain_nested_ops) {
997 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
998 		return ERR_PTR(-EINVAL);
999 	}
1000 	*mock_nested = to_mock_nested(hwpt->domain);
1001 	return hwpt;
1002 }
1003 
1004 static void mock_dev_release(struct device *dev)
1005 {
1006 	struct mock_dev *mdev = to_mock_dev(dev);
1007 
1008 	ida_free(&mock_dev_ida, mdev->id);
1009 	kfree(mdev);
1010 }
1011 
1012 static struct mock_dev *mock_dev_create(unsigned long dev_flags)
1013 {
1014 	struct property_entry prop[] = {
1015 		PROPERTY_ENTRY_U32("pasid-num-bits", 0),
1016 		{},
1017 	};
1018 	const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
1019 				MOCK_FLAGS_DEVICE_PASID;
1020 	struct mock_dev *mdev;
1021 	int rc, i;
1022 
1023 	if (dev_flags & ~valid_flags)
1024 		return ERR_PTR(-EINVAL);
1025 
1026 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1027 	if (!mdev)
1028 		return ERR_PTR(-ENOMEM);
1029 
1030 	init_rwsem(&mdev->viommu_rwsem);
1031 	device_initialize(&mdev->dev);
1032 	mdev->flags = dev_flags;
1033 	mdev->dev.release = mock_dev_release;
1034 	mdev->dev.bus = &iommufd_mock_bus_type.bus;
1035 	for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
1036 		mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
1037 
1038 	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
1039 	if (rc < 0)
1040 		goto err_put;
1041 	mdev->id = rc;
1042 
1043 	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
1044 	if (rc)
1045 		goto err_put;
1046 
1047 	if (dev_flags & MOCK_FLAGS_DEVICE_PASID)
1048 		prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH);
1049 
1050 	rc = device_create_managed_software_node(&mdev->dev, prop, NULL);
1051 	if (rc) {
1052 		dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc);
1053 		goto err_put;
1054 	}
1055 
1056 	rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
1057 	if (rc)
1058 		goto err_put;
1059 	return mdev;
1060 
1061 err_put:
1062 	put_device(&mdev->dev);
1063 	return ERR_PTR(rc);
1064 }
1065 
1066 static void mock_dev_destroy(struct mock_dev *mdev)
1067 {
1068 	device_unregister(&mdev->dev);
1069 }
1070 
1071 bool iommufd_selftest_is_mock_dev(struct device *dev)
1072 {
1073 	return dev->release == mock_dev_release;
1074 }
1075 
1076 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
1077 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
1078 				    struct iommu_test_cmd *cmd)
1079 {
1080 	struct iommufd_device *idev;
1081 	struct selftest_obj *sobj;
1082 	u32 pt_id = cmd->id;
1083 	u32 dev_flags = 0;
1084 	u32 idev_id;
1085 	int rc;
1086 
1087 	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
1088 	if (IS_ERR(sobj))
1089 		return PTR_ERR(sobj);
1090 
1091 	sobj->idev.ictx = ucmd->ictx;
1092 	sobj->type = TYPE_IDEV;
1093 
1094 	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
1095 		dev_flags = cmd->mock_domain_flags.dev_flags;
1096 
1097 	sobj->idev.mock_dev = mock_dev_create(dev_flags);
1098 	if (IS_ERR(sobj->idev.mock_dev)) {
1099 		rc = PTR_ERR(sobj->idev.mock_dev);
1100 		goto out_sobj;
1101 	}
1102 
1103 	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
1104 				   &idev_id);
1105 	if (IS_ERR(idev)) {
1106 		rc = PTR_ERR(idev);
1107 		goto out_mdev;
1108 	}
1109 	sobj->idev.idev = idev;
1110 
1111 	rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id);
1112 	if (rc)
1113 		goto out_unbind;
1114 
1115 	/* Userspace must destroy the device_id to destroy the object */
1116 	cmd->mock_domain.out_hwpt_id = pt_id;
1117 	cmd->mock_domain.out_stdev_id = sobj->obj.id;
1118 	cmd->mock_domain.out_idev_id = idev_id;
1119 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1120 	if (rc)
1121 		goto out_detach;
1122 	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
1123 	return 0;
1124 
1125 out_detach:
1126 	iommufd_device_detach(idev, IOMMU_NO_PASID);
1127 out_unbind:
1128 	iommufd_device_unbind(idev);
1129 out_mdev:
1130 	mock_dev_destroy(sobj->idev.mock_dev);
1131 out_sobj:
1132 	iommufd_object_abort(ucmd->ictx, &sobj->obj);
1133 	return rc;
1134 }
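
/*
 * Editor's sketch of the userspace call that reaches the function above
 * (hypothetical fd; see tools/testing/selftests/iommu for the real usage):
 */
#if 0
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
	};
	ioctl(iommufd, IOMMU_TEST_CMD, &cmd);
	/* cmd.mock_domain.out_stdev_id/out_hwpt_id/out_idev_id are now set */
#endif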
1135 
1136 static struct selftest_obj *
1137 iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id)
1138 {
1139 	struct iommufd_object *dev_obj;
1140 	struct selftest_obj *sobj;
1141 
1142 	/*
1143 	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
1144 	 * it doesn't race with detach, which is not allowed.
1145 	 */
1146 	dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST);
1147 	if (IS_ERR(dev_obj))
1148 		return ERR_CAST(dev_obj);
1149 
1150 	sobj = to_selftest_obj(dev_obj);
1151 	if (sobj->type != TYPE_IDEV) {
1152 		iommufd_put_object(ictx, dev_obj);
1153 		return ERR_PTR(-EINVAL);
1154 	}
1155 	return sobj;
1156 }
1157 
1158 /* Replace the mock domain with a manually allocated hw_pagetable */
1159 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
1160 					    unsigned int device_id, u32 pt_id,
1161 					    struct iommu_test_cmd *cmd)
1162 {
1163 	struct selftest_obj *sobj;
1164 	int rc;
1165 
1166 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id);
1167 	if (IS_ERR(sobj))
1168 		return PTR_ERR(sobj);
1169 
1170 	rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id);
1171 	if (rc)
1172 		goto out_sobj;
1173 
1174 	cmd->mock_domain_replace.pt_id = pt_id;
1175 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1176 
1177 out_sobj:
1178 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1179 	return rc;
1180 }
1181 
1182 /* Add an additional reserved IOVA to the IOAS */
1183 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
1184 				     unsigned int mockpt_id,
1185 				     unsigned long start, size_t length)
1186 {
1187 	struct iommufd_ioas *ioas;
1188 	int rc;
1189 
1190 	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
1191 	if (IS_ERR(ioas))
1192 		return PTR_ERR(ioas);
1193 	down_write(&ioas->iopt.iova_rwsem);
1194 	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
1195 	up_write(&ioas->iopt.iova_rwsem);
1196 	iommufd_put_object(ucmd->ictx, &ioas->obj);
1197 	return rc;
1198 }
1199 
1200 /* Check that every pfn under each iova matches the pfn under a user VA */
1201 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
1202 				    unsigned int mockpt_id, unsigned long iova,
1203 				    size_t length, void __user *uptr)
1204 {
1205 	struct iommufd_hw_pagetable *hwpt;
1206 	struct mock_iommu_domain *mock;
1207 	unsigned int page_size;
1208 	uintptr_t end;
1209 	int rc;
1210 
1211 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1212 	if (IS_ERR(hwpt))
1213 		return PTR_ERR(hwpt);
1214 
1215 	page_size = 1 << __ffs(mock->domain.pgsize_bitmap);
1216 	if (iova % page_size || length % page_size ||
1217 	    (uintptr_t)uptr % page_size ||
1218 	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) {
1219 		rc = -EINVAL;
		goto out_put;
	}
1220 
1221 	for (; length; length -= page_size) {
1222 		struct page *pages[1];
1223 		phys_addr_t io_phys;
1224 		unsigned long pfn;
1225 		long npages;
1226 
1227 		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1228 					     pages);
1229 		if (npages < 0) {
1230 			rc = npages;
1231 			goto out_put;
1232 		}
1233 		if (WARN_ON(npages != 1)) {
1234 			rc = -EFAULT;
1235 			goto out_put;
1236 		}
1237 		pfn = page_to_pfn(pages[0]);
1238 		put_page(pages[0]);
1239 
1240 		io_phys = mock->domain.ops->iova_to_phys(&mock->domain, iova);
1241 		if (io_phys !=
1242 		    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1243 			rc = -EINVAL;
1244 			goto out_put;
1245 		}
1246 		iova += page_size;
1247 		uptr += page_size;
1248 	}
1249 	rc = 0;
1250 
1251 out_put:
1252 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1253 	return rc;
1254 }
1255 
1256 /* Check that the page ref count matches, to look for missing pin/unpins */
1257 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
1258 				      void __user *uptr, size_t length,
1259 				      unsigned int refs)
1260 {
1261 	uintptr_t end;
1262 
1263 	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1264 	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1265 		return -EINVAL;
1266 
1267 	for (; length; length -= PAGE_SIZE) {
1268 		struct page *pages[1];
1269 		long npages;
1270 
1271 		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1272 		if (npages < 0)
1273 			return npages;
1274 		if (WARN_ON(npages != 1))
1275 			return -EFAULT;
1276 		if (!PageCompound(pages[0])) {
1277 			unsigned int count;
1278 
1279 			count = page_ref_count(pages[0]);
1280 			if (count / GUP_PIN_COUNTING_BIAS != refs) {
1281 				put_page(pages[0]);
1282 				return -EIO;
1283 			}
1284 		}
1285 		put_page(pages[0]);
1286 		uptr += PAGE_SIZE;
1287 	}
1288 	return 0;
1289 }
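
/*
 * Editor's note on the refcount math above: each FOLL_PIN pin of a
 * non-compound page adds GUP_PIN_COUNTING_BIAS (1024) to the page refcount,
 * so the division recovers the number of outstanding pins. A worked sketch:
 */
static inline unsigned int __maybe_unused
mock_approx_pin_count(unsigned int refcount)
{
	/* e.g. refcount 2049 = 2 pins + 1 plain reference -> returns 2 */
	return refcount / GUP_PIN_COUNTING_BIAS;
}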
1290 
1291 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, u32 mockpt_id,
1292 				       unsigned int iotlb_id, u32 iotlb)
1293 {
1294 	struct mock_iommu_domain_nested *mock_nested;
1295 	struct iommufd_hw_pagetable *hwpt;
1296 	int rc = 0;
1297 
1298 	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1299 	if (IS_ERR(hwpt))
1300 		return PTR_ERR(hwpt);
1301 
1302 	mock_nested = to_mock_nested(hwpt->domain);
1303 
1304 	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1305 	    mock_nested->iotlb[iotlb_id] != iotlb)
1306 		rc = -EINVAL;
1307 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1308 	return rc;
1309 }
1310 
1311 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
1312 					unsigned int cache_id, u32 cache)
1313 {
1314 	struct iommufd_device *idev;
1315 	struct mock_dev *mdev;
1316 	int rc = 0;
1317 
1318 	idev = iommufd_get_device(ucmd, idev_id);
1319 	if (IS_ERR(idev))
1320 		return PTR_ERR(idev);
1321 	mdev = container_of(idev->dev, struct mock_dev, dev);
1322 
1323 	if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1324 		rc = -EINVAL;
1325 	iommufd_put_object(ucmd->ictx, &idev->obj);
1326 	return rc;
1327 }
1328 
1329 struct selftest_access {
1330 	struct iommufd_access *access;
1331 	struct file *file;
1332 	struct mutex lock;
1333 	struct list_head items;
1334 	unsigned int next_id;
1335 	bool destroying;
1336 };
1337 
1338 struct selftest_access_item {
1339 	struct list_head items_elm;
1340 	unsigned long iova;
1341 	size_t length;
1342 	unsigned int id;
1343 };
1344 
1345 static const struct file_operations iommfd_test_staccess_fops;
1346 
1347 static struct selftest_access *iommufd_access_get(int fd)
1348 {
1349 	struct file *file;
1350 
1351 	file = fget(fd);
1352 	if (!file)
1353 		return ERR_PTR(-EBADFD);
1354 
1355 	if (file->f_op != &iommfd_test_staccess_fops) {
1356 		fput(file);
1357 		return ERR_PTR(-EBADFD);
1358 	}
1359 	return file->private_data;
1360 }
1361 
1362 static void iommufd_test_access_unmap(void *data, unsigned long iova,
1363 				      unsigned long length)
1364 {
1365 	unsigned long iova_last = iova + length - 1;
1366 	struct selftest_access *staccess = data;
1367 	struct selftest_access_item *item;
1368 	struct selftest_access_item *tmp;
1369 
1370 	mutex_lock(&staccess->lock);
1371 	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1372 		if (iova > item->iova + item->length - 1 ||
1373 		    iova_last < item->iova)
1374 			continue;
1375 		list_del(&item->items_elm);
1376 		iommufd_access_unpin_pages(staccess->access, item->iova,
1377 					   item->length);
1378 		kfree(item);
1379 	}
1380 	mutex_unlock(&staccess->lock);
1381 }
1382 
1383 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1384 					    unsigned int access_id,
1385 					    unsigned int item_id)
1386 {
1387 	struct selftest_access_item *item;
1388 	struct selftest_access *staccess;
1389 
1390 	staccess = iommufd_access_get(access_id);
1391 	if (IS_ERR(staccess))
1392 		return PTR_ERR(staccess);
1393 
1394 	mutex_lock(&staccess->lock);
1395 	list_for_each_entry(item, &staccess->items, items_elm) {
1396 		if (item->id == item_id) {
1397 			list_del(&item->items_elm);
1398 			iommufd_access_unpin_pages(staccess->access, item->iova,
1399 						   item->length);
1400 			mutex_unlock(&staccess->lock);
1401 			kfree(item);
1402 			fput(staccess->file);
1403 			return 0;
1404 		}
1405 	}
1406 	mutex_unlock(&staccess->lock);
1407 	fput(staccess->file);
1408 	return -ENOENT;
1409 }
1410 
1411 static int iommufd_test_staccess_release(struct inode *inode,
1412 					 struct file *filep)
1413 {
1414 	struct selftest_access *staccess = filep->private_data;
1415 
1416 	if (staccess->access) {
1417 		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1418 		iommufd_access_destroy(staccess->access);
1419 	}
1420 	mutex_destroy(&staccess->lock);
1421 	kfree(staccess);
1422 	return 0;
1423 }
1424 
1425 static const struct iommufd_access_ops selftest_access_ops_pin = {
1426 	.needs_pin_pages = 1,
1427 	.unmap = iommufd_test_access_unmap,
1428 };
1429 
1430 static const struct iommufd_access_ops selftest_access_ops = {
1431 	.unmap = iommufd_test_access_unmap,
1432 };
1433 
1434 static const struct file_operations iommfd_test_staccess_fops = {
1435 	.release = iommufd_test_staccess_release,
1436 };
1437 
1438 static struct selftest_access *iommufd_test_alloc_access(void)
1439 {
1440 	struct selftest_access *staccess;
1441 	struct file *filep;
1442 
1443 	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1444 	if (!staccess)
1445 		return ERR_PTR(-ENOMEM);
1446 	INIT_LIST_HEAD(&staccess->items);
1447 	mutex_init(&staccess->lock);
1448 
1449 	filep = anon_inode_getfile("[iommufd_test_staccess]",
1450 				   &iommfd_test_staccess_fops, staccess,
1451 				   O_RDWR);
1452 	if (IS_ERR(filep)) {
1453 		kfree(staccess);
1454 		return ERR_CAST(filep);
1455 	}
1456 	staccess->file = filep;
1457 	return staccess;
1458 }
1459 
1460 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1461 				      unsigned int ioas_id, unsigned int flags)
1462 {
1463 	struct iommu_test_cmd *cmd = ucmd->cmd;
1464 	struct selftest_access *staccess;
1465 	struct iommufd_access *access;
1466 	u32 id;
1467 	int fdno;
1468 	int rc;
1469 
1470 	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1471 		return -EOPNOTSUPP;
1472 
1473 	staccess = iommufd_test_alloc_access();
1474 	if (IS_ERR(staccess))
1475 		return PTR_ERR(staccess);
1476 
1477 	fdno = get_unused_fd_flags(O_CLOEXEC);
1478 	if (fdno < 0) {
1479 		rc = -ENOMEM;
1480 		goto out_free_staccess;
1481 	}
1482 
1483 	access = iommufd_access_create(
1484 		ucmd->ictx,
1485 		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1486 			&selftest_access_ops_pin :
1487 			&selftest_access_ops,
1488 		staccess, &id);
1489 	if (IS_ERR(access)) {
1490 		rc = PTR_ERR(access);
1491 		goto out_put_fdno;
1492 	}
1493 	rc = iommufd_access_attach(access, ioas_id);
1494 	if (rc)
1495 		goto out_destroy;
1496 	cmd->create_access.out_access_fd = fdno;
1497 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1498 	if (rc)
1499 		goto out_destroy;
1500 
1501 	staccess->access = access;
1502 	fd_install(fdno, staccess->file);
1503 	return 0;
1504 
1505 out_destroy:
1506 	iommufd_access_destroy(access);
1507 out_put_fdno:
1508 	put_unused_fd(fdno);
1509 out_free_staccess:
1510 	fput(staccess->file);
1511 	return rc;
1512 }
1513 
1514 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1515 					    unsigned int access_id,
1516 					    unsigned int ioas_id)
1517 {
1518 	struct selftest_access *staccess;
1519 	int rc;
1520 
1521 	staccess = iommufd_access_get(access_id);
1522 	if (IS_ERR(staccess))
1523 		return PTR_ERR(staccess);
1524 
1525 	rc = iommufd_access_replace(staccess->access, ioas_id);
1526 	fput(staccess->file);
1527 	return rc;
1528 }
1529 
1530 /* Check that the pages in a page array match the pages in the user VA */
1531 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1532 				    size_t npages)
1533 {
1534 	for (; npages; npages--) {
1535 		struct page *tmp_pages[1];
1536 		long rc;
1537 
1538 		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1539 		if (rc < 0)
1540 			return rc;
1541 		if (WARN_ON(rc != 1))
1542 			return -EFAULT;
1543 		put_page(tmp_pages[0]);
1544 		if (tmp_pages[0] != *pages)
1545 			return -EBADE;
1546 		pages++;
1547 		uptr += PAGE_SIZE;
1548 	}
1549 	return 0;
1550 }
1551 
1552 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
1553 				     unsigned int access_id, unsigned long iova,
1554 				     size_t length, void __user *uptr,
1555 				     u32 flags)
1556 {
1557 	struct iommu_test_cmd *cmd = ucmd->cmd;
1558 	struct selftest_access_item *item;
1559 	struct selftest_access *staccess;
1560 	struct page **pages;
1561 	size_t npages;
1562 	int rc;
1563 
1564 	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1565 	if (length > 16 * 1024 * 1024)
1566 		return -ENOMEM;
1567 
1568 	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1569 		return -EOPNOTSUPP;
1570 
1571 	staccess = iommufd_access_get(access_id);
1572 	if (IS_ERR(staccess))
1573 		return PTR_ERR(staccess);
1574 
1575 	if (staccess->access->ops != &selftest_access_ops_pin) {
1576 		rc = -EOPNOTSUPP;
1577 		goto out_put;
1578 	}
1579 
1580 	if (flags & MOCK_FLAGS_ACCESS_SYZ)
1581 		iova = iommufd_test_syz_conv_iova(staccess->access,
1582 						  &cmd->access_pages.iova);
1583 
1584 	npages = (ALIGN(iova + length, PAGE_SIZE) -
1585 		  ALIGN_DOWN(iova, PAGE_SIZE)) /
1586 		 PAGE_SIZE;
1587 	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1588 	if (!pages) {
1589 		rc = -ENOMEM;
1590 		goto out_put;
1591 	}
1592 
1593 	/*
1594 	 * Drivers will need to think very carefully about this locking. The
1595 	 * core code can do multiple unmaps instantaneously after
1596 	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1597 	 * the range is unpinned. This simple implementation puts a global lock
1598 	 * around the pin, which may not suit drivers that want this to be a
1599 	 * performance path. Drivers that get this wrong will trigger WARN_ON
1600 	 * races and cause EDEADLOCK failures to userspace.
1601 	 */
1602 	mutex_lock(&staccess->lock);
1603 	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1604 				      flags & MOCK_FLAGS_ACCESS_WRITE);
1605 	if (rc)
1606 		goto out_unlock;
1607 
1608 	/* For syzkaller allow uptr to be NULL to skip this check */
1609 	if (uptr) {
1610 		rc = iommufd_test_check_pages(
1611 			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1612 			npages);
1613 		if (rc)
1614 			goto out_unaccess;
1615 	}
1616 
1617 	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1618 	if (!item) {
1619 		rc = -ENOMEM;
1620 		goto out_unaccess;
1621 	}
1622 
1623 	item->iova = iova;
1624 	item->length = length;
1625 	item->id = staccess->next_id++;
1626 	list_add_tail(&item->items_elm, &staccess->items);
1627 
1628 	cmd->access_pages.out_access_pages_id = item->id;
1629 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1630 	if (rc)
1631 		goto out_free_item;
1632 	goto out_unlock;
1633 
1634 out_free_item:
1635 	list_del(&item->items_elm);
1636 	kfree(item);
1637 out_unaccess:
1638 	iommufd_access_unpin_pages(staccess->access, iova, length);
1639 out_unlock:
1640 	mutex_unlock(&staccess->lock);
1641 	kvfree(pages);
1642 out_put:
1643 	fput(staccess->file);
1644 	return rc;
1645 }
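
/*
 * Editor's worked example of the npages arithmetic above (hypothetical
 * values, assuming 4K pages): iova 0x1ff0 with length 0x20 touches the two
 * pages at 0x1000 and 0x2000.
 */
static inline size_t __maybe_unused mock_npages_example(void)
{
	/* (ALIGN(0x2010, 4K) - ALIGN_DOWN(0x1ff0, 4K)) / 4K = 0x2000 / 0x1000 */
	return (ALIGN(0x1ff0 + 0x20, SZ_4K) - ALIGN_DOWN(0x1ff0, SZ_4K)) /
	       SZ_4K;	/* == 2 */
}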
1646 
1647 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
1648 				  unsigned int access_id, unsigned long iova,
1649 				  size_t length, void __user *ubuf,
1650 				  unsigned int flags)
1651 {
1652 	struct iommu_test_cmd *cmd = ucmd->cmd;
1653 	struct selftest_access *staccess;
1654 	void *tmp;
1655 	int rc;
1656 
1657 	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1658 	if (length > 16 * 1024 * 1024)
1659 		return -ENOMEM;
1660 
1661 	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1662 		      MOCK_FLAGS_ACCESS_SYZ))
1663 		return -EOPNOTSUPP;
1664 
1665 	staccess = iommufd_access_get(access_id);
1666 	if (IS_ERR(staccess))
1667 		return PTR_ERR(staccess);
1668 
1669 	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1670 	if (!tmp) {
1671 		rc = -ENOMEM;
1672 		goto out_put;
1673 	}
1674 
1675 	if (flags & MOCK_ACCESS_RW_WRITE) {
1676 		if (copy_from_user(tmp, ubuf, length)) {
1677 			rc = -EFAULT;
1678 			goto out_free;
1679 		}
1680 	}
1681 
1682 	if (flags & MOCK_FLAGS_ACCESS_SYZ)
1683 		iova = iommufd_test_syz_conv_iova(staccess->access,
1684 						  &cmd->access_rw.iova);
1685 
1686 	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1687 	if (rc)
1688 		goto out_free;
1689 	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1690 		if (copy_to_user(ubuf, tmp, length)) {
1691 			rc = -EFAULT;
1692 			goto out_free;
1693 		}
1694 	}
1695 
1696 out_free:
1697 	kvfree(tmp);
1698 out_put:
1699 	fput(staccess->file);
1700 	return rc;
1701 }
1702 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1703 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1704 	      __IOMMUFD_ACCESS_RW_SLOW_PATH);
1705 
1706 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
1707 			      unsigned long iova, size_t length,
1708 			      unsigned long page_size, void __user *uptr,
1709 			      u32 flags)
1710 {
1711 	unsigned long i, max;
1712 	struct iommu_test_cmd *cmd = ucmd->cmd;
1713 	struct iommufd_hw_pagetable *hwpt;
1714 	struct mock_iommu_domain *mock;
1715 	int rc, count = 0;
1716 	void *tmp;
1717 
1718 	if (!page_size || !length || iova % page_size || length % page_size ||
1719 	    !uptr)
1720 		return -EINVAL;
1721 
1722 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1723 	if (IS_ERR(hwpt))
1724 		return PTR_ERR(hwpt);
1725 
1726 	if (!(mock->flags & MOCK_DIRTY_TRACK) || !mock->iommu.ops->set_dirty) {
1727 		rc = -EINVAL;
1728 		goto out_put;
1729 	}
1730 
1731 	max = length / page_size;
1732 	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1733 		       GFP_KERNEL_ACCOUNT);
1734 	if (!tmp) {
1735 		rc = -ENOMEM;
1736 		goto out_put;
1737 	}
1738 
1739 	if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1740 		rc = -EFAULT;
1741 		goto out_free;
1742 	}
1743 
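	/*
	 * Walk the user-supplied bitmap (one bit per page) and mark each
	 * selected page dirty in the mock domain, counting the pages marked.
	 */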
1744 	for (i = 0; i < max; i++) {
1745 		if (!test_bit(i, (unsigned long *)tmp))
1746 			continue;
1747 		mock->iommu.ops->set_dirty(&mock->iommu, iova + i * page_size);
1748 		count++;
1749 	}
1750 
1751 	cmd->dirty.out_nr_dirty = count;
1752 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1753 out_free:
1754 	kvfree(tmp);
1755 out_put:
1756 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1757 	return rc;
1758 }
1759 
1760 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1761 				     struct iommu_test_cmd *cmd)
1762 {
1763 	struct iopf_fault event = {};
1764 	struct iommufd_device *idev;
1765 
1766 	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1767 	if (IS_ERR(idev))
1768 		return PTR_ERR(idev);
1769 
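	/* Synthesize a last-in-group page request fault from the user's input */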
1770 	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1771 	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1772 		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1773 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
1774 	event.fault.prm.addr = cmd->trigger_iopf.addr;
1775 	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1776 	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1777 	event.fault.prm.perm = cmd->trigger_iopf.perm;
1778 
1779 	iommu_report_device_fault(idev->dev, &event);
1780 	iommufd_put_object(ucmd->ictx, &idev->obj);
1781 
1782 	return 0;
1783 }
1784 
1785 static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd,
1786 				       struct iommu_test_cmd *cmd)
1787 {
1788 	struct iommu_viommu_event_selftest test = {};
1789 	struct iommufd_device *idev;
1790 	struct mock_dev *mdev;
1791 	int rc = -ENOENT;
1792 
1793 	idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id);
1794 	if (IS_ERR(idev))
1795 		return PTR_ERR(idev);
1796 	mdev = to_mock_dev(idev->dev);
1797 
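	/*
	 * A vEVENT can only be reported while the device is attached to a
	 * vIOMMU and has a virtual device ID; otherwise -ENOENT is returned.
	 */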
1798 	down_read(&mdev->viommu_rwsem);
1799 	if (!mdev->viommu || !mdev->vdev_id)
1800 		goto out_unlock;
1801 
1802 	test.virt_id = mdev->vdev_id;
1803 	rc = iommufd_viommu_report_event(&mdev->viommu->core,
1804 					 IOMMU_VEVENTQ_TYPE_SELFTEST, &test,
1805 					 sizeof(test));
1806 out_unlock:
1807 	up_read(&mdev->viommu_rwsem);
1808 	iommufd_put_object(ucmd->ictx, &idev->obj);
1809 
1810 	return rc;
1811 }
1812 
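/*
 * Resolve an object ID to a hw_pagetable, accepting either a paging or a
 * nested HWPT. On success the object reference is held and the caller must
 * release it with iommufd_put_object().
 */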
1813 static inline struct iommufd_hw_pagetable *
1814 iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
1815 {
1816 	struct iommufd_object *pt_obj;
1817 
1818 	pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY);
1819 	if (IS_ERR(pt_obj))
1820 		return ERR_CAST(pt_obj);
1821 
1822 	if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED &&
1823 	    pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) {
1824 		iommufd_put_object(ucmd->ictx, pt_obj);
1825 		return ERR_PTR(-EINVAL);
1826 	}
1827 
1828 	return container_of(pt_obj, struct iommufd_hw_pagetable, obj);
1829 }
1830 
1831 static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd,
1832 					 struct iommu_test_cmd *cmd)
1833 {
1834 	u32 hwpt_id = cmd->pasid_check.hwpt_id;
1835 	struct iommu_domain *attached_domain;
1836 	struct iommu_attach_handle *handle;
1837 	struct iommufd_hw_pagetable *hwpt;
1838 	struct selftest_obj *sobj;
1839 	struct mock_dev *mdev;
1840 	int rc = 0;
1841 
1842 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1843 	if (IS_ERR(sobj))
1844 		return PTR_ERR(sobj);
1845 
1846 	mdev = sobj->idev.mock_dev;
1847 
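	/* Look up which domain, if any, is currently attached at this PASID */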
1848 	handle = iommu_attach_handle_get(mdev->dev.iommu_group,
1849 					 cmd->pasid_check.pasid, 0);
1850 	if (IS_ERR(handle))
1851 		attached_domain = NULL;
1852 	else
1853 		attached_domain = handle->domain;
1854 
1855 	/* hwpt_id == 0 means to check that the PASID is detached */
1856 	if (!hwpt_id) {
1857 		if (attached_domain)
1858 			rc = -EINVAL;
1859 		goto out_sobj;
1860 	}
1861 
1862 	hwpt = iommufd_get_hwpt(ucmd, hwpt_id);
1863 	if (IS_ERR(hwpt)) {
1864 		rc = PTR_ERR(hwpt);
1865 		goto out_sobj;
1866 	}
1867 
1868 	if (attached_domain != hwpt->domain)
1869 		rc = -EINVAL;
1870 
1871 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1872 out_sobj:
1873 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1874 	return rc;
1875 }
1876 
1877 static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
1878 				     struct iommu_test_cmd *cmd)
1879 {
1880 	struct selftest_obj *sobj;
1881 	int rc;
1882 
1883 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1884 	if (IS_ERR(sobj))
1885 		return PTR_ERR(sobj);
1886 
1887 	rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid,
1888 				   &cmd->pasid_attach.pt_id);
1889 	if (rc)
1890 		goto out_sobj;
1891 
1892 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1893 	if (rc)
1894 		iommufd_device_detach(sobj->idev.idev, cmd->pasid_attach.pasid);
1895 
1896 out_sobj:
1897 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1898 	return rc;
1899 }
1900 
1901 static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd,
1902 				      struct iommu_test_cmd *cmd)
1903 {
1904 	struct selftest_obj *sobj;
1905 	int rc;
1906 
1907 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1908 	if (IS_ERR(sobj))
1909 		return PTR_ERR(sobj);
1910 
1911 	rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid,
1912 				    &cmd->pasid_attach.pt_id);
1913 	if (rc)
1914 		goto out_sobj;
1915 
1916 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1917 
1918 out_sobj:
1919 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1920 	return rc;
1921 }
1922 
1923 static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd,
1924 				     struct iommu_test_cmd *cmd)
1925 {
1926 	struct selftest_obj *sobj;
1927 
1928 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1929 	if (IS_ERR(sobj))
1930 		return PTR_ERR(sobj);
1931 
1932 	iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid);
1933 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1934 	return 0;
1935 }
1936 
1937 void iommufd_selftest_destroy(struct iommufd_object *obj)
1938 {
1939 	struct selftest_obj *sobj = to_selftest_obj(obj);
1940 
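	/* Tear down in reverse order of setup: detach, unbind, then destroy */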
1941 	switch (sobj->type) {
1942 	case TYPE_IDEV:
1943 		iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID);
1944 		iommufd_device_unbind(sobj->idev.idev);
1945 		mock_dev_destroy(sobj->idev.mock_dev);
1946 		break;
1947 	}
1948 }
1949 
1950 struct iommufd_test_dma_buf {
1951 	void *memory;
1952 	size_t length;
1953 	bool revoked;
1954 };
1955 
1956 static int iommufd_test_dma_buf_attach(struct dma_buf *dmabuf,
1957 				       struct dma_buf_attachment *attachment)
1958 {
1959 	return 0;
1960 }
1961 
1962 static void iommufd_test_dma_buf_detach(struct dma_buf *dmabuf,
1963 					struct dma_buf_attachment *attachment)
1964 {
1965 }
1966 
1967 static struct sg_table *
1968 iommufd_test_dma_buf_map(struct dma_buf_attachment *attachment,
1969 			 enum dma_data_direction dir)
1970 {
1971 	return ERR_PTR(-EOPNOTSUPP);
1972 }
1973 
1974 static void iommufd_test_dma_buf_unmap(struct dma_buf_attachment *attachment,
1975 				       struct sg_table *sgt,
1976 				       enum dma_data_direction dir)
1977 {
1978 }
1979 
1980 static void iommufd_test_dma_buf_release(struct dma_buf *dmabuf)
1981 {
1982 	struct iommufd_test_dma_buf *priv = dmabuf->priv;
1983 
1984 	kfree(priv->memory);
1985 	kfree(priv);
1986 }
1987 
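/*
 * A deliberately minimal exporter: attach/detach are no-ops and the CPU
 * map/unmap path is unsupported, since importers are expected to use the
 * physical-address path in iommufd_test_dma_buf_iommufd_map() below.
 */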
1988 static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
1989 	.attach = iommufd_test_dma_buf_attach,
1990 	.detach = iommufd_test_dma_buf_detach,
1991 	.map_dma_buf = iommufd_test_dma_buf_map,
1992 	.release = iommufd_test_dma_buf_release,
1993 	.unmap_dma_buf = iommufd_test_dma_buf_unmap,
1994 };
1995 
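/*
 * Report the single contiguous physical range backing the test buffer. The
 * dma-buf reservation lock must be held, and the call fails with -ENODEV
 * once the buffer has been revoked.
 */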
1996 int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
1997 				     struct dma_buf_phys_vec *phys)
1998 {
1999 	struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;
2000 
2001 	dma_resv_assert_held(attachment->dmabuf->resv);
2002 
2003 	if (attachment->dmabuf->ops != &iommufd_test_dmabuf_ops)
2004 		return -EOPNOTSUPP;
2005 
2006 	if (priv->revoked)
2007 		return -ENODEV;
2008 
2009 	phys->paddr = virt_to_phys(priv->memory);
2010 	phys->len = priv->length;
2011 	return 0;
2012 }
2013 
2014 static int iommufd_test_dmabuf_get(struct iommufd_ucmd *ucmd,
2015 				   unsigned int open_flags,
2016 				   size_t len)
2017 {
2018 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
2019 	struct iommufd_test_dma_buf *priv;
2020 	struct dma_buf *dmabuf;
2021 	int rc;
2022 
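	/* Page-align the request and cap it at 512 pages to bound test memory */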
2023 	len = ALIGN(len, PAGE_SIZE);
2024 	if (len == 0 || len > PAGE_SIZE * 512)
2025 		return -EINVAL;
2026 
2027 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2028 	if (!priv)
2029 		return -ENOMEM;
2030 
2031 	priv->length = len;
2032 	priv->memory = kzalloc(len, GFP_KERNEL);
2033 	if (!priv->memory) {
2034 		rc = -ENOMEM;
2035 		goto err_free;
2036 	}
2037 
2038 	exp_info.ops = &iommufd_test_dmabuf_ops;
2039 	exp_info.size = len;
2040 	exp_info.flags = open_flags;
2041 	exp_info.priv = priv;
2042 
2043 	dmabuf = dma_buf_export(&exp_info);
2044 	if (IS_ERR(dmabuf)) {
2045 		rc = PTR_ERR(dmabuf);
2046 		goto err_free;
2047 	}
2048 
2049 	return dma_buf_fd(dmabuf, open_flags);
2050 
2051 err_free:
2052 	kfree(priv->memory);
2053 	kfree(priv);
2054 	return rc;
2055 }
2056 
2057 static int iommufd_test_dmabuf_revoke(struct iommufd_ucmd *ucmd, int fd,
2058 				      bool revoked)
2059 {
2060 	struct iommufd_test_dma_buf *priv;
2061 	struct dma_buf *dmabuf;
2062 	int rc = 0;
2063 
2064 	dmabuf = dma_buf_get(fd);
2065 	if (IS_ERR(dmabuf))
2066 		return PTR_ERR(dmabuf);
2067 
2068 	if (dmabuf->ops != &iommufd_test_dmabuf_ops) {
2069 		rc = -EOPNOTSUPP;
2070 		goto err_put;
2071 	}
2072 
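	/*
	 * Flip the revoked state under the reservation lock and notify
	 * importers so that they observe the change on their next mapping.
	 */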
2073 	priv = dmabuf->priv;
2074 	dma_resv_lock(dmabuf->resv, NULL);
2075 	priv->revoked = revoked;
2076 	dma_buf_move_notify(dmabuf);
2077 	dma_resv_unlock(dmabuf->resv);
2078 
2079 err_put:
2080 	dma_buf_put(dmabuf);
2081 	return rc;
2082 }
2083 
2084 int iommufd_test(struct iommufd_ucmd *ucmd)
2085 {
2086 	struct iommu_test_cmd *cmd = ucmd->cmd;
2087 
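	/* Dispatch on the selftest op; each case unpacks its own payload */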
2088 	switch (cmd->op) {
2089 	case IOMMU_TEST_OP_ADD_RESERVED:
2090 		return iommufd_test_add_reserved(ucmd, cmd->id,
2091 						 cmd->add_reserved.start,
2092 						 cmd->add_reserved.length);
2093 	case IOMMU_TEST_OP_MOCK_DOMAIN:
2094 	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
2095 		return iommufd_test_mock_domain(ucmd, cmd);
2096 	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
2097 		return iommufd_test_mock_domain_replace(
2098 			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
2099 	case IOMMU_TEST_OP_MD_CHECK_MAP:
2100 		return iommufd_test_md_check_pa(
2101 			ucmd, cmd->id, cmd->check_map.iova,
2102 			cmd->check_map.length,
2103 			u64_to_user_ptr(cmd->check_map.uptr));
2104 	case IOMMU_TEST_OP_MD_CHECK_REFS:
2105 		return iommufd_test_md_check_refs(
2106 			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
2107 			cmd->check_refs.length, cmd->check_refs.refs);
2108 	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
2109 		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
2110 						   cmd->check_iotlb.id,
2111 						   cmd->check_iotlb.iotlb);
2112 	case IOMMU_TEST_OP_DEV_CHECK_CACHE:
2113 		return iommufd_test_dev_check_cache(ucmd, cmd->id,
2114 						    cmd->check_dev_cache.id,
2115 						    cmd->check_dev_cache.cache);
2116 	case IOMMU_TEST_OP_CREATE_ACCESS:
2117 		return iommufd_test_create_access(ucmd, cmd->id,
2118 						  cmd->create_access.flags);
2119 	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
2120 		return iommufd_test_access_replace_ioas(
2121 			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
2122 	case IOMMU_TEST_OP_ACCESS_PAGES:
2123 		return iommufd_test_access_pages(
2124 			ucmd, cmd->id, cmd->access_pages.iova,
2125 			cmd->access_pages.length,
2126 			u64_to_user_ptr(cmd->access_pages.uptr),
2127 			cmd->access_pages.flags);
2128 	case IOMMU_TEST_OP_ACCESS_RW:
2129 		return iommufd_test_access_rw(
2130 			ucmd, cmd->id, cmd->access_rw.iova,
2131 			cmd->access_rw.length,
2132 			u64_to_user_ptr(cmd->access_rw.uptr),
2133 			cmd->access_rw.flags);
2134 	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
2135 		return iommufd_test_access_item_destroy(
2136 			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
2137 	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
2138 		/* Protect _batch_init(); the limit cannot be less than elmsz */
2139 		if (cmd->memory_limit.limit <
2140 		    sizeof(unsigned long) + sizeof(u32))
2141 			return -EINVAL;
2142 		iommufd_test_memory_limit = cmd->memory_limit.limit;
2143 		return 0;
2144 	case IOMMU_TEST_OP_DIRTY:
2145 		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
2146 					  cmd->dirty.length,
2147 					  cmd->dirty.page_size,
2148 					  u64_to_user_ptr(cmd->dirty.uptr),
2149 					  cmd->dirty.flags);
2150 	case IOMMU_TEST_OP_TRIGGER_IOPF:
2151 		return iommufd_test_trigger_iopf(ucmd, cmd);
2152 	case IOMMU_TEST_OP_TRIGGER_VEVENT:
2153 		return iommufd_test_trigger_vevent(ucmd, cmd);
2154 	case IOMMU_TEST_OP_PASID_ATTACH:
2155 		return iommufd_test_pasid_attach(ucmd, cmd);
2156 	case IOMMU_TEST_OP_PASID_REPLACE:
2157 		return iommufd_test_pasid_replace(ucmd, cmd);
2158 	case IOMMU_TEST_OP_PASID_DETACH:
2159 		return iommufd_test_pasid_detach(ucmd, cmd);
2160 	case IOMMU_TEST_OP_PASID_CHECK_HWPT:
2161 		return iommufd_test_pasid_check_hwpt(ucmd, cmd);
2162 	case IOMMU_TEST_OP_DMABUF_GET:
2163 		return iommufd_test_dmabuf_get(ucmd, cmd->dmabuf_get.open_flags,
2164 					       cmd->dmabuf_get.length);
2165 	case IOMMU_TEST_OP_DMABUF_REVOKE:
2166 		return iommufd_test_dmabuf_revoke(ucmd,
2167 						  cmd->dmabuf_revoke.dmabuf_fd,
2168 						  cmd->dmabuf_revoke.revoked);
2169 	default:
2170 		return -EOPNOTSUPP;
2171 	}
2172 }
2173 
2174 bool iommufd_should_fail(void)
2175 {
2176 	return should_fail(&fail_iommufd, 1);
2177 }
2178 
2179 int __init iommufd_test_init(void)
2180 {
2181 	struct platform_device_info pdevinfo = {
2182 		.name = "iommufd_selftest_iommu",
2183 	};
2184 	int rc;
2185 
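	/* Expose the fail_iommufd fault-injection attribute via debugfs */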
2186 	dbgfs_root =
2187 		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
2188 
2189 	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
2190 	if (IS_ERR(selftest_iommu_dev)) {
2191 		rc = PTR_ERR(selftest_iommu_dev);
2192 		goto err_dbgfs;
2193 	}
2194 
2195 	rc = bus_register(&iommufd_mock_bus_type.bus);
2196 	if (rc)
2197 		goto err_platform;
2198 
2199 	rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
2200 				    &selftest_iommu_dev->dev, NULL, "%s",
2201 				    dev_name(&selftest_iommu_dev->dev));
2202 	if (rc)
2203 		goto err_bus;
2204 
2205 	rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
2206 				       &iommufd_mock_bus_type.bus,
2207 				       &iommufd_mock_bus_type.nb);
2208 	if (rc)
2209 		goto err_sysfs;
2210 
2211 	refcount_set(&mock_iommu.users, 1);
2212 	init_completion(&mock_iommu.complete);
2213 
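	/*
	 * The allocation is not error-checked here; a NULL queue appears to
	 * be tolerated (iommufd_test_exit() checks for it before freeing).
	 */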
2214 	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
2215 	mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);
2216 
2217 	return 0;
2218 
2219 err_sysfs:
2220 	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2221 err_bus:
2222 	bus_unregister(&iommufd_mock_bus_type.bus);
2223 err_platform:
2224 	platform_device_unregister(selftest_iommu_dev);
2225 err_dbgfs:
2226 	debugfs_remove_recursive(dbgfs_root);
2227 	return rc;
2228 }
2229 
2230 static void iommufd_test_wait_for_users(void)
2231 {
2232 	if (refcount_dec_and_test(&mock_iommu.users))
2233 		return;
2234 	/*
2235 	 * Time out waiting for iommu device user count to become 0.
2236 	 *
2237 	 * Note that this is mostly illustrative: the selftest is built into
2238 	 * the iommufd module, so the mock iommu device is only unplugged at
2239 	 * module unload, and the module cannot be unloaded while any iommufd
2240 	 * FD is open. Thus this WARN_ON is not expected to trigger.
2241 	 */
2242 	WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
2243 					     msecs_to_jiffies(10000)));
2244 }
2245 
2246 void iommufd_test_exit(void)
2247 {
2248 	if (mock_iommu_iopf_queue) {
2249 		iopf_queue_free(mock_iommu_iopf_queue);
2250 		mock_iommu_iopf_queue = NULL;
2251 	}
2252 
2253 	iommufd_test_wait_for_users();
2254 	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2255 	iommu_device_unregister_bus(&mock_iommu.iommu_dev,
2256 				    &iommufd_mock_bus_type.bus,
2257 				    &iommufd_mock_bus_type.nb);
2258 	bus_unregister(&iommufd_mock_bus_type.bus);
2259 	platform_device_unregister(selftest_iommu_dev);
2260 	debugfs_remove_recursive(dbgfs_root);
2261 }
2262 
2263 MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
2264