xref: /linux/drivers/iommu/iommufd/selftest.c (revision 8477ab143069c6b05d6da4a8184ded8b969240f5)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3  *
4  * Kernel side components to support tools/testing/selftests/iommu
5  */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/fault-inject.h>
9 #include <linux/file.h>
10 #include <linux/iommu.h>
11 #include <linux/platform_device.h>
12 #include <linux/slab.h>
13 #include <linux/xarray.h>
14 #include <uapi/linux/iommufd.h>
15 
16 #include "../iommu-priv.h"
17 #include "io_pagetable.h"
18 #include "iommufd_private.h"
19 #include "iommufd_test.h"
20 
21 static DECLARE_FAULT_ATTR(fail_iommufd);
22 static struct dentry *dbgfs_root;
23 static struct platform_device *selftest_iommu_dev;
24 static const struct iommu_ops mock_ops;
25 static struct iommu_domain_ops domain_nested_ops;
26 
27 size_t iommufd_test_memory_limit = 65536;
28 
29 struct mock_bus_type {
30 	struct bus_type bus;
31 	struct notifier_block nb;
32 };
33 
34 static struct mock_bus_type iommufd_mock_bus_type = {
35 	.bus = {
36 		.name = "iommufd_mock",
37 	},
38 };
39 
40 static DEFINE_IDA(mock_dev_ida);
41 
42 enum {
43 	MOCK_DIRTY_TRACK = 1,
44 	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
45 	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
46 
47 	/*
48 	 * Like a real page table alignment requires the low bits of the address
49 	 * to be zero. xarray also requires the high bit to be zero, so we store
50 	 * the pfns shifted. The upper bits are used for metadata.
51 	 */
52 	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
53 
54 	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
55 	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
56 	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
57 	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
58 	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
59 };
60 
61 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
62 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
63 
64 /*
65  * Syzkaller has trouble randomizing the correct iova to use since it is linked
66  * to the map ioctl's output, and it has no ide about that. So, simplify things.
67  * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
68  * value. This has a much smaller randomization space and syzkaller can hit it.
69  */
__iommufd_test_syz_conv_iova(struct io_pagetable * iopt,u64 * iova)70 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
71 						  u64 *iova)
72 {
73 	struct syz_layout {
74 		__u32 nth_area;
75 		__u32 offset;
76 	};
77 	struct syz_layout *syz = (void *)iova;
78 	unsigned int nth = syz->nth_area;
79 	struct iopt_area *area;
80 
81 	down_read(&iopt->iova_rwsem);
82 	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
83 	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
84 		if (nth == 0) {
85 			up_read(&iopt->iova_rwsem);
86 			return iopt_area_iova(area) + syz->offset;
87 		}
88 		nth--;
89 	}
90 	up_read(&iopt->iova_rwsem);
91 
92 	return 0;
93 }
94 
iommufd_test_syz_conv_iova(struct iommufd_access * access,u64 * iova)95 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
96 						u64 *iova)
97 {
98 	unsigned long ret;
99 
100 	mutex_lock(&access->ioas_lock);
101 	if (!access->ioas) {
102 		mutex_unlock(&access->ioas_lock);
103 		return 0;
104 	}
105 	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
106 	mutex_unlock(&access->ioas_lock);
107 	return ret;
108 }
109 
iommufd_test_syz_conv_iova_id(struct iommufd_ucmd * ucmd,unsigned int ioas_id,u64 * iova,u32 * flags)110 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
111 				   unsigned int ioas_id, u64 *iova, u32 *flags)
112 {
113 	struct iommufd_ioas *ioas;
114 
115 	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
116 		return;
117 	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
118 
119 	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
120 	if (IS_ERR(ioas))
121 		return;
122 	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
123 	iommufd_put_object(ucmd->ictx, &ioas->obj);
124 }
125 
126 struct mock_iommu_domain {
127 	unsigned long flags;
128 	struct iommu_domain domain;
129 	struct xarray pfns;
130 };
131 
132 static inline struct mock_iommu_domain *
to_mock_domain(struct iommu_domain * domain)133 to_mock_domain(struct iommu_domain *domain)
134 {
135 	return container_of(domain, struct mock_iommu_domain, domain);
136 }
137 
138 struct mock_iommu_domain_nested {
139 	struct iommu_domain domain;
140 	struct mock_viommu *mock_viommu;
141 	struct mock_iommu_domain *parent;
142 	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
143 };
144 
145 static inline struct mock_iommu_domain_nested *
to_mock_nested(struct iommu_domain * domain)146 to_mock_nested(struct iommu_domain *domain)
147 {
148 	return container_of(domain, struct mock_iommu_domain_nested, domain);
149 }
150 
151 struct mock_viommu {
152 	struct iommufd_viommu core;
153 	struct mock_iommu_domain *s2_parent;
154 };
155 
to_mock_viommu(struct iommufd_viommu * viommu)156 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
157 {
158 	return container_of(viommu, struct mock_viommu, core);
159 }
160 
161 enum selftest_obj_type {
162 	TYPE_IDEV,
163 };
164 
165 struct mock_dev {
166 	struct device dev;
167 	struct mock_viommu *viommu;
168 	struct rw_semaphore viommu_rwsem;
169 	unsigned long flags;
170 	unsigned long vdev_id;
171 	int id;
172 	u32 cache[MOCK_DEV_CACHE_NUM];
173 	atomic_t pasid_1024_fake_error;
174 	unsigned int iopf_refcount;
175 	struct iommu_domain *domain;
176 };
177 
to_mock_dev(struct device * dev)178 static inline struct mock_dev *to_mock_dev(struct device *dev)
179 {
180 	return container_of(dev, struct mock_dev, dev);
181 }
182 
183 struct selftest_obj {
184 	struct iommufd_object obj;
185 	enum selftest_obj_type type;
186 
187 	union {
188 		struct {
189 			struct iommufd_device *idev;
190 			struct iommufd_ctx *ictx;
191 			struct mock_dev *mock_dev;
192 		} idev;
193 	};
194 };
195 
to_selftest_obj(struct iommufd_object * obj)196 static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
197 {
198 	return container_of(obj, struct selftest_obj, obj);
199 }
200 
mock_domain_nop_attach(struct iommu_domain * domain,struct device * dev)201 static int mock_domain_nop_attach(struct iommu_domain *domain,
202 				  struct device *dev)
203 {
204 	struct mock_dev *mdev = to_mock_dev(dev);
205 	struct mock_viommu *new_viommu = NULL;
206 	unsigned long vdev_id = 0;
207 	int rc;
208 
209 	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
210 		return -EINVAL;
211 
212 	iommu_group_mutex_assert(dev);
213 	if (domain->type == IOMMU_DOMAIN_NESTED) {
214 		new_viommu = to_mock_nested(domain)->mock_viommu;
215 		if (new_viommu) {
216 			rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
217 							&vdev_id);
218 			if (rc)
219 				return rc;
220 		}
221 	}
222 	if (new_viommu != mdev->viommu) {
223 		down_write(&mdev->viommu_rwsem);
224 		mdev->viommu = new_viommu;
225 		mdev->vdev_id = vdev_id;
226 		up_write(&mdev->viommu_rwsem);
227 	}
228 
229 	rc = mock_dev_enable_iopf(dev, domain);
230 	if (rc)
231 		return rc;
232 
233 	mock_dev_disable_iopf(dev, mdev->domain);
234 	mdev->domain = domain;
235 
236 	return 0;
237 }
238 
mock_domain_set_dev_pasid_nop(struct iommu_domain * domain,struct device * dev,ioasid_t pasid,struct iommu_domain * old)239 static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
240 					 struct device *dev, ioasid_t pasid,
241 					 struct iommu_domain *old)
242 {
243 	struct mock_dev *mdev = to_mock_dev(dev);
244 	int rc;
245 
246 	/*
247 	 * Per the first attach with pasid 1024, set the
248 	 * mdev->pasid_1024_fake_error. Hence the second call of this op
249 	 * can fake an error to validate the error path of the core. This
250 	 * is helpful to test the case in which the iommu core needs to
251 	 * rollback to the old domain due to driver failure. e.g. replace.
252 	 * User should be careful about the third call of this op, it shall
253 	 * succeed since the mdev->pasid_1024_fake_error is cleared in the
254 	 * second call.
255 	 */
256 	if (pasid == 1024) {
257 		if (domain->type == IOMMU_DOMAIN_BLOCKED) {
258 			atomic_set(&mdev->pasid_1024_fake_error, 0);
259 		} else if (atomic_read(&mdev->pasid_1024_fake_error)) {
260 			/*
261 			 * Clear the flag, and fake an error to fail the
262 			 * replacement.
263 			 */
264 			atomic_set(&mdev->pasid_1024_fake_error, 0);
265 			return -ENOMEM;
266 		} else {
267 			/* Set the flag to fake an error in next call */
268 			atomic_set(&mdev->pasid_1024_fake_error, 1);
269 		}
270 	}
271 
272 	rc = mock_dev_enable_iopf(dev, domain);
273 	if (rc)
274 		return rc;
275 
276 	mock_dev_disable_iopf(dev, old);
277 
278 	return 0;
279 }
280 
281 static const struct iommu_domain_ops mock_blocking_ops = {
282 	.attach_dev = mock_domain_nop_attach,
283 	.set_dev_pasid = mock_domain_set_dev_pasid_nop
284 };
285 
286 static struct iommu_domain mock_blocking_domain = {
287 	.type = IOMMU_DOMAIN_BLOCKED,
288 	.ops = &mock_blocking_ops,
289 };
290 
mock_domain_hw_info(struct device * dev,u32 * length,u32 * type)291 static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
292 {
293 	struct iommu_test_hw_info *info;
294 
295 	info = kzalloc(sizeof(*info), GFP_KERNEL);
296 	if (!info)
297 		return ERR_PTR(-ENOMEM);
298 
299 	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
300 	*length = sizeof(*info);
301 	*type = IOMMU_HW_INFO_TYPE_SELFTEST;
302 
303 	return info;
304 }
305 
mock_domain_set_dirty_tracking(struct iommu_domain * domain,bool enable)306 static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
307 					  bool enable)
308 {
309 	struct mock_iommu_domain *mock = to_mock_domain(domain);
310 	unsigned long flags = mock->flags;
311 
312 	if (enable && !domain->dirty_ops)
313 		return -EINVAL;
314 
315 	/* No change? */
316 	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
317 		return 0;
318 
319 	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
320 
321 	mock->flags = flags;
322 	return 0;
323 }
324 
mock_test_and_clear_dirty(struct mock_iommu_domain * mock,unsigned long iova,size_t page_size,unsigned long flags)325 static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
326 				      unsigned long iova, size_t page_size,
327 				      unsigned long flags)
328 {
329 	unsigned long cur, end = iova + page_size - 1;
330 	bool dirty = false;
331 	void *ent, *old;
332 
333 	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
334 		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
335 		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
336 			continue;
337 
338 		dirty = true;
339 		/* Clear dirty */
340 		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
341 			unsigned long val;
342 
343 			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
344 			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
345 				       xa_mk_value(val), GFP_KERNEL);
346 			WARN_ON_ONCE(ent != old);
347 		}
348 	}
349 
350 	return dirty;
351 }
352 
mock_domain_read_and_clear_dirty(struct iommu_domain * domain,unsigned long iova,size_t size,unsigned long flags,struct iommu_dirty_bitmap * dirty)353 static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
354 					    unsigned long iova, size_t size,
355 					    unsigned long flags,
356 					    struct iommu_dirty_bitmap *dirty)
357 {
358 	struct mock_iommu_domain *mock = to_mock_domain(domain);
359 	unsigned long end = iova + size;
360 	void *ent;
361 
362 	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
363 		return -EINVAL;
364 
365 	do {
366 		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
367 		unsigned long head;
368 
369 		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
370 		if (!ent) {
371 			iova += pgsize;
372 			continue;
373 		}
374 
375 		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
376 			pgsize = MOCK_HUGE_PAGE_SIZE;
377 		head = iova & ~(pgsize - 1);
378 
379 		/* Clear dirty */
380 		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
381 			iommu_dirty_bitmap_record(dirty, iova, pgsize);
382 		iova += pgsize;
383 	} while (iova < end);
384 
385 	return 0;
386 }
387 
388 static const struct iommu_dirty_ops dirty_ops = {
389 	.set_dirty_tracking = mock_domain_set_dirty_tracking,
390 	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
391 };
392 
393 static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data * user_data)394 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
395 {
396 	struct mock_iommu_domain_nested *mock_nested;
397 	struct iommu_hwpt_selftest user_cfg;
398 	int rc, i;
399 
400 	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
401 		return ERR_PTR(-EOPNOTSUPP);
402 
403 	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
404 					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
405 	if (rc)
406 		return ERR_PTR(rc);
407 
408 	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
409 	if (!mock_nested)
410 		return ERR_PTR(-ENOMEM);
411 	mock_nested->domain.ops = &domain_nested_ops;
412 	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
413 	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
414 		mock_nested->iotlb[i] = user_cfg.iotlb;
415 	return mock_nested;
416 }
417 
418 static struct iommu_domain *
mock_domain_alloc_nested(struct device * dev,struct iommu_domain * parent,u32 flags,const struct iommu_user_data * user_data)419 mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
420 			 u32 flags, const struct iommu_user_data *user_data)
421 {
422 	struct mock_iommu_domain_nested *mock_nested;
423 	struct mock_iommu_domain *mock_parent;
424 
425 	if (flags & ~IOMMU_HWPT_ALLOC_PASID)
426 		return ERR_PTR(-EOPNOTSUPP);
427 	if (!parent || parent->ops != mock_ops.default_domain_ops)
428 		return ERR_PTR(-EINVAL);
429 
430 	mock_parent = to_mock_domain(parent);
431 	if (!mock_parent)
432 		return ERR_PTR(-EINVAL);
433 
434 	mock_nested = __mock_domain_alloc_nested(user_data);
435 	if (IS_ERR(mock_nested))
436 		return ERR_CAST(mock_nested);
437 	mock_nested->parent = mock_parent;
438 	return &mock_nested->domain;
439 }
440 
441 static struct iommu_domain *
mock_domain_alloc_paging_flags(struct device * dev,u32 flags,const struct iommu_user_data * user_data)442 mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
443 			       const struct iommu_user_data *user_data)
444 {
445 	bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
446 	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
447 				 IOMMU_HWPT_ALLOC_NEST_PARENT |
448 				 IOMMU_HWPT_ALLOC_PASID;
449 	struct mock_dev *mdev = to_mock_dev(dev);
450 	bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
451 	struct mock_iommu_domain *mock;
452 
453 	if (user_data)
454 		return ERR_PTR(-EOPNOTSUPP);
455 	if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
456 		return ERR_PTR(-EOPNOTSUPP);
457 
458 	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
459 	if (!mock)
460 		return ERR_PTR(-ENOMEM);
461 	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
462 	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
463 	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
464 	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
465 		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
466 	mock->domain.ops = mock_ops.default_domain_ops;
467 	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
468 	xa_init(&mock->pfns);
469 
470 	if (has_dirty_flag)
471 		mock->domain.dirty_ops = &dirty_ops;
472 	return &mock->domain;
473 }
474 
mock_domain_free(struct iommu_domain * domain)475 static void mock_domain_free(struct iommu_domain *domain)
476 {
477 	struct mock_iommu_domain *mock = to_mock_domain(domain);
478 
479 	WARN_ON(!xa_empty(&mock->pfns));
480 	kfree(mock);
481 }
482 
mock_domain_map_pages(struct iommu_domain * domain,unsigned long iova,phys_addr_t paddr,size_t pgsize,size_t pgcount,int prot,gfp_t gfp,size_t * mapped)483 static int mock_domain_map_pages(struct iommu_domain *domain,
484 				 unsigned long iova, phys_addr_t paddr,
485 				 size_t pgsize, size_t pgcount, int prot,
486 				 gfp_t gfp, size_t *mapped)
487 {
488 	struct mock_iommu_domain *mock = to_mock_domain(domain);
489 	unsigned long flags = MOCK_PFN_START_IOVA;
490 	unsigned long start_iova = iova;
491 
492 	/*
493 	 * xarray does not reliably work with fault injection because it does a
494 	 * retry allocation, so put our own failure point.
495 	 */
496 	if (iommufd_should_fail())
497 		return -ENOENT;
498 
499 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
500 	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
501 	for (; pgcount; pgcount--) {
502 		size_t cur;
503 
504 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
505 			void *old;
506 
507 			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
508 				flags = MOCK_PFN_LAST_IOVA;
509 			if (pgsize != MOCK_IO_PAGE_SIZE) {
510 				flags |= MOCK_PFN_HUGE_IOVA;
511 			}
512 			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
513 				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
514 						   flags),
515 				       gfp);
516 			if (xa_is_err(old)) {
517 				for (; start_iova != iova;
518 				     start_iova += MOCK_IO_PAGE_SIZE)
519 					xa_erase(&mock->pfns,
520 						 start_iova /
521 							 MOCK_IO_PAGE_SIZE);
522 				return xa_err(old);
523 			}
524 			WARN_ON(old);
525 			iova += MOCK_IO_PAGE_SIZE;
526 			paddr += MOCK_IO_PAGE_SIZE;
527 			*mapped += MOCK_IO_PAGE_SIZE;
528 			flags = 0;
529 		}
530 	}
531 	return 0;
532 }
533 
mock_domain_unmap_pages(struct iommu_domain * domain,unsigned long iova,size_t pgsize,size_t pgcount,struct iommu_iotlb_gather * iotlb_gather)534 static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
535 				      unsigned long iova, size_t pgsize,
536 				      size_t pgcount,
537 				      struct iommu_iotlb_gather *iotlb_gather)
538 {
539 	struct mock_iommu_domain *mock = to_mock_domain(domain);
540 	bool first = true;
541 	size_t ret = 0;
542 	void *ent;
543 
544 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
545 	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
546 
547 	for (; pgcount; pgcount--) {
548 		size_t cur;
549 
550 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
551 			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
552 
553 			/*
554 			 * iommufd generates unmaps that must be a strict
555 			 * superset of the map's performend So every
556 			 * starting/ending IOVA should have been an iova passed
557 			 * to map.
558 			 *
559 			 * This simple logic doesn't work when the HUGE_PAGE is
560 			 * turned on since the core code will automatically
561 			 * switch between the two page sizes creating a break in
562 			 * the unmap calls. The break can land in the middle of
563 			 * contiguous IOVA.
564 			 */
565 			if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
566 				if (first) {
567 					WARN_ON(ent && !(xa_to_value(ent) &
568 							 MOCK_PFN_START_IOVA));
569 					first = false;
570 				}
571 				if (pgcount == 1 &&
572 				    cur + MOCK_IO_PAGE_SIZE == pgsize)
573 					WARN_ON(ent && !(xa_to_value(ent) &
574 							 MOCK_PFN_LAST_IOVA));
575 			}
576 
577 			iova += MOCK_IO_PAGE_SIZE;
578 			ret += MOCK_IO_PAGE_SIZE;
579 		}
580 	}
581 	return ret;
582 }
583 
mock_domain_iova_to_phys(struct iommu_domain * domain,dma_addr_t iova)584 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
585 					    dma_addr_t iova)
586 {
587 	struct mock_iommu_domain *mock = to_mock_domain(domain);
588 	void *ent;
589 
590 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
591 	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
592 	WARN_ON(!ent);
593 	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
594 }
595 
mock_domain_capable(struct device * dev,enum iommu_cap cap)596 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
597 {
598 	struct mock_dev *mdev = to_mock_dev(dev);
599 
600 	switch (cap) {
601 	case IOMMU_CAP_CACHE_COHERENCY:
602 		return true;
603 	case IOMMU_CAP_DIRTY_TRACKING:
604 		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
605 	default:
606 		break;
607 	}
608 
609 	return false;
610 }
611 
612 static struct iopf_queue *mock_iommu_iopf_queue;
613 
614 static struct mock_iommu_device {
615 	struct iommu_device iommu_dev;
616 	struct completion complete;
617 	refcount_t users;
618 } mock_iommu;
619 
mock_probe_device(struct device * dev)620 static struct iommu_device *mock_probe_device(struct device *dev)
621 {
622 	if (dev->bus != &iommufd_mock_bus_type.bus)
623 		return ERR_PTR(-ENODEV);
624 	return &mock_iommu.iommu_dev;
625 }
626 
mock_domain_page_response(struct device * dev,struct iopf_fault * evt,struct iommu_page_response * msg)627 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
628 				      struct iommu_page_response *msg)
629 {
630 }
631 
mock_dev_enable_iopf(struct device * dev,struct iommu_domain * domain)632 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
633 {
634 	struct mock_dev *mdev = to_mock_dev(dev);
635 	int ret;
636 
637 	if (!domain || !domain->iopf_handler)
638 		return 0;
639 
640 	if (!mock_iommu_iopf_queue)
641 		return -ENODEV;
642 
643 	if (mdev->iopf_refcount) {
644 		mdev->iopf_refcount++;
645 		return 0;
646 	}
647 
648 	ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
649 	if (ret)
650 		return ret;
651 
652 	mdev->iopf_refcount = 1;
653 
654 	return 0;
655 }
656 
mock_dev_disable_iopf(struct device * dev,struct iommu_domain * domain)657 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
658 {
659 	struct mock_dev *mdev = to_mock_dev(dev);
660 
661 	if (!domain || !domain->iopf_handler)
662 		return;
663 
664 	if (--mdev->iopf_refcount)
665 		return;
666 
667 	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
668 }
669 
mock_viommu_destroy(struct iommufd_viommu * viommu)670 static void mock_viommu_destroy(struct iommufd_viommu *viommu)
671 {
672 	struct mock_iommu_device *mock_iommu = container_of(
673 		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
674 
675 	if (refcount_dec_and_test(&mock_iommu->users))
676 		complete(&mock_iommu->complete);
677 
678 	/* iommufd core frees mock_viommu and viommu */
679 }
680 
681 static struct iommu_domain *
mock_viommu_alloc_domain_nested(struct iommufd_viommu * viommu,u32 flags,const struct iommu_user_data * user_data)682 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
683 				const struct iommu_user_data *user_data)
684 {
685 	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
686 	struct mock_iommu_domain_nested *mock_nested;
687 
688 	if (flags & ~IOMMU_HWPT_ALLOC_PASID)
689 		return ERR_PTR(-EOPNOTSUPP);
690 
691 	mock_nested = __mock_domain_alloc_nested(user_data);
692 	if (IS_ERR(mock_nested))
693 		return ERR_CAST(mock_nested);
694 	mock_nested->mock_viommu = mock_viommu;
695 	mock_nested->parent = mock_viommu->s2_parent;
696 	return &mock_nested->domain;
697 }
698 
mock_viommu_cache_invalidate(struct iommufd_viommu * viommu,struct iommu_user_data_array * array)699 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
700 					struct iommu_user_data_array *array)
701 {
702 	struct iommu_viommu_invalidate_selftest *cmds;
703 	struct iommu_viommu_invalidate_selftest *cur;
704 	struct iommu_viommu_invalidate_selftest *end;
705 	int rc;
706 
707 	/* A zero-length array is allowed to validate the array type */
708 	if (array->entry_num == 0 &&
709 	    array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
710 		array->entry_num = 0;
711 		return 0;
712 	}
713 
714 	cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
715 	if (!cmds)
716 		return -ENOMEM;
717 	cur = cmds;
718 	end = cmds + array->entry_num;
719 
720 	static_assert(sizeof(*cmds) == 3 * sizeof(u32));
721 	rc = iommu_copy_struct_from_full_user_array(
722 		cmds, sizeof(*cmds), array,
723 		IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
724 	if (rc)
725 		goto out;
726 
727 	while (cur != end) {
728 		struct mock_dev *mdev;
729 		struct device *dev;
730 		int i;
731 
732 		if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
733 			rc = -EOPNOTSUPP;
734 			goto out;
735 		}
736 
737 		if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
738 			rc = -EINVAL;
739 			goto out;
740 		}
741 
742 		xa_lock(&viommu->vdevs);
743 		dev = iommufd_viommu_find_dev(viommu,
744 					      (unsigned long)cur->vdev_id);
745 		if (!dev) {
746 			xa_unlock(&viommu->vdevs);
747 			rc = -EINVAL;
748 			goto out;
749 		}
750 		mdev = container_of(dev, struct mock_dev, dev);
751 
752 		if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
753 			/* Invalidate all cache entries and ignore cache_id */
754 			for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
755 				mdev->cache[i] = 0;
756 		} else {
757 			mdev->cache[cur->cache_id] = 0;
758 		}
759 		xa_unlock(&viommu->vdevs);
760 
761 		cur++;
762 	}
763 out:
764 	array->entry_num = cur - cmds;
765 	kfree(cmds);
766 	return rc;
767 }
768 
769 static struct iommufd_viommu_ops mock_viommu_ops = {
770 	.destroy = mock_viommu_destroy,
771 	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
772 	.cache_invalidate = mock_viommu_cache_invalidate,
773 };
774 
mock_viommu_alloc(struct device * dev,struct iommu_domain * domain,struct iommufd_ctx * ictx,unsigned int viommu_type)775 static struct iommufd_viommu *mock_viommu_alloc(struct device *dev,
776 						struct iommu_domain *domain,
777 						struct iommufd_ctx *ictx,
778 						unsigned int viommu_type)
779 {
780 	struct mock_iommu_device *mock_iommu =
781 		iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev);
782 	struct mock_viommu *mock_viommu;
783 
784 	if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
785 		return ERR_PTR(-EOPNOTSUPP);
786 
787 	mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core,
788 					   &mock_viommu_ops);
789 	if (IS_ERR(mock_viommu))
790 		return ERR_CAST(mock_viommu);
791 
792 	refcount_inc(&mock_iommu->users);
793 	return &mock_viommu->core;
794 }
795 
796 static const struct iommu_ops mock_ops = {
797 	/*
798 	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
799 	 * because it is zero.
800 	 */
801 	.default_domain = &mock_blocking_domain,
802 	.blocked_domain = &mock_blocking_domain,
803 	.owner = THIS_MODULE,
804 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
805 	.hw_info = mock_domain_hw_info,
806 	.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
807 	.domain_alloc_nested = mock_domain_alloc_nested,
808 	.capable = mock_domain_capable,
809 	.device_group = generic_device_group,
810 	.probe_device = mock_probe_device,
811 	.page_response = mock_domain_page_response,
812 	.user_pasid_table = true,
813 	.viommu_alloc = mock_viommu_alloc,
814 	.default_domain_ops =
815 		&(struct iommu_domain_ops){
816 			.free = mock_domain_free,
817 			.attach_dev = mock_domain_nop_attach,
818 			.map_pages = mock_domain_map_pages,
819 			.unmap_pages = mock_domain_unmap_pages,
820 			.iova_to_phys = mock_domain_iova_to_phys,
821 			.set_dev_pasid = mock_domain_set_dev_pasid_nop,
822 		},
823 };
824 
mock_domain_free_nested(struct iommu_domain * domain)825 static void mock_domain_free_nested(struct iommu_domain *domain)
826 {
827 	kfree(to_mock_nested(domain));
828 }
829 
830 static int
mock_domain_cache_invalidate_user(struct iommu_domain * domain,struct iommu_user_data_array * array)831 mock_domain_cache_invalidate_user(struct iommu_domain *domain,
832 				  struct iommu_user_data_array *array)
833 {
834 	struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
835 	struct iommu_hwpt_invalidate_selftest inv;
836 	u32 processed = 0;
837 	int i = 0, j;
838 	int rc = 0;
839 
840 	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
841 		rc = -EINVAL;
842 		goto out;
843 	}
844 
845 	for ( ; i < array->entry_num; i++) {
846 		rc = iommu_copy_struct_from_user_array(&inv, array,
847 						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
848 						       i, iotlb_id);
849 		if (rc)
850 			break;
851 
852 		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
853 			rc = -EOPNOTSUPP;
854 			break;
855 		}
856 
857 		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
858 			rc = -EINVAL;
859 			break;
860 		}
861 
862 		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
863 			/* Invalidate all mock iotlb entries and ignore iotlb_id */
864 			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
865 				mock_nested->iotlb[j] = 0;
866 		} else {
867 			mock_nested->iotlb[inv.iotlb_id] = 0;
868 		}
869 
870 		processed++;
871 	}
872 
873 out:
874 	array->entry_num = processed;
875 	return rc;
876 }
877 
878 static struct iommu_domain_ops domain_nested_ops = {
879 	.free = mock_domain_free_nested,
880 	.attach_dev = mock_domain_nop_attach,
881 	.cache_invalidate_user = mock_domain_cache_invalidate_user,
882 	.set_dev_pasid = mock_domain_set_dev_pasid_nop,
883 };
884 
885 static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd * ucmd,u32 mockpt_id,u32 hwpt_type)886 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
887 {
888 	struct iommufd_object *obj;
889 
890 	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
891 	if (IS_ERR(obj))
892 		return ERR_CAST(obj);
893 	return container_of(obj, struct iommufd_hw_pagetable, obj);
894 }
895 
896 static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd * ucmd,u32 mockpt_id,struct mock_iommu_domain ** mock)897 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
898 		 struct mock_iommu_domain **mock)
899 {
900 	struct iommufd_hw_pagetable *hwpt;
901 
902 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
903 	if (IS_ERR(hwpt))
904 		return hwpt;
905 	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
906 	    hwpt->domain->ops != mock_ops.default_domain_ops) {
907 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
908 		return ERR_PTR(-EINVAL);
909 	}
910 	*mock = to_mock_domain(hwpt->domain);
911 	return hwpt;
912 }
913 
914 static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd * ucmd,u32 mockpt_id,struct mock_iommu_domain_nested ** mock_nested)915 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
916 			struct mock_iommu_domain_nested **mock_nested)
917 {
918 	struct iommufd_hw_pagetable *hwpt;
919 
920 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
921 	if (IS_ERR(hwpt))
922 		return hwpt;
923 	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
924 	    hwpt->domain->ops != &domain_nested_ops) {
925 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
926 		return ERR_PTR(-EINVAL);
927 	}
928 	*mock_nested = to_mock_nested(hwpt->domain);
929 	return hwpt;
930 }
931 
mock_dev_release(struct device * dev)932 static void mock_dev_release(struct device *dev)
933 {
934 	struct mock_dev *mdev = to_mock_dev(dev);
935 
936 	ida_free(&mock_dev_ida, mdev->id);
937 	kfree(mdev);
938 }
939 
mock_dev_create(unsigned long dev_flags)940 static struct mock_dev *mock_dev_create(unsigned long dev_flags)
941 {
942 	struct property_entry prop[] = {
943 		PROPERTY_ENTRY_U32("pasid-num-bits", 0),
944 		{},
945 	};
946 	const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
947 				MOCK_FLAGS_DEVICE_HUGE_IOVA |
948 				MOCK_FLAGS_DEVICE_PASID;
949 	struct mock_dev *mdev;
950 	int rc, i;
951 
952 	if (dev_flags & ~valid_flags)
953 		return ERR_PTR(-EINVAL);
954 
955 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
956 	if (!mdev)
957 		return ERR_PTR(-ENOMEM);
958 
959 	init_rwsem(&mdev->viommu_rwsem);
960 	device_initialize(&mdev->dev);
961 	mdev->flags = dev_flags;
962 	mdev->dev.release = mock_dev_release;
963 	mdev->dev.bus = &iommufd_mock_bus_type.bus;
964 	for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
965 		mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
966 
967 	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
968 	if (rc < 0)
969 		goto err_put;
970 	mdev->id = rc;
971 
972 	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
973 	if (rc)
974 		goto err_put;
975 
976 	if (dev_flags & MOCK_FLAGS_DEVICE_PASID)
977 		prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH);
978 
979 	rc = device_create_managed_software_node(&mdev->dev, prop, NULL);
980 	if (rc) {
981 		dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc);
982 		goto err_put;
983 	}
984 
985 	rc = device_add(&mdev->dev);
986 	if (rc)
987 		goto err_put;
988 	return mdev;
989 
990 err_put:
991 	put_device(&mdev->dev);
992 	return ERR_PTR(rc);
993 }
994 
mock_dev_destroy(struct mock_dev * mdev)995 static void mock_dev_destroy(struct mock_dev *mdev)
996 {
997 	device_unregister(&mdev->dev);
998 }
999 
iommufd_selftest_is_mock_dev(struct device * dev)1000 bool iommufd_selftest_is_mock_dev(struct device *dev)
1001 {
1002 	return dev->release == mock_dev_release;
1003 }
1004 
1005 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
iommufd_test_mock_domain(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1006 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
1007 				    struct iommu_test_cmd *cmd)
1008 {
1009 	struct iommufd_device *idev;
1010 	struct selftest_obj *sobj;
1011 	u32 pt_id = cmd->id;
1012 	u32 dev_flags = 0;
1013 	u32 idev_id;
1014 	int rc;
1015 
1016 	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
1017 	if (IS_ERR(sobj))
1018 		return PTR_ERR(sobj);
1019 
1020 	sobj->idev.ictx = ucmd->ictx;
1021 	sobj->type = TYPE_IDEV;
1022 
1023 	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
1024 		dev_flags = cmd->mock_domain_flags.dev_flags;
1025 
1026 	sobj->idev.mock_dev = mock_dev_create(dev_flags);
1027 	if (IS_ERR(sobj->idev.mock_dev)) {
1028 		rc = PTR_ERR(sobj->idev.mock_dev);
1029 		goto out_sobj;
1030 	}
1031 
1032 	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
1033 				   &idev_id);
1034 	if (IS_ERR(idev)) {
1035 		rc = PTR_ERR(idev);
1036 		goto out_mdev;
1037 	}
1038 	sobj->idev.idev = idev;
1039 
1040 	rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id);
1041 	if (rc)
1042 		goto out_unbind;
1043 
1044 	/* Userspace must destroy the device_id to destroy the object */
1045 	cmd->mock_domain.out_hwpt_id = pt_id;
1046 	cmd->mock_domain.out_stdev_id = sobj->obj.id;
1047 	cmd->mock_domain.out_idev_id = idev_id;
1048 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1049 	if (rc)
1050 		goto out_detach;
1051 	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
1052 	return 0;
1053 
1054 out_detach:
1055 	iommufd_device_detach(idev, IOMMU_NO_PASID);
1056 out_unbind:
1057 	iommufd_device_unbind(idev);
1058 out_mdev:
1059 	mock_dev_destroy(sobj->idev.mock_dev);
1060 out_sobj:
1061 	iommufd_object_abort(ucmd->ictx, &sobj->obj);
1062 	return rc;
1063 }
1064 
1065 static struct selftest_obj *
iommufd_test_get_selftest_obj(struct iommufd_ctx * ictx,u32 id)1066 iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id)
1067 {
1068 	struct iommufd_object *dev_obj;
1069 	struct selftest_obj *sobj;
1070 
1071 	/*
1072 	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
1073 	 * it doesn't race with detach, which is not allowed.
1074 	 */
1075 	dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST);
1076 	if (IS_ERR(dev_obj))
1077 		return ERR_CAST(dev_obj);
1078 
1079 	sobj = to_selftest_obj(dev_obj);
1080 	if (sobj->type != TYPE_IDEV) {
1081 		iommufd_put_object(ictx, dev_obj);
1082 		return ERR_PTR(-EINVAL);
1083 	}
1084 	return sobj;
1085 }
1086 
1087 /* Replace the mock domain with a manually allocated hw_pagetable */
iommufd_test_mock_domain_replace(struct iommufd_ucmd * ucmd,unsigned int device_id,u32 pt_id,struct iommu_test_cmd * cmd)1088 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
1089 					    unsigned int device_id, u32 pt_id,
1090 					    struct iommu_test_cmd *cmd)
1091 {
1092 	struct selftest_obj *sobj;
1093 	int rc;
1094 
1095 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id);
1096 	if (IS_ERR(sobj))
1097 		return PTR_ERR(sobj);
1098 
1099 	rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id);
1100 	if (rc)
1101 		goto out_sobj;
1102 
1103 	cmd->mock_domain_replace.pt_id = pt_id;
1104 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1105 
1106 out_sobj:
1107 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1108 	return rc;
1109 }
1110 
1111 /* Add an additional reserved IOVA to the IOAS */
iommufd_test_add_reserved(struct iommufd_ucmd * ucmd,unsigned int mockpt_id,unsigned long start,size_t length)1112 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
1113 				     unsigned int mockpt_id,
1114 				     unsigned long start, size_t length)
1115 {
1116 	struct iommufd_ioas *ioas;
1117 	int rc;
1118 
1119 	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
1120 	if (IS_ERR(ioas))
1121 		return PTR_ERR(ioas);
1122 	down_write(&ioas->iopt.iova_rwsem);
1123 	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
1124 	up_write(&ioas->iopt.iova_rwsem);
1125 	iommufd_put_object(ucmd->ictx, &ioas->obj);
1126 	return rc;
1127 }
1128 
1129 /* Check that every pfn under each iova matches the pfn under a user VA */
iommufd_test_md_check_pa(struct iommufd_ucmd * ucmd,unsigned int mockpt_id,unsigned long iova,size_t length,void __user * uptr)1130 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
1131 				    unsigned int mockpt_id, unsigned long iova,
1132 				    size_t length, void __user *uptr)
1133 {
1134 	struct iommufd_hw_pagetable *hwpt;
1135 	struct mock_iommu_domain *mock;
1136 	uintptr_t end;
1137 	int rc;
1138 
1139 	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
1140 	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
1141 	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1142 		return -EINVAL;
1143 
1144 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1145 	if (IS_ERR(hwpt))
1146 		return PTR_ERR(hwpt);
1147 
1148 	for (; length; length -= MOCK_IO_PAGE_SIZE) {
1149 		struct page *pages[1];
1150 		unsigned long pfn;
1151 		long npages;
1152 		void *ent;
1153 
1154 		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1155 					     pages);
1156 		if (npages < 0) {
1157 			rc = npages;
1158 			goto out_put;
1159 		}
1160 		if (WARN_ON(npages != 1)) {
1161 			rc = -EFAULT;
1162 			goto out_put;
1163 		}
1164 		pfn = page_to_pfn(pages[0]);
1165 		put_page(pages[0]);
1166 
1167 		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
1168 		if (!ent ||
1169 		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
1170 			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1171 			rc = -EINVAL;
1172 			goto out_put;
1173 		}
1174 		iova += MOCK_IO_PAGE_SIZE;
1175 		uptr += MOCK_IO_PAGE_SIZE;
1176 	}
1177 	rc = 0;
1178 
1179 out_put:
1180 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1181 	return rc;
1182 }
1183 
1184 /* Check that the page ref count matches, to look for missing pin/unpins */
iommufd_test_md_check_refs(struct iommufd_ucmd * ucmd,void __user * uptr,size_t length,unsigned int refs)1185 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
1186 				      void __user *uptr, size_t length,
1187 				      unsigned int refs)
1188 {
1189 	uintptr_t end;
1190 
1191 	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1192 	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1193 		return -EINVAL;
1194 
1195 	for (; length; length -= PAGE_SIZE) {
1196 		struct page *pages[1];
1197 		long npages;
1198 
1199 		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1200 		if (npages < 0)
1201 			return npages;
1202 		if (WARN_ON(npages != 1))
1203 			return -EFAULT;
1204 		if (!PageCompound(pages[0])) {
1205 			unsigned int count;
1206 
1207 			count = page_ref_count(pages[0]);
1208 			if (count / GUP_PIN_COUNTING_BIAS != refs) {
1209 				put_page(pages[0]);
1210 				return -EIO;
1211 			}
1212 		}
1213 		put_page(pages[0]);
1214 		uptr += PAGE_SIZE;
1215 	}
1216 	return 0;
1217 }
1218 
iommufd_test_md_check_iotlb(struct iommufd_ucmd * ucmd,u32 mockpt_id,unsigned int iotlb_id,u32 iotlb)1219 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
1220 				       u32 mockpt_id, unsigned int iotlb_id,
1221 				       u32 iotlb)
1222 {
1223 	struct mock_iommu_domain_nested *mock_nested;
1224 	struct iommufd_hw_pagetable *hwpt;
1225 	int rc = 0;
1226 
1227 	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1228 	if (IS_ERR(hwpt))
1229 		return PTR_ERR(hwpt);
1230 
1231 	mock_nested = to_mock_nested(hwpt->domain);
1232 
1233 	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1234 	    mock_nested->iotlb[iotlb_id] != iotlb)
1235 		rc = -EINVAL;
1236 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1237 	return rc;
1238 }
1239 
iommufd_test_dev_check_cache(struct iommufd_ucmd * ucmd,u32 idev_id,unsigned int cache_id,u32 cache)1240 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
1241 					unsigned int cache_id, u32 cache)
1242 {
1243 	struct iommufd_device *idev;
1244 	struct mock_dev *mdev;
1245 	int rc = 0;
1246 
1247 	idev = iommufd_get_device(ucmd, idev_id);
1248 	if (IS_ERR(idev))
1249 		return PTR_ERR(idev);
1250 	mdev = container_of(idev->dev, struct mock_dev, dev);
1251 
1252 	if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1253 		rc = -EINVAL;
1254 	iommufd_put_object(ucmd->ictx, &idev->obj);
1255 	return rc;
1256 }
1257 
1258 struct selftest_access {
1259 	struct iommufd_access *access;
1260 	struct file *file;
1261 	struct mutex lock;
1262 	struct list_head items;
1263 	unsigned int next_id;
1264 	bool destroying;
1265 };
1266 
1267 struct selftest_access_item {
1268 	struct list_head items_elm;
1269 	unsigned long iova;
1270 	size_t length;
1271 	unsigned int id;
1272 };
1273 
1274 static const struct file_operations iommfd_test_staccess_fops;
1275 
iommufd_access_get(int fd)1276 static struct selftest_access *iommufd_access_get(int fd)
1277 {
1278 	struct file *file;
1279 
1280 	file = fget(fd);
1281 	if (!file)
1282 		return ERR_PTR(-EBADFD);
1283 
1284 	if (file->f_op != &iommfd_test_staccess_fops) {
1285 		fput(file);
1286 		return ERR_PTR(-EBADFD);
1287 	}
1288 	return file->private_data;
1289 }
1290 
iommufd_test_access_unmap(void * data,unsigned long iova,unsigned long length)1291 static void iommufd_test_access_unmap(void *data, unsigned long iova,
1292 				      unsigned long length)
1293 {
1294 	unsigned long iova_last = iova + length - 1;
1295 	struct selftest_access *staccess = data;
1296 	struct selftest_access_item *item;
1297 	struct selftest_access_item *tmp;
1298 
1299 	mutex_lock(&staccess->lock);
1300 	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1301 		if (iova > item->iova + item->length - 1 ||
1302 		    iova_last < item->iova)
1303 			continue;
1304 		list_del(&item->items_elm);
1305 		iommufd_access_unpin_pages(staccess->access, item->iova,
1306 					   item->length);
1307 		kfree(item);
1308 	}
1309 	mutex_unlock(&staccess->lock);
1310 }
1311 
iommufd_test_access_item_destroy(struct iommufd_ucmd * ucmd,unsigned int access_id,unsigned int item_id)1312 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1313 					    unsigned int access_id,
1314 					    unsigned int item_id)
1315 {
1316 	struct selftest_access_item *item;
1317 	struct selftest_access *staccess;
1318 
1319 	staccess = iommufd_access_get(access_id);
1320 	if (IS_ERR(staccess))
1321 		return PTR_ERR(staccess);
1322 
1323 	mutex_lock(&staccess->lock);
1324 	list_for_each_entry(item, &staccess->items, items_elm) {
1325 		if (item->id == item_id) {
1326 			list_del(&item->items_elm);
1327 			iommufd_access_unpin_pages(staccess->access, item->iova,
1328 						   item->length);
1329 			mutex_unlock(&staccess->lock);
1330 			kfree(item);
1331 			fput(staccess->file);
1332 			return 0;
1333 		}
1334 	}
1335 	mutex_unlock(&staccess->lock);
1336 	fput(staccess->file);
1337 	return -ENOENT;
1338 }
1339 
iommufd_test_staccess_release(struct inode * inode,struct file * filep)1340 static int iommufd_test_staccess_release(struct inode *inode,
1341 					 struct file *filep)
1342 {
1343 	struct selftest_access *staccess = filep->private_data;
1344 
1345 	if (staccess->access) {
1346 		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1347 		iommufd_access_destroy(staccess->access);
1348 	}
1349 	mutex_destroy(&staccess->lock);
1350 	kfree(staccess);
1351 	return 0;
1352 }
1353 
1354 static const struct iommufd_access_ops selftest_access_ops_pin = {
1355 	.needs_pin_pages = 1,
1356 	.unmap = iommufd_test_access_unmap,
1357 };
1358 
1359 static const struct iommufd_access_ops selftest_access_ops = {
1360 	.unmap = iommufd_test_access_unmap,
1361 };
1362 
1363 static const struct file_operations iommfd_test_staccess_fops = {
1364 	.release = iommufd_test_staccess_release,
1365 };
1366 
iommufd_test_alloc_access(void)1367 static struct selftest_access *iommufd_test_alloc_access(void)
1368 {
1369 	struct selftest_access *staccess;
1370 	struct file *filep;
1371 
1372 	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1373 	if (!staccess)
1374 		return ERR_PTR(-ENOMEM);
1375 	INIT_LIST_HEAD(&staccess->items);
1376 	mutex_init(&staccess->lock);
1377 
1378 	filep = anon_inode_getfile("[iommufd_test_staccess]",
1379 				   &iommfd_test_staccess_fops, staccess,
1380 				   O_RDWR);
1381 	if (IS_ERR(filep)) {
1382 		kfree(staccess);
1383 		return ERR_CAST(filep);
1384 	}
1385 	staccess->file = filep;
1386 	return staccess;
1387 }
1388 
iommufd_test_create_access(struct iommufd_ucmd * ucmd,unsigned int ioas_id,unsigned int flags)1389 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1390 				      unsigned int ioas_id, unsigned int flags)
1391 {
1392 	struct iommu_test_cmd *cmd = ucmd->cmd;
1393 	struct selftest_access *staccess;
1394 	struct iommufd_access *access;
1395 	u32 id;
1396 	int fdno;
1397 	int rc;
1398 
1399 	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1400 		return -EOPNOTSUPP;
1401 
1402 	staccess = iommufd_test_alloc_access();
1403 	if (IS_ERR(staccess))
1404 		return PTR_ERR(staccess);
1405 
1406 	fdno = get_unused_fd_flags(O_CLOEXEC);
1407 	if (fdno < 0) {
1408 		rc = -ENOMEM;
1409 		goto out_free_staccess;
1410 	}
1411 
1412 	access = iommufd_access_create(
1413 		ucmd->ictx,
1414 		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1415 			&selftest_access_ops_pin :
1416 			&selftest_access_ops,
1417 		staccess, &id);
1418 	if (IS_ERR(access)) {
1419 		rc = PTR_ERR(access);
1420 		goto out_put_fdno;
1421 	}
1422 	rc = iommufd_access_attach(access, ioas_id);
1423 	if (rc)
1424 		goto out_destroy;
1425 	cmd->create_access.out_access_fd = fdno;
1426 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1427 	if (rc)
1428 		goto out_destroy;
1429 
1430 	staccess->access = access;
1431 	fd_install(fdno, staccess->file);
1432 	return 0;
1433 
1434 out_destroy:
1435 	iommufd_access_destroy(access);
1436 out_put_fdno:
1437 	put_unused_fd(fdno);
1438 out_free_staccess:
1439 	fput(staccess->file);
1440 	return rc;
1441 }
1442 
iommufd_test_access_replace_ioas(struct iommufd_ucmd * ucmd,unsigned int access_id,unsigned int ioas_id)1443 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1444 					    unsigned int access_id,
1445 					    unsigned int ioas_id)
1446 {
1447 	struct selftest_access *staccess;
1448 	int rc;
1449 
1450 	staccess = iommufd_access_get(access_id);
1451 	if (IS_ERR(staccess))
1452 		return PTR_ERR(staccess);
1453 
1454 	rc = iommufd_access_replace(staccess->access, ioas_id);
1455 	fput(staccess->file);
1456 	return rc;
1457 }
1458 
1459 /* Check that the pages in a page array match the pages in the user VA */
iommufd_test_check_pages(void __user * uptr,struct page ** pages,size_t npages)1460 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1461 				    size_t npages)
1462 {
1463 	for (; npages; npages--) {
1464 		struct page *tmp_pages[1];
1465 		long rc;
1466 
1467 		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1468 		if (rc < 0)
1469 			return rc;
1470 		if (WARN_ON(rc != 1))
1471 			return -EFAULT;
1472 		put_page(tmp_pages[0]);
1473 		if (tmp_pages[0] != *pages)
1474 			return -EBADE;
1475 		pages++;
1476 		uptr += PAGE_SIZE;
1477 	}
1478 	return 0;
1479 }
1480 
iommufd_test_access_pages(struct iommufd_ucmd * ucmd,unsigned int access_id,unsigned long iova,size_t length,void __user * uptr,u32 flags)1481 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
1482 				     unsigned int access_id, unsigned long iova,
1483 				     size_t length, void __user *uptr,
1484 				     u32 flags)
1485 {
1486 	struct iommu_test_cmd *cmd = ucmd->cmd;
1487 	struct selftest_access_item *item;
1488 	struct selftest_access *staccess;
1489 	struct page **pages;
1490 	size_t npages;
1491 	int rc;
1492 
1493 	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1494 	if (length > 16*1024*1024)
1495 		return -ENOMEM;
1496 
1497 	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1498 		return -EOPNOTSUPP;
1499 
1500 	staccess = iommufd_access_get(access_id);
1501 	if (IS_ERR(staccess))
1502 		return PTR_ERR(staccess);
1503 
1504 	if (staccess->access->ops != &selftest_access_ops_pin) {
1505 		rc = -EOPNOTSUPP;
1506 		goto out_put;
1507 	}
1508 
1509 	if (flags & MOCK_FLAGS_ACCESS_SYZ)
1510 		iova = iommufd_test_syz_conv_iova(staccess->access,
1511 					&cmd->access_pages.iova);
1512 
1513 	npages = (ALIGN(iova + length, PAGE_SIZE) -
1514 		  ALIGN_DOWN(iova, PAGE_SIZE)) /
1515 		 PAGE_SIZE;
1516 	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1517 	if (!pages) {
1518 		rc = -ENOMEM;
1519 		goto out_put;
1520 	}
1521 
1522 	/*
1523 	 * Drivers will need to think very carefully about this locking. The
1524 	 * core code can do multiple unmaps instantaneously after
1525 	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1526 	 * the range is unpinned. This simple implementation puts a global lock
1527 	 * around the pin, which may not suit drivers that want this to be a
1528 	 * performance path. drivers that get this wrong will trigger WARN_ON
1529 	 * races and cause EDEADLOCK failures to userspace.
1530 	 */
1531 	mutex_lock(&staccess->lock);
1532 	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1533 				      flags & MOCK_FLAGS_ACCESS_WRITE);
1534 	if (rc)
1535 		goto out_unlock;
1536 
1537 	/* For syzkaller allow uptr to be NULL to skip this check */
1538 	if (uptr) {
1539 		rc = iommufd_test_check_pages(
1540 			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1541 			npages);
1542 		if (rc)
1543 			goto out_unaccess;
1544 	}
1545 
1546 	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1547 	if (!item) {
1548 		rc = -ENOMEM;
1549 		goto out_unaccess;
1550 	}
1551 
1552 	item->iova = iova;
1553 	item->length = length;
1554 	item->id = staccess->next_id++;
1555 	list_add_tail(&item->items_elm, &staccess->items);
1556 
1557 	cmd->access_pages.out_access_pages_id = item->id;
1558 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1559 	if (rc)
1560 		goto out_free_item;
1561 	goto out_unlock;
1562 
1563 out_free_item:
1564 	list_del(&item->items_elm);
1565 	kfree(item);
1566 out_unaccess:
1567 	iommufd_access_unpin_pages(staccess->access, iova, length);
1568 out_unlock:
1569 	mutex_unlock(&staccess->lock);
1570 	kvfree(pages);
1571 out_put:
1572 	fput(staccess->file);
1573 	return rc;
1574 }
1575 
iommufd_test_access_rw(struct iommufd_ucmd * ucmd,unsigned int access_id,unsigned long iova,size_t length,void __user * ubuf,unsigned int flags)1576 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
1577 				  unsigned int access_id, unsigned long iova,
1578 				  size_t length, void __user *ubuf,
1579 				  unsigned int flags)
1580 {
1581 	struct iommu_test_cmd *cmd = ucmd->cmd;
1582 	struct selftest_access *staccess;
1583 	void *tmp;
1584 	int rc;
1585 
1586 	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1587 	if (length > 16*1024*1024)
1588 		return -ENOMEM;
1589 
1590 	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1591 		      MOCK_FLAGS_ACCESS_SYZ))
1592 		return -EOPNOTSUPP;
1593 
1594 	staccess = iommufd_access_get(access_id);
1595 	if (IS_ERR(staccess))
1596 		return PTR_ERR(staccess);
1597 
1598 	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1599 	if (!tmp) {
1600 		rc = -ENOMEM;
1601 		goto out_put;
1602 	}
1603 
1604 	if (flags & MOCK_ACCESS_RW_WRITE) {
1605 		if (copy_from_user(tmp, ubuf, length)) {
1606 			rc = -EFAULT;
1607 			goto out_free;
1608 		}
1609 	}
1610 
1611 	if (flags & MOCK_FLAGS_ACCESS_SYZ)
1612 		iova = iommufd_test_syz_conv_iova(staccess->access,
1613 				&cmd->access_rw.iova);
1614 
1615 	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1616 	if (rc)
1617 		goto out_free;
1618 	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1619 		if (copy_to_user(ubuf, tmp, length)) {
1620 			rc = -EFAULT;
1621 			goto out_free;
1622 		}
1623 	}
1624 
1625 out_free:
1626 	kvfree(tmp);
1627 out_put:
1628 	fput(staccess->file);
1629 	return rc;
1630 }
1631 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1632 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1633 	      __IOMMUFD_ACCESS_RW_SLOW_PATH);
1634 
iommufd_test_dirty(struct iommufd_ucmd * ucmd,unsigned int mockpt_id,unsigned long iova,size_t length,unsigned long page_size,void __user * uptr,u32 flags)1635 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
1636 			      unsigned long iova, size_t length,
1637 			      unsigned long page_size, void __user *uptr,
1638 			      u32 flags)
1639 {
1640 	unsigned long i, max;
1641 	struct iommu_test_cmd *cmd = ucmd->cmd;
1642 	struct iommufd_hw_pagetable *hwpt;
1643 	struct mock_iommu_domain *mock;
1644 	int rc, count = 0;
1645 	void *tmp;
1646 
1647 	if (!page_size || !length || iova % page_size || length % page_size ||
1648 	    !uptr)
1649 		return -EINVAL;
1650 
1651 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1652 	if (IS_ERR(hwpt))
1653 		return PTR_ERR(hwpt);
1654 
1655 	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
1656 		rc = -EINVAL;
1657 		goto out_put;
1658 	}
1659 
1660 	max = length / page_size;
1661 	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1662 		       GFP_KERNEL_ACCOUNT);
1663 	if (!tmp) {
1664 		rc = -ENOMEM;
1665 		goto out_put;
1666 	}
1667 
1668 	if (copy_from_user(tmp, uptr,DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1669 		rc = -EFAULT;
1670 		goto out_free;
1671 	}
1672 
1673 	for (i = 0; i < max; i++) {
1674 		unsigned long cur = iova + i * page_size;
1675 		void *ent, *old;
1676 
1677 		if (!test_bit(i, (unsigned long *)tmp))
1678 			continue;
1679 
1680 		ent = xa_load(&mock->pfns, cur / page_size);
1681 		if (ent) {
1682 			unsigned long val;
1683 
1684 			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
1685 			old = xa_store(&mock->pfns, cur / page_size,
1686 				       xa_mk_value(val), GFP_KERNEL);
1687 			WARN_ON_ONCE(ent != old);
1688 			count++;
1689 		}
1690 	}
1691 
1692 	cmd->dirty.out_nr_dirty = count;
1693 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1694 out_free:
1695 	kvfree(tmp);
1696 out_put:
1697 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1698 	return rc;
1699 }
1700 
iommufd_test_trigger_iopf(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1701 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1702 				     struct iommu_test_cmd *cmd)
1703 {
1704 	struct iopf_fault event = { };
1705 	struct iommufd_device *idev;
1706 
1707 	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1708 	if (IS_ERR(idev))
1709 		return PTR_ERR(idev);
1710 
1711 	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1712 	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1713 		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1714 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
1715 	event.fault.prm.addr = cmd->trigger_iopf.addr;
1716 	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1717 	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1718 	event.fault.prm.perm = cmd->trigger_iopf.perm;
1719 
1720 	iommu_report_device_fault(idev->dev, &event);
1721 	iommufd_put_object(ucmd->ictx, &idev->obj);
1722 
1723 	return 0;
1724 }
1725 
iommufd_test_trigger_vevent(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1726 static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd,
1727 				       struct iommu_test_cmd *cmd)
1728 {
1729 	struct iommu_viommu_event_selftest test = {};
1730 	struct iommufd_device *idev;
1731 	struct mock_dev *mdev;
1732 	int rc = -ENOENT;
1733 
1734 	idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id);
1735 	if (IS_ERR(idev))
1736 		return PTR_ERR(idev);
1737 	mdev = to_mock_dev(idev->dev);
1738 
1739 	down_read(&mdev->viommu_rwsem);
1740 	if (!mdev->viommu || !mdev->vdev_id)
1741 		goto out_unlock;
1742 
1743 	test.virt_id = mdev->vdev_id;
1744 	rc = iommufd_viommu_report_event(&mdev->viommu->core,
1745 					 IOMMU_VEVENTQ_TYPE_SELFTEST, &test,
1746 					 sizeof(test));
1747 out_unlock:
1748 	up_read(&mdev->viommu_rwsem);
1749 	iommufd_put_object(ucmd->ictx, &idev->obj);
1750 
1751 	return rc;
1752 }
1753 
1754 static inline struct iommufd_hw_pagetable *
iommufd_get_hwpt(struct iommufd_ucmd * ucmd,u32 id)1755 iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
1756 {
1757 	struct iommufd_object *pt_obj;
1758 
1759 	pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY);
1760 	if (IS_ERR(pt_obj))
1761 		return ERR_CAST(pt_obj);
1762 
1763 	if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED &&
1764 	    pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) {
1765 		iommufd_put_object(ucmd->ictx, pt_obj);
1766 		return ERR_PTR(-EINVAL);
1767 	}
1768 
1769 	return container_of(pt_obj, struct iommufd_hw_pagetable, obj);
1770 }
1771 
iommufd_test_pasid_check_hwpt(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1772 static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd,
1773 					 struct iommu_test_cmd *cmd)
1774 {
1775 	u32 hwpt_id = cmd->pasid_check.hwpt_id;
1776 	struct iommu_domain *attached_domain;
1777 	struct iommu_attach_handle *handle;
1778 	struct iommufd_hw_pagetable *hwpt;
1779 	struct selftest_obj *sobj;
1780 	struct mock_dev *mdev;
1781 	int rc = 0;
1782 
1783 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1784 	if (IS_ERR(sobj))
1785 		return PTR_ERR(sobj);
1786 
1787 	mdev = sobj->idev.mock_dev;
1788 
1789 	handle = iommu_attach_handle_get(mdev->dev.iommu_group,
1790 					 cmd->pasid_check.pasid, 0);
1791 	if (IS_ERR(handle))
1792 		attached_domain = NULL;
1793 	else
1794 		attached_domain = handle->domain;
1795 
1796 	/* hwpt_id == 0 means to check if pasid is detached */
1797 	if (!hwpt_id) {
1798 		if (attached_domain)
1799 			rc = -EINVAL;
1800 		goto out_sobj;
1801 	}
1802 
1803 	hwpt = iommufd_get_hwpt(ucmd, hwpt_id);
1804 	if (IS_ERR(hwpt)) {
1805 		rc = PTR_ERR(hwpt);
1806 		goto out_sobj;
1807 	}
1808 
1809 	if (attached_domain != hwpt->domain)
1810 		rc = -EINVAL;
1811 
1812 	iommufd_put_object(ucmd->ictx, &hwpt->obj);
1813 out_sobj:
1814 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1815 	return rc;
1816 }
1817 
iommufd_test_pasid_attach(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1818 static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
1819 				     struct iommu_test_cmd *cmd)
1820 {
1821 	struct selftest_obj *sobj;
1822 	int rc;
1823 
1824 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1825 	if (IS_ERR(sobj))
1826 		return PTR_ERR(sobj);
1827 
1828 	rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid,
1829 				   &cmd->pasid_attach.pt_id);
1830 	if (rc)
1831 		goto out_sobj;
1832 
1833 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1834 	if (rc)
1835 		iommufd_device_detach(sobj->idev.idev,
1836 				      cmd->pasid_attach.pasid);
1837 
1838 out_sobj:
1839 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1840 	return rc;
1841 }
1842 
iommufd_test_pasid_replace(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1843 static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd,
1844 				      struct iommu_test_cmd *cmd)
1845 {
1846 	struct selftest_obj *sobj;
1847 	int rc;
1848 
1849 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1850 	if (IS_ERR(sobj))
1851 		return PTR_ERR(sobj);
1852 
1853 	rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid,
1854 				    &cmd->pasid_attach.pt_id);
1855 	if (rc)
1856 		goto out_sobj;
1857 
1858 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1859 
1860 out_sobj:
1861 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1862 	return rc;
1863 }
1864 
iommufd_test_pasid_detach(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1865 static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd,
1866 				     struct iommu_test_cmd *cmd)
1867 {
1868 	struct selftest_obj *sobj;
1869 
1870 	sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1871 	if (IS_ERR(sobj))
1872 		return PTR_ERR(sobj);
1873 
1874 	iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid);
1875 	iommufd_put_object(ucmd->ictx, &sobj->obj);
1876 	return 0;
1877 }
1878 
iommufd_selftest_destroy(struct iommufd_object * obj)1879 void iommufd_selftest_destroy(struct iommufd_object *obj)
1880 {
1881 	struct selftest_obj *sobj = to_selftest_obj(obj);
1882 
1883 	switch (sobj->type) {
1884 	case TYPE_IDEV:
1885 		iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID);
1886 		iommufd_device_unbind(sobj->idev.idev);
1887 		mock_dev_destroy(sobj->idev.mock_dev);
1888 		break;
1889 	}
1890 }
1891 
iommufd_test(struct iommufd_ucmd * ucmd)1892 int iommufd_test(struct iommufd_ucmd *ucmd)
1893 {
1894 	struct iommu_test_cmd *cmd = ucmd->cmd;
1895 
1896 	switch (cmd->op) {
1897 	case IOMMU_TEST_OP_ADD_RESERVED:
1898 		return iommufd_test_add_reserved(ucmd, cmd->id,
1899 						 cmd->add_reserved.start,
1900 						 cmd->add_reserved.length);
1901 	case IOMMU_TEST_OP_MOCK_DOMAIN:
1902 	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
1903 		return iommufd_test_mock_domain(ucmd, cmd);
1904 	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
1905 		return iommufd_test_mock_domain_replace(
1906 			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
1907 	case IOMMU_TEST_OP_MD_CHECK_MAP:
1908 		return iommufd_test_md_check_pa(
1909 			ucmd, cmd->id, cmd->check_map.iova,
1910 			cmd->check_map.length,
1911 			u64_to_user_ptr(cmd->check_map.uptr));
1912 	case IOMMU_TEST_OP_MD_CHECK_REFS:
1913 		return iommufd_test_md_check_refs(
1914 			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
1915 			cmd->check_refs.length, cmd->check_refs.refs);
1916 	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
1917 		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
1918 						   cmd->check_iotlb.id,
1919 						   cmd->check_iotlb.iotlb);
1920 	case IOMMU_TEST_OP_DEV_CHECK_CACHE:
1921 		return iommufd_test_dev_check_cache(ucmd, cmd->id,
1922 						    cmd->check_dev_cache.id,
1923 						    cmd->check_dev_cache.cache);
1924 	case IOMMU_TEST_OP_CREATE_ACCESS:
1925 		return iommufd_test_create_access(ucmd, cmd->id,
1926 						  cmd->create_access.flags);
1927 	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
1928 		return iommufd_test_access_replace_ioas(
1929 			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
1930 	case IOMMU_TEST_OP_ACCESS_PAGES:
1931 		return iommufd_test_access_pages(
1932 			ucmd, cmd->id, cmd->access_pages.iova,
1933 			cmd->access_pages.length,
1934 			u64_to_user_ptr(cmd->access_pages.uptr),
1935 			cmd->access_pages.flags);
1936 	case IOMMU_TEST_OP_ACCESS_RW:
1937 		return iommufd_test_access_rw(
1938 			ucmd, cmd->id, cmd->access_rw.iova,
1939 			cmd->access_rw.length,
1940 			u64_to_user_ptr(cmd->access_rw.uptr),
1941 			cmd->access_rw.flags);
1942 	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
1943 		return iommufd_test_access_item_destroy(
1944 			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
1945 	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
1946 		/* Protect _batch_init(), can not be less than elmsz */
1947 		if (cmd->memory_limit.limit <
1948 		    sizeof(unsigned long) + sizeof(u32))
1949 			return -EINVAL;
1950 		iommufd_test_memory_limit = cmd->memory_limit.limit;
1951 		return 0;
1952 	case IOMMU_TEST_OP_DIRTY:
1953 		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
1954 					  cmd->dirty.length,
1955 					  cmd->dirty.page_size,
1956 					  u64_to_user_ptr(cmd->dirty.uptr),
1957 					  cmd->dirty.flags);
1958 	case IOMMU_TEST_OP_TRIGGER_IOPF:
1959 		return iommufd_test_trigger_iopf(ucmd, cmd);
1960 	case IOMMU_TEST_OP_TRIGGER_VEVENT:
1961 		return iommufd_test_trigger_vevent(ucmd, cmd);
1962 	case IOMMU_TEST_OP_PASID_ATTACH:
1963 		return iommufd_test_pasid_attach(ucmd, cmd);
1964 	case IOMMU_TEST_OP_PASID_REPLACE:
1965 		return iommufd_test_pasid_replace(ucmd, cmd);
1966 	case IOMMU_TEST_OP_PASID_DETACH:
1967 		return iommufd_test_pasid_detach(ucmd, cmd);
1968 	case IOMMU_TEST_OP_PASID_CHECK_HWPT:
1969 		return iommufd_test_pasid_check_hwpt(ucmd, cmd);
1970 	default:
1971 		return -EOPNOTSUPP;
1972 	}
1973 }
1974 
iommufd_should_fail(void)1975 bool iommufd_should_fail(void)
1976 {
1977 	return should_fail(&fail_iommufd, 1);
1978 }
1979 
iommufd_test_init(void)1980 int __init iommufd_test_init(void)
1981 {
1982 	struct platform_device_info pdevinfo = {
1983 		.name = "iommufd_selftest_iommu",
1984 	};
1985 	int rc;
1986 
1987 	dbgfs_root =
1988 		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
1989 
1990 	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
1991 	if (IS_ERR(selftest_iommu_dev)) {
1992 		rc = PTR_ERR(selftest_iommu_dev);
1993 		goto err_dbgfs;
1994 	}
1995 
1996 	rc = bus_register(&iommufd_mock_bus_type.bus);
1997 	if (rc)
1998 		goto err_platform;
1999 
2000 	rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
2001 				    &selftest_iommu_dev->dev, NULL, "%s",
2002 				    dev_name(&selftest_iommu_dev->dev));
2003 	if (rc)
2004 		goto err_bus;
2005 
2006 	rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
2007 				  &iommufd_mock_bus_type.bus,
2008 				  &iommufd_mock_bus_type.nb);
2009 	if (rc)
2010 		goto err_sysfs;
2011 
2012 	refcount_set(&mock_iommu.users, 1);
2013 	init_completion(&mock_iommu.complete);
2014 
2015 	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
2016 	mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);
2017 
2018 	return 0;
2019 
2020 err_sysfs:
2021 	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2022 err_bus:
2023 	bus_unregister(&iommufd_mock_bus_type.bus);
2024 err_platform:
2025 	platform_device_unregister(selftest_iommu_dev);
2026 err_dbgfs:
2027 	debugfs_remove_recursive(dbgfs_root);
2028 	return rc;
2029 }
2030 
iommufd_test_wait_for_users(void)2031 static void iommufd_test_wait_for_users(void)
2032 {
2033 	if (refcount_dec_and_test(&mock_iommu.users))
2034 		return;
2035 	/*
2036 	 * Time out waiting for iommu device user count to become 0.
2037 	 *
2038 	 * Note that this is just making an example here, since the selftest is
2039 	 * built into the iommufd module, i.e. it only unplugs the iommu device
2040 	 * when unloading the module. So, it is expected that this WARN_ON will
2041 	 * not trigger, as long as any iommufd FDs are open.
2042 	 */
2043 	WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
2044 					     msecs_to_jiffies(10000)));
2045 }
2046 
iommufd_test_exit(void)2047 void iommufd_test_exit(void)
2048 {
2049 	if (mock_iommu_iopf_queue) {
2050 		iopf_queue_free(mock_iommu_iopf_queue);
2051 		mock_iommu_iopf_queue = NULL;
2052 	}
2053 
2054 	iommufd_test_wait_for_users();
2055 	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2056 	iommu_device_unregister_bus(&mock_iommu.iommu_dev,
2057 				    &iommufd_mock_bus_type.bus,
2058 				    &iommufd_mock_bus_type.nb);
2059 	bus_unregister(&iommufd_mock_bus_type.bus);
2060 	platform_device_unregister(selftest_iommu_dev);
2061 	debugfs_remove_recursive(dbgfs_root);
2062 }
2063