xref: /linux/drivers/iommu/iommufd/selftest.c (revision 42b16d3ac371a2fac9b6f08fd75f23f34ba3955a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3  *
4  * Kernel side components to support tools/testing/selftests/iommu
5  */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/fault-inject.h>
9 #include <linux/file.h>
10 #include <linux/iommu.h>
11 #include <linux/platform_device.h>
12 #include <linux/slab.h>
13 #include <linux/xarray.h>
14 #include <uapi/linux/iommufd.h>
15 
16 #include "../iommu-priv.h"
17 #include "io_pagetable.h"
18 #include "iommufd_private.h"
19 #include "iommufd_test.h"
20 
/* Fault-injection attribute, exposed under debugfs to exercise error paths */
static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

/* Cap on memory the selftests may consume; tunable by the test harness */
size_t iommufd_test_memory_limit = 65536;

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

/* Allocates the unique id embedded in each mock device's name */
static DEFINE_IDA(mock_dev_ida);
41 
enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,

	/*
	 * Like a real page table alignment requires the low bits of the address
	 * to be zero. xarray also requires the high bit to be zero, so we store
	 * the pfns shifted. The upper bits are used for metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	/* First bit above the pfn field; metadata flags live from here up */
	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
60 
61 /*
62  * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify things.
64  * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
65  * value. This has a much smaller randomization space and syzkaller can hit it.
66  */
/*
 * Reinterpret the 64 bit IOVA as an (nth_area, offset) pair and turn it back
 * into a real IOVA by walking the iopt's areas.  Returns 0 when the nth area
 * does not exist.
 */
static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						  u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int remaining = syz->nth_area;
	unsigned long conv = 0;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (!remaining) {
			conv = iopt_area_iova(area) + syz->offset;
			break;
		}
		remaining--;
	}
	up_read(&iopt->iova_rwsem);

	return conv;
}
91 
/*
 * Convert a syzkaller-style IOVA through an access object.  Returns 0 when no
 * ioas is currently attached.
 */
static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
						u64 *iova)
{
	unsigned long conv = 0;

	/* Hold ioas_lock so access->ioas cannot be detached underneath us */
	mutex_lock(&access->ioas_lock);
	if (access->ioas)
		conv = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
	mutex_unlock(&access->ioas_lock);
	return conv;
}
106 
/*
 * If the MOCK_FLAGS_ACCESS_SYZ flag is set, rewrite *iova in place from the
 * syzkaller (nth_area, offset) encoding into a real IOVA within the given
 * ioas, and strip the flag.  A bad ioas_id leaves *iova untouched.
 */
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
}
122 
/* A mock paging domain; pfns maps iova/MOCK_IO_PAGE_SIZE -> pfn + flag bits */
struct mock_iommu_domain {
	unsigned long flags;		/* MOCK_DIRTY_TRACK */
	struct iommu_domain domain;
	struct xarray pfns;
};

/* A mock nested (stage-1) domain layered over a mock paging parent */
struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

enum selftest_obj_type {
	TYPE_IDEV,
};

/* The fake device hung off iommufd_mock_bus_type */
struct mock_dev {
	struct device dev;
	unsigned long flags;		/* MOCK_FLAGS_DEVICE_* */
	int id;				/* from mock_dev_ida */
};

/* iommufd object wrapping a mock device bound into a context */
struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};
157 
mock_domain_nop_attach(struct iommu_domain * domain,struct device * dev)158 static int mock_domain_nop_attach(struct iommu_domain *domain,
159 				  struct device *dev)
160 {
161 	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
162 
163 	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
164 		return -EINVAL;
165 
166 	return 0;
167 }
168 
static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
};

/* Statically allocated blocking domain, shared by all mock devices */
static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};
177 
/*
 * Return a freshly allocated iommu_test_hw_info carrying the selftest magic
 * register value.  Caller owns (and frees) the returned buffer.
 */
static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return ERR_PTR(-ENOMEM);

	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;
	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	return info;
}
192 
/*
 * Toggle MOCK_DIRTY_TRACK on the mock domain.  Enabling requires the domain
 * to have been allocated with dirty_ops.
 */
static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool tracking = mock->flags & MOCK_DIRTY_TRACK;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* Already in the requested state */
	if (enable == tracking)
		return 0;

	if (enable)
		mock->flags |= MOCK_DIRTY_TRACK;
	else
		mock->flags &= ~MOCK_DIRTY_TRACK;
	return 0;
}
212 
/*
 * Walk [iova, iova + page_size) in MOCK_IO_PAGE_SIZE steps and return true if
 * any entry carries MOCK_PFN_DIRTY_IOVA.  Unless IOMMU_DIRTY_NO_CLEAR is set,
 * the dirty bit is cleared as it is observed (read-and-clear semantics).
 */
static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
{
	unsigned long cur, end = iova + page_size - 1;
	bool dirty = false;
	void *ent, *old;

	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
			continue;

		dirty = true;
		/* Clear dirty */
		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			unsigned long val;

			/* Write back the same pfn with the dirty bit cleared */
			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
				       xa_mk_value(val), GFP_KERNEL);
			/* The entry is expected to be unchanged since xa_load */
			WARN_ON_ONCE(ent != old);
		}
	}

	return dirty;
}
240 
/*
 * Record dirty IOVAs in @dirty for [iova, iova + size), clearing dirty bits
 * as they are read unless flags says otherwise.  Entries marked
 * MOCK_PFN_HUGE_IOVA are handled at MOCK_HUGE_PAGE_SIZE granularity.
 */
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long end = iova + size;
	void *ent;

	/* Reporting into a bitmap requires tracking to be enabled */
	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	do {
		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
		unsigned long head;

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent) {
			/* Unmapped IOVA; nothing to report */
			iova += pgsize;
			continue;
		}

		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
			pgsize = MOCK_HUGE_PAGE_SIZE;
		/* Align down to the start of the (possibly huge) page */
		head = iova & ~(pgsize - 1);

		/* Clear dirty */
		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}
276 
/* Installed on mock paging domains allocated with DIRTY_TRACKING */
static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};
281 
/*
 * Allocate a mock unmanaged paging domain.  @dev may be NULL; mdev is only
 * dereferenced after the NULL check below.  Returns NULL on allocation
 * failure (iommu core convention for domain_alloc_paging).
 */
static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
	struct mock_iommu_domain *mock;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	/* Opt into huge mappings only for devices flagged for them */
	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
	mock->domain.ops = mock_ops.default_domain_ops;
	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
	xa_init(&mock->pfns);
	return &mock->domain;
}
300 
301 static struct iommu_domain *
__mock_domain_alloc_nested(struct mock_iommu_domain * mock_parent,const struct iommu_hwpt_selftest * user_cfg)302 __mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
303 			   const struct iommu_hwpt_selftest *user_cfg)
304 {
305 	struct mock_iommu_domain_nested *mock_nested;
306 	int i;
307 
308 	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
309 	if (!mock_nested)
310 		return ERR_PTR(-ENOMEM);
311 	mock_nested->parent = mock_parent;
312 	mock_nested->domain.ops = &domain_nested_ops;
313 	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
314 	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
315 		mock_nested->iotlb[i] = user_cfg->iotlb;
316 	return &mock_nested->domain;
317 }
318 
319 static struct iommu_domain *
mock_domain_alloc_user(struct device * dev,u32 flags,struct iommu_domain * parent,const struct iommu_user_data * user_data)320 mock_domain_alloc_user(struct device *dev, u32 flags,
321 		       struct iommu_domain *parent,
322 		       const struct iommu_user_data *user_data)
323 {
324 	struct mock_iommu_domain *mock_parent;
325 	struct iommu_hwpt_selftest user_cfg;
326 	int rc;
327 
328 	/* must be mock_domain */
329 	if (!parent) {
330 		struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
331 		bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
332 		bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
333 		struct iommu_domain *domain;
334 
335 		if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT |
336 			       IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
337 			return ERR_PTR(-EOPNOTSUPP);
338 		if (user_data || (has_dirty_flag && no_dirty_ops))
339 			return ERR_PTR(-EOPNOTSUPP);
340 		domain = mock_domain_alloc_paging(dev);
341 		if (!domain)
342 			return ERR_PTR(-ENOMEM);
343 		if (has_dirty_flag)
344 			container_of(domain, struct mock_iommu_domain, domain)
345 				->domain.dirty_ops = &dirty_ops;
346 		return domain;
347 	}
348 
349 	/* must be mock_domain_nested */
350 	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags)
351 		return ERR_PTR(-EOPNOTSUPP);
352 	if (!parent || parent->ops != mock_ops.default_domain_ops)
353 		return ERR_PTR(-EINVAL);
354 
355 	mock_parent = container_of(parent, struct mock_iommu_domain, domain);
356 	if (!mock_parent)
357 		return ERR_PTR(-EINVAL);
358 
359 	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
360 					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
361 	if (rc)
362 		return ERR_PTR(rc);
363 
364 	return __mock_domain_alloc_nested(mock_parent, &user_cfg);
365 }
366 
/* Free a mock paging domain; all mappings must have been unmapped already */
static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}
375 
/*
 * Record pgcount mappings of pgsize bytes each into the pfns xarray.  Each
 * MOCK_IO_PAGE_SIZE slot stores pfn | flags; the first slot of the range is
 * tagged MOCK_PFN_START_IOVA and the last MOCK_PFN_LAST_IOVA so unmap can
 * verify range boundaries.  On store failure all slots written so far are
 * erased (all-or-nothing).
 */
static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			/* Tag the final MOCK_IO_PAGE of the whole range */
			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			/* pgsize > MOCK_IO_PAGE_SIZE means a huge mapping */
			if (pgsize != MOCK_IO_PAGE_SIZE) {
				flags |= MOCK_PFN_HUGE_IOVA;
			}
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				/* Roll back everything stored in this call */
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			/* Slot must have been empty: no double-map allowed */
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}
427 
/*
 * Erase pgcount * pgsize bytes of mappings from the pfns xarray and return
 * the number of bytes removed.  When huge pages are disabled the START/LAST
 * tags recorded at map time are checked to verify the unmap is aligned to
 * original mapping boundaries.
 */
static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);

			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed. So every
			 * starting/ending IOVA should have been an iova passed
			 * to map.
			 *
			 * This simple logic doesn't work when the HUGE_PAGE is
			 * turned on since the core code will automatically
			 * switch between the two page sizes creating a break in
			 * the unmap calls. The break can land in the middle of
			 * contiguous IOVA.
			 */
			if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
				if (first) {
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_START_IOVA));
					first = false;
				}
				if (pgcount == 1 &&
				    cur + MOCK_IO_PAGE_SIZE == pgsize)
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_LAST_IOVA));
			}

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}
478 
mock_domain_iova_to_phys(struct iommu_domain * domain,dma_addr_t iova)479 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
480 					    dma_addr_t iova)
481 {
482 	struct mock_iommu_domain *mock =
483 		container_of(domain, struct mock_iommu_domain, domain);
484 	void *ent;
485 
486 	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
487 	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
488 	WARN_ON(!ent);
489 	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
490 }
491 
mock_domain_capable(struct device * dev,enum iommu_cap cap)492 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
493 {
494 	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
495 
496 	switch (cap) {
497 	case IOMMU_CAP_CACHE_COHERENCY:
498 		return true;
499 	case IOMMU_CAP_DIRTY_TRACKING:
500 		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
501 	default:
502 		break;
503 	}
504 
505 	return false;
506 }
507 
/* Shared IOPF queue for all mock devices; NULL until the queue is created */
static struct iopf_queue *mock_iommu_iopf_queue;

static struct iommu_device mock_iommu_device = {
};
512 
mock_probe_device(struct device * dev)513 static struct iommu_device *mock_probe_device(struct device *dev)
514 {
515 	if (dev->bus != &iommufd_mock_bus_type.bus)
516 		return ERR_PTR(-ENODEV);
517 	return &mock_iommu_device;
518 }
519 
/* Intentionally empty: the mock driver has no page-response hardware */
static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
				      struct iommu_page_response *msg)
{
}
524 
mock_dev_enable_feat(struct device * dev,enum iommu_dev_features feat)525 static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
526 {
527 	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
528 		return -ENODEV;
529 
530 	return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
531 }
532 
mock_dev_disable_feat(struct device * dev,enum iommu_dev_features feat)533 static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
534 {
535 	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
536 		return -ENODEV;
537 
538 	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
539 
540 	return 0;
541 }
542 
/* Top-level iommu_ops for the mock IOMMU driver */
static const struct iommu_ops mock_ops = {
	/*
	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
	 * because it is zero.
	 */
	.default_domain = &mock_blocking_domain,
	.blocked_domain = &mock_blocking_domain,
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.hw_info = mock_domain_hw_info,
	.domain_alloc_paging = mock_domain_alloc_paging,
	.domain_alloc_user = mock_domain_alloc_user,
	.capable = mock_domain_capable,
	.device_group = generic_device_group,
	.probe_device = mock_probe_device,
	.page_response = mock_domain_page_response,
	.dev_enable_feat = mock_dev_enable_feat,
	.dev_disable_feat = mock_dev_disable_feat,
	.user_pasid_table = true,
	/* Ops installed on mock paging (UNMANAGED) domains */
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};
571 
mock_domain_free_nested(struct iommu_domain * domain)572 static void mock_domain_free_nested(struct iommu_domain *domain)
573 {
574 	struct mock_iommu_domain_nested *mock_nested =
575 		container_of(domain, struct mock_iommu_domain_nested, domain);
576 
577 	kfree(mock_nested);
578 }
579 
/*
 * Process an array of IOMMU_HWPT_INVALIDATE_DATA_SELFTEST requests against
 * the mock IOTLB.  On exit array->entry_num is rewritten to the number of
 * entries fully processed, whether or not an error stopped the loop early.
 */
static int
mock_domain_cache_invalidate_user(struct iommu_domain *domain,
				  struct iommu_user_data_array *array)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);
	struct iommu_hwpt_invalidate_selftest inv;
	u32 processed = 0;
	int i = 0, j;
	int rc = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
		rc = -EINVAL;
		goto out;
	}

	for ( ; i < array->entry_num; i++) {
		rc = iommu_copy_struct_from_user_array(&inv, array,
						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
						       i, iotlb_id);
		if (rc)
			break;

		/* Only the FLAG_ALL bit is a recognized flag */
		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			break;
		}

		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
			rc = -EINVAL;
			break;
		}

		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all mock iotlb entries and ignore iotlb_id */
			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
				mock_nested->iotlb[j] = 0;
		} else {
			mock_nested->iotlb[inv.iotlb_id] = 0;
		}

		processed++;
	}

out:
	/* Tell userspace how many entries were consumed */
	array->entry_num = processed;
	return rc;
}
628 
/* Ops installed on mock nested (IOMMU_DOMAIN_NESTED) domains */
static struct iommu_domain_ops domain_nested_ops = {
	.free = mock_domain_free_nested,
	.attach_dev = mock_domain_nop_attach,
	.cache_invalidate_user = mock_domain_cache_invalidate_user,
};
634 
635 static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd * ucmd,u32 mockpt_id,u32 hwpt_type)636 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
637 {
638 	struct iommufd_object *obj;
639 
640 	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
641 	if (IS_ERR(obj))
642 		return ERR_CAST(obj);
643 	return container_of(obj, struct iommufd_hw_pagetable, obj);
644 }
645 
646 static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd * ucmd,u32 mockpt_id,struct mock_iommu_domain ** mock)647 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
648 		 struct mock_iommu_domain **mock)
649 {
650 	struct iommufd_hw_pagetable *hwpt;
651 
652 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
653 	if (IS_ERR(hwpt))
654 		return hwpt;
655 	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
656 	    hwpt->domain->ops != mock_ops.default_domain_ops) {
657 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
658 		return ERR_PTR(-EINVAL);
659 	}
660 	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
661 	return hwpt;
662 }
663 
664 static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd * ucmd,u32 mockpt_id,struct mock_iommu_domain_nested ** mock_nested)665 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
666 			struct mock_iommu_domain_nested **mock_nested)
667 {
668 	struct iommufd_hw_pagetable *hwpt;
669 
670 	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
671 	if (IS_ERR(hwpt))
672 		return hwpt;
673 	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
674 	    hwpt->domain->ops != &domain_nested_ops) {
675 		iommufd_put_object(ucmd->ictx, &hwpt->obj);
676 		return ERR_PTR(-EINVAL);
677 	}
678 	*mock_nested = container_of(hwpt->domain,
679 				    struct mock_iommu_domain_nested, domain);
680 	return hwpt;
681 }
682 
/* struct device release hook: return the id and free the mock device */
static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	ida_free(&mock_dev_ida, mdev->id);
	kfree(mdev);
}
690 
/*
 * Allocate, name and register a mock device on the mock bus.  Returns the
 * device or an ERR_PTR.  After device_initialize() all failure paths go
 * through put_device(), which invokes mock_dev_release() for cleanup.
 */
static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
	struct mock_dev *mdev;
	int rc;

	if (dev_flags &
	    ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
		return ERR_PTR(-EINVAL);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->flags = dev_flags;
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type.bus;

	/* Unique id used in the device name below */
	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
	if (rc < 0)
		goto err_put;
	mdev->id = rc;

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
	if (rc)
		goto err_put;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_put;
	return mdev;

err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}
727 
/* Unregister the device; the final put triggers mock_dev_release() */
static void mock_dev_destroy(struct mock_dev *mdev)
{
	device_unregister(&mdev->dev);
}
732 
/* A device is a mock device iff its release hook is ours */
bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}
737 
/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 dev_flags = 0;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	/* The FLAGS variant of the op carries extra device flags */
	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
		dev_flags = cmd->mock_domain_flags.dev_flags;

	sobj->idev.mock_dev = mock_dev_create(dev_flags);
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	/* Attach may replace pt_id with the id of an auto-created hwpt */
	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	cmd->mock_domain.out_idev_id = idev_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_detach;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}
797 
/* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
{
	struct iommufd_object *dev_obj;
	struct selftest_obj *sobj;
	int rc;

	/*
	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
	 * it doesn't race with detach, which is not allowed.
	 */
	dev_obj =
		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(dev_obj))
		return PTR_ERR(dev_obj);

	sobj = container_of(dev_obj, struct selftest_obj, obj);
	if (sobj->type != TYPE_IDEV) {
		rc = -EINVAL;
		goto out_dev_obj;
	}

	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
	if (rc)
		goto out_dev_obj;

	/* Report back the (possibly updated) pt id */
	cmd->mock_domain_replace.pt_id = pt_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_dev_obj:
	iommufd_put_object(ucmd->ictx, dev_obj);
	return rc;
}
833 
/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	/* Reserve the inclusive range [start, start + length - 1] */
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}
851 
/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	/* All three values must be MOCK_IO_PAGE aligned and uptr must not wrap */
	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		/* Resolve the user VA to its backing page */
		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		/* The mock mapping must point at the same physical address */
		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}
906 
/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		/* Ref counting of compound pages is not checked here */
		if (!PageCompound(pages[0])) {
			unsigned int count;

			/* Pins add GUP_PIN_COUNTING_BIAS per pin */
			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}
941 
/*
 * Verify that the nested hwpt's mock IOTLB slot @iotlb_id holds @iotlb.
 *
 * Fix: get_md_pagetable_nested() already returns the validated
 * mock_iommu_domain_nested through @mock_nested, so the previous redundant
 * container_of() re-derivation is dropped.
 */
static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
				       u32 mockpt_id, unsigned int iotlb_id,
				       u32 iotlb)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc = 0;

	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
	    mock_nested->iotlb[iotlb_id] != iotlb)
		rc = -EINVAL;
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}
963 
/* An iommufd access object exposed to userspace through an anon fd */
struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;		/* protects items and next_id */
	struct list_head items;		/* pinned ranges (selftest_access_item) */
	unsigned int next_id;		/* id for the next item created */
	bool destroying;
};

/* One pinned IOVA range belonging to a selftest_access */
struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;
981 
iommufd_access_get(int fd)982 static struct selftest_access *iommufd_access_get(int fd)
983 {
984 	struct file *file;
985 
986 	file = fget(fd);
987 	if (!file)
988 		return ERR_PTR(-EBADFD);
989 
990 	if (file->f_op != &iommfd_test_staccess_fops) {
991 		fput(file);
992 		return ERR_PTR(-EBADFD);
993 	}
994 	return file->private_data;
995 }
996 
iommufd_test_access_unmap(void * data,unsigned long iova,unsigned long length)997 static void iommufd_test_access_unmap(void *data, unsigned long iova,
998 				      unsigned long length)
999 {
1000 	unsigned long iova_last = iova + length - 1;
1001 	struct selftest_access *staccess = data;
1002 	struct selftest_access_item *item;
1003 	struct selftest_access_item *tmp;
1004 
1005 	mutex_lock(&staccess->lock);
1006 	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1007 		if (iova > item->iova + item->length - 1 ||
1008 		    iova_last < item->iova)
1009 			continue;
1010 		list_del(&item->items_elm);
1011 		iommufd_access_unpin_pages(staccess->access, item->iova,
1012 					   item->length);
1013 		kfree(item);
1014 	}
1015 	mutex_unlock(&staccess->lock);
1016 }
1017 
iommufd_test_access_item_destroy(struct iommufd_ucmd * ucmd,unsigned int access_id,unsigned int item_id)1018 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1019 					    unsigned int access_id,
1020 					    unsigned int item_id)
1021 {
1022 	struct selftest_access_item *item;
1023 	struct selftest_access *staccess;
1024 
1025 	staccess = iommufd_access_get(access_id);
1026 	if (IS_ERR(staccess))
1027 		return PTR_ERR(staccess);
1028 
1029 	mutex_lock(&staccess->lock);
1030 	list_for_each_entry(item, &staccess->items, items_elm) {
1031 		if (item->id == item_id) {
1032 			list_del(&item->items_elm);
1033 			iommufd_access_unpin_pages(staccess->access, item->iova,
1034 						   item->length);
1035 			mutex_unlock(&staccess->lock);
1036 			kfree(item);
1037 			fput(staccess->file);
1038 			return 0;
1039 		}
1040 	}
1041 	mutex_unlock(&staccess->lock);
1042 	fput(staccess->file);
1043 	return -ENOENT;
1044 }
1045 
iommufd_test_staccess_release(struct inode * inode,struct file * filep)1046 static int iommufd_test_staccess_release(struct inode *inode,
1047 					 struct file *filep)
1048 {
1049 	struct selftest_access *staccess = filep->private_data;
1050 
1051 	if (staccess->access) {
1052 		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1053 		iommufd_access_destroy(staccess->access);
1054 	}
1055 	mutex_destroy(&staccess->lock);
1056 	kfree(staccess);
1057 	return 0;
1058 }
1059 
/* Access ops for fds created with MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES */
static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};
1064 
/* Access ops for the default (non-pinning) access fds */
static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};
1068 
/* Also serves as the identity check in iommufd_access_get() */
static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};
1072 
iommufd_test_alloc_access(void)1073 static struct selftest_access *iommufd_test_alloc_access(void)
1074 {
1075 	struct selftest_access *staccess;
1076 	struct file *filep;
1077 
1078 	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1079 	if (!staccess)
1080 		return ERR_PTR(-ENOMEM);
1081 	INIT_LIST_HEAD(&staccess->items);
1082 	mutex_init(&staccess->lock);
1083 
1084 	filep = anon_inode_getfile("[iommufd_test_staccess]",
1085 				   &iommfd_test_staccess_fops, staccess,
1086 				   O_RDWR);
1087 	if (IS_ERR(filep)) {
1088 		kfree(staccess);
1089 		return ERR_CAST(filep);
1090 	}
1091 	staccess->file = filep;
1092 	return staccess;
1093 }
1094 
iommufd_test_create_access(struct iommufd_ucmd * ucmd,unsigned int ioas_id,unsigned int flags)1095 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1096 				      unsigned int ioas_id, unsigned int flags)
1097 {
1098 	struct iommu_test_cmd *cmd = ucmd->cmd;
1099 	struct selftest_access *staccess;
1100 	struct iommufd_access *access;
1101 	u32 id;
1102 	int fdno;
1103 	int rc;
1104 
1105 	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1106 		return -EOPNOTSUPP;
1107 
1108 	staccess = iommufd_test_alloc_access();
1109 	if (IS_ERR(staccess))
1110 		return PTR_ERR(staccess);
1111 
1112 	fdno = get_unused_fd_flags(O_CLOEXEC);
1113 	if (fdno < 0) {
1114 		rc = -ENOMEM;
1115 		goto out_free_staccess;
1116 	}
1117 
1118 	access = iommufd_access_create(
1119 		ucmd->ictx,
1120 		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1121 			&selftest_access_ops_pin :
1122 			&selftest_access_ops,
1123 		staccess, &id);
1124 	if (IS_ERR(access)) {
1125 		rc = PTR_ERR(access);
1126 		goto out_put_fdno;
1127 	}
1128 	rc = iommufd_access_attach(access, ioas_id);
1129 	if (rc)
1130 		goto out_destroy;
1131 	cmd->create_access.out_access_fd = fdno;
1132 	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1133 	if (rc)
1134 		goto out_destroy;
1135 
1136 	staccess->access = access;
1137 	fd_install(fdno, staccess->file);
1138 	return 0;
1139 
1140 out_destroy:
1141 	iommufd_access_destroy(access);
1142 out_put_fdno:
1143 	put_unused_fd(fdno);
1144 out_free_staccess:
1145 	fput(staccess->file);
1146 	return rc;
1147 }
1148 
iommufd_test_access_replace_ioas(struct iommufd_ucmd * ucmd,unsigned int access_id,unsigned int ioas_id)1149 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1150 					    unsigned int access_id,
1151 					    unsigned int ioas_id)
1152 {
1153 	struct selftest_access *staccess;
1154 	int rc;
1155 
1156 	staccess = iommufd_access_get(access_id);
1157 	if (IS_ERR(staccess))
1158 		return PTR_ERR(staccess);
1159 
1160 	rc = iommufd_access_replace(staccess->access, ioas_id);
1161 	fput(staccess->file);
1162 	return rc;
1163 }
1164 
1165 /* Check that the pages in a page array match the pages in the user VA */
iommufd_test_check_pages(void __user * uptr,struct page ** pages,size_t npages)1166 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1167 				    size_t npages)
1168 {
1169 	for (; npages; npages--) {
1170 		struct page *tmp_pages[1];
1171 		long rc;
1172 
1173 		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1174 		if (rc < 0)
1175 			return rc;
1176 		if (WARN_ON(rc != 1))
1177 			return -EFAULT;
1178 		put_page(tmp_pages[0]);
1179 		if (tmp_pages[0] != *pages)
1180 			return -EBADE;
1181 		pages++;
1182 		uptr += PAGE_SIZE;
1183 	}
1184 	return 0;
1185 }
1186 
/*
 * IOMMU_TEST_OP_ACCESS_PAGES: pin [iova, iova + length) through the access,
 * optionally verify the pinned pages against the user VA @uptr, and record
 * the pin as an item whose id is returned to userspace. The item is later
 * released by IOMMU_TEST_OP_DESTROY_ACCESS_PAGES or by an unmap callback.
 */
static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	/* Pinning only works on accesses created with the pin ops */
	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	/* Let syzkaller derive a plausible iova instead of guessing one */
	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
					&cmd->access_pages.iova);

	/* Number of PAGE_SIZE pages covering the (possibly unaligned) range */
	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps instantaneously after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
	 * the range is unpinned. This simple implementation puts a global lock
	 * around the pin, which may not suit drivers that want this to be a
	 * performance path. drivers that get this wrong will trigger WARN_ON
	 * races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		/* Back uptr up to the page boundary so it lines up with pages[0] */
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}
1281 
/*
 * IOMMU_TEST_OP_ACCESS_RW: read from or write to the IOVA range through the
 * access using a kernel bounce buffer. MOCK_ACCESS_RW_WRITE copies user data
 * in before the access write; otherwise the access read result is copied
 * back out to @ubuf.
 */
static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	bool is_write = flags & MOCK_ACCESS_RW_WRITE;
	struct selftest_access *staccess;
	void *kbuf;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	kbuf = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!kbuf) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (is_write && copy_from_user(kbuf, ubuf, length)) {
		rc = -EFAULT;
		goto out_free;
	}

	/* Let syzkaller derive a plausible iova instead of guessing one */
	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, kbuf, length, flags);
	if (rc)
		goto out_free;
	if (!is_write && copy_to_user(ubuf, kbuf, length))
		rc = -EFAULT;

out_free:
	kvfree(kbuf);
out_put:
	fput(staccess->file);
	return rc;
}
/* The mock RW flags are passed straight through to iommufd_access_rw(), so
 * they must stay bit-identical to the core flag values.
 */
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);
1340 
/*
 * IOMMU_TEST_OP_DIRTY: mark pages of the mock domain dirty according to a
 * user-supplied bitmap (one bit per @page_size page starting at @iova) and
 * report back how many mapped pages were actually marked. Used to seed state
 * for dirty-tracking selftests.
 */
static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
{
	unsigned long i, max;
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc, count = 0;
	void *tmp;

	if (!page_size || !length || iova % page_size || length % page_size ||
	    !uptr)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	/* Marking dirty only makes sense while dirty tracking is enabled */
	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
		rc = -EINVAL;
		goto out_put;
	}

	/* One bitmap bit per page; allocation rounded up to whole longs */
	max = length / page_size;
	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
		       GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (copy_from_user(tmp, uptr,DIV_ROUND_UP(max, BITS_PER_BYTE))) {
		rc = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * page_size;
		void *ent, *old;

		if (!test_bit(i, (unsigned long *)tmp))
			continue;

		/* Only pfns that are actually mapped can be marked dirty */
		ent = xa_load(&mock->pfns, cur / page_size);
		if (ent) {
			unsigned long val;

			/* Read-modify-write the entry to OR in the dirty bit */
			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / page_size,
				       xa_mk_value(val), GFP_KERNEL);
			/* The entry must not have changed underneath us */
			WARN_ON_ONCE(ent != old);
			count++;
		}
	}

	cmd->dirty.out_nr_dirty = count;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
	kvfree(tmp);
out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}
1406 
iommufd_test_trigger_iopf(struct iommufd_ucmd * ucmd,struct iommu_test_cmd * cmd)1407 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1408 				     struct iommu_test_cmd *cmd)
1409 {
1410 	struct iopf_fault event = { };
1411 	struct iommufd_device *idev;
1412 
1413 	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1414 	if (IS_ERR(idev))
1415 		return PTR_ERR(idev);
1416 
1417 	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1418 	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1419 		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1420 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
1421 	event.fault.prm.addr = cmd->trigger_iopf.addr;
1422 	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1423 	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1424 	event.fault.prm.perm = cmd->trigger_iopf.perm;
1425 
1426 	iommu_report_device_fault(idev->dev, &event);
1427 	iommufd_put_object(ucmd->ictx, &idev->obj);
1428 
1429 	return 0;
1430 }
1431 
iommufd_selftest_destroy(struct iommufd_object * obj)1432 void iommufd_selftest_destroy(struct iommufd_object *obj)
1433 {
1434 	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);
1435 
1436 	switch (sobj->type) {
1437 	case TYPE_IDEV:
1438 		iommufd_device_detach(sobj->idev.idev);
1439 		iommufd_device_unbind(sobj->idev.idev);
1440 		mock_dev_destroy(sobj->idev.mock_dev);
1441 		break;
1442 	}
1443 }
1444 
/*
 * Top-level dispatcher for IOMMU_TEST ioctls: routes cmd->op to the matching
 * selftest handler, unpacking per-op arguments from the command payload.
 */
int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
		return iommufd_test_mock_domain_replace(
			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
						   cmd->check_iotlb.id,
						   cmd->check_iotlb.iotlb);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
		return iommufd_test_access_replace_ioas(
			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), can not be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	case IOMMU_TEST_OP_DIRTY:
		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
					  cmd->dirty.length,
					  cmd->dirty.page_size,
					  u64_to_user_ptr(cmd->dirty.uptr),
					  cmd->dirty.flags);
	case IOMMU_TEST_OP_TRIGGER_IOPF:
		return iommufd_test_trigger_iopf(ucmd, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
1513 
/* Fault-injection hook: true when the fail_iommufd attr says to fail once */
bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}
1518 
/*
 * Module init for the selftest infrastructure: sets up the fault-injection
 * debugfs attr, registers a platform device, the mock bus, and the mock
 * IOMMU, and allocates the IOPF queue. Unwinds in reverse order on error.
 */
int __init iommufd_test_init(void)
{
	struct platform_device_info pdevinfo = {
		.name = "iommufd_selftest_iommu",
	};
	int rc;

	/*
	 * NOTE(review): return value deliberately unchecked here;
	 * debugfs_remove_recursive() in the teardown paths appears to
	 * tolerate an error pointer — confirm against the debugfs API.
	 */
	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);

	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(selftest_iommu_dev)) {
		rc = PTR_ERR(selftest_iommu_dev);
		goto err_dbgfs;
	}

	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		goto err_platform;

	rc = iommu_device_sysfs_add(&mock_iommu_device,
				    &selftest_iommu_dev->dev, NULL, "%s",
				    dev_name(&selftest_iommu_dev->dev));
	if (rc)
		goto err_bus;

	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
				  &iommufd_mock_bus_type.bus,
				  &iommufd_mock_bus_type.nb);
	if (rc)
		goto err_sysfs;

	/*
	 * Allocation failure is tolerated: iommufd_test_exit() guards on a
	 * NULL queue. Presumably the mock driver copes without one — verify.
	 */
	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu_device);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}
1565 
/*
 * Module teardown: releases everything iommufd_test_init() set up, in the
 * reverse order of creation.
 */
void iommufd_test_exit(void)
{
	/* The queue may be NULL if iopf_queue_alloc() failed during init */
	if (mock_iommu_iopf_queue) {
		iopf_queue_free(mock_iommu_iopf_queue);
		mock_iommu_iopf_queue = NULL;
	}

	iommu_device_sysfs_remove(&mock_iommu_device);
	iommu_device_unregister_bus(&mock_iommu_device,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}
1581