1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
3 *
4 * Kernel side components to support tools/testing/selftests/iommu
5 */
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/dma-buf.h>
9 #include <linux/dma-resv.h>
10 #include <linux/fault-inject.h>
11 #include <linux/file.h>
12 #include <linux/iommu.h>
13 #include <linux/platform_device.h>
14 #include <linux/slab.h>
15 #include <linux/xarray.h>
16 #include <uapi/linux/iommufd.h>
17 #include <linux/generic_pt/iommu.h>
18 #include "../iommu-pages.h"
19
20 #include "../iommu-priv.h"
21 #include "io_pagetable.h"
22 #include "iommufd_private.h"
23 #include "iommufd_test.h"
24
25 static DECLARE_FAULT_ATTR(fail_iommufd);
26 static struct dentry *dbgfs_root;
27 static struct platform_device *selftest_iommu_dev;
28 static const struct iommu_ops mock_ops;
29 static struct iommu_domain_ops domain_nested_ops;
30
31 size_t iommufd_test_memory_limit = 65536;
32
33 struct mock_bus_type {
34 struct bus_type bus;
35 struct notifier_block nb;
36 };
37
38 static struct mock_bus_type iommufd_mock_bus_type = {
39 .bus = {
40 .name = "iommufd_mock",
41 },
42 };
43
44 static DEFINE_IDA(mock_dev_ida);
45
46 enum {
47 MOCK_DIRTY_TRACK = 1,
48 };
49
50 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
51 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
52
53 /*
54 * Syzkaller has trouble randomizing the correct iova to use since it is linked
55 * to the map ioctl's output, and it has no idea about that. So, simplify things.
56 * In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
57 * value. This has a much smaller randomization space and syzkaller can hit it.
58 */
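/*
 * Illustrative example (not part of the uAPI contract): with syzkaller
 * conversion enabled, a u64 IOVA encoding { .nth_area = 1, .offset = 0x10 }
 * resolves to the start of the second mapped area plus 0x10, or to 0 when
 * fewer than two areas exist.
 */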
59 static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
60 u64 *iova)
61 {
62 struct syz_layout {
63 __u32 nth_area;
64 __u32 offset;
65 };
66 struct syz_layout *syz = (void *)iova;
67 unsigned int nth = syz->nth_area;
68 struct iopt_area *area;
69
70 down_read(&iopt->iova_rwsem);
71 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
72 area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
73 if (nth == 0) {
74 up_read(&iopt->iova_rwsem);
75 return iopt_area_iova(area) + syz->offset;
76 }
77 nth--;
78 }
79 up_read(&iopt->iova_rwsem);
80
81 return 0;
82 }
83
84 static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
85 u64 *iova)
86 {
87 unsigned long ret;
88
89 mutex_lock(&access->ioas_lock);
90 if (!access->ioas) {
91 mutex_unlock(&access->ioas_lock);
92 return 0;
93 }
94 ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
95 mutex_unlock(&access->ioas_lock);
96 return ret;
97 }
98
99 void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
100 unsigned int ioas_id, u64 *iova, u32 *flags)
101 {
102 struct iommufd_ioas *ioas;
103
104 if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
105 return;
106 *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;
107
108 ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
109 if (IS_ERR(ioas))
110 return;
111 *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
112 iommufd_put_object(ucmd->ictx, &ioas->obj);
113 }
114
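/*
 * The mock paging domain embeds the generic-pt AMDv1 page table state in a
 * union with the core iommu_domain; the PT_IOMMU_CHECK_DOMAIN() statements
 * below verify at compile time that the overlaid layouts stay compatible.
 */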
115 struct mock_iommu_domain {
116 union {
117 struct iommu_domain domain;
118 struct pt_iommu iommu;
119 struct pt_iommu_amdv1 amdv1;
120 };
121 unsigned long flags;
122 };
123 PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, iommu, domain);
124 PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, amdv1.iommu, domain);
125
126 static inline struct mock_iommu_domain *
127 to_mock_domain(struct iommu_domain *domain)
128 {
129 return container_of(domain, struct mock_iommu_domain, domain);
130 }
131
132 struct mock_iommu_domain_nested {
133 struct iommu_domain domain;
134 struct mock_viommu *mock_viommu;
135 u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
136 };
137
138 static inline struct mock_iommu_domain_nested *
139 to_mock_nested(struct iommu_domain *domain)
140 {
141 return container_of(domain, struct mock_iommu_domain_nested, domain);
142 }
143
144 struct mock_viommu {
145 struct iommufd_viommu core;
146 struct mock_iommu_domain *s2_parent;
147 struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
148 struct mutex queue_mutex;
149
150 unsigned long mmap_offset;
151 u32 *page; /* Mmap page to test u32 type of in_data */
152 };
153
154 static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
155 {
156 return container_of(viommu, struct mock_viommu, core);
157 }
158
159 struct mock_hw_queue {
160 struct iommufd_hw_queue core;
161 struct mock_viommu *mock_viommu;
162 struct mock_hw_queue *prev;
163 u16 index;
164 };
165
166 static inline struct mock_hw_queue *
167 to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
168 {
169 return container_of(hw_queue, struct mock_hw_queue, core);
170 }
171
172 enum selftest_obj_type {
173 TYPE_IDEV,
174 };
175
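/*
 * Mock device state: @viommu and @vdev_id record the vIOMMU the device is
 * currently attached through (protected by @viommu_rwsem), @cache models
 * per-device cache tags for the invalidation tests, and @iopf_refcount
 * counts attached fault-capable domains so the device is only added to and
 * removed from the IOPF queue once.
 */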
176 struct mock_dev {
177 struct device dev;
178 struct mock_viommu *viommu;
179 struct rw_semaphore viommu_rwsem;
180 unsigned long flags;
181 unsigned long vdev_id;
182 int id;
183 u32 cache[MOCK_DEV_CACHE_NUM];
184 atomic_t pasid_1024_fake_error;
185 unsigned int iopf_refcount;
186 struct iommu_domain *domain;
187 };
188
189 static inline struct mock_dev *to_mock_dev(struct device *dev)
190 {
191 return container_of(dev, struct mock_dev, dev);
192 }
193
194 struct selftest_obj {
195 struct iommufd_object obj;
196 enum selftest_obj_type type;
197
198 union {
199 struct {
200 struct iommufd_device *idev;
201 struct iommufd_ctx *ictx;
202 struct mock_dev *mock_dev;
203 } idev;
204 };
205 };
206
207 static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
208 {
209 return container_of(obj, struct selftest_obj, obj);
210 }
211
212 static int mock_domain_nop_attach(struct iommu_domain *domain,
213 struct device *dev, struct iommu_domain *old)
214 {
215 struct mock_dev *mdev = to_mock_dev(dev);
216 struct mock_viommu *new_viommu = NULL;
217 unsigned long vdev_id = 0;
218 int rc;
219
220 if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
221 return -EINVAL;
222
223 iommu_group_mutex_assert(dev);
224 if (domain->type == IOMMU_DOMAIN_NESTED) {
225 new_viommu = to_mock_nested(domain)->mock_viommu;
226 if (new_viommu) {
227 rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
228 &vdev_id);
229 if (rc)
230 return rc;
231 }
232 }
233 if (new_viommu != mdev->viommu) {
234 down_write(&mdev->viommu_rwsem);
235 mdev->viommu = new_viommu;
236 mdev->vdev_id = vdev_id;
237 up_write(&mdev->viommu_rwsem);
238 }
239
240 rc = mock_dev_enable_iopf(dev, domain);
241 if (rc)
242 return rc;
243
244 mock_dev_disable_iopf(dev, mdev->domain);
245 mdev->domain = domain;
246
247 return 0;
248 }
249
250 static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
251 struct device *dev, ioasid_t pasid,
252 struct iommu_domain *old)
253 {
254 struct mock_dev *mdev = to_mock_dev(dev);
255 int rc;
256
257 /*
258 * On the first attach with pasid 1024, set the
259 * mdev->pasid_1024_fake_error. Hence the second call of this op
260 * can fake an error to validate the error path of the core. This
261 * is helpful for testing the case in which the iommu core needs to
262 * roll back to the old domain due to a driver failure, e.g. replace.
263 * Be careful about the third call of this op: it shall
264 * succeed, since the mdev->pasid_1024_fake_error is cleared in the
265 * second call.
266 */
267 if (pasid == 1024) {
268 if (domain->type == IOMMU_DOMAIN_BLOCKED) {
269 atomic_set(&mdev->pasid_1024_fake_error, 0);
270 } else if (atomic_read(&mdev->pasid_1024_fake_error)) {
271 /*
272 * Clear the flag, and fake an error to fail the
273 * replacement.
274 */
275 atomic_set(&mdev->pasid_1024_fake_error, 0);
276 return -ENOMEM;
277 } else {
278 /* Set the flag to fake an error in next call */
279 atomic_set(&mdev->pasid_1024_fake_error, 1);
280 }
281 }
282
283 rc = mock_dev_enable_iopf(dev, domain);
284 if (rc)
285 return rc;
286
287 mock_dev_disable_iopf(dev, old);
288
289 return 0;
290 }
291
292 static const struct iommu_domain_ops mock_blocking_ops = {
293 .attach_dev = mock_domain_nop_attach,
294 .set_dev_pasid = mock_domain_set_dev_pasid_nop
295 };
296
297 static struct iommu_domain mock_blocking_domain = {
298 .type = IOMMU_DOMAIN_BLOCKED,
299 .ops = &mock_blocking_ops,
300 };
301
302 static void *mock_domain_hw_info(struct device *dev, u32 *length,
303 enum iommu_hw_info_type *type)
304 {
305 struct iommu_test_hw_info *info;
306
307 if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
308 *type != IOMMU_HW_INFO_TYPE_SELFTEST)
309 return ERR_PTR(-EOPNOTSUPP);
310
311 info = kzalloc(sizeof(*info), GFP_KERNEL);
312 if (!info)
313 return ERR_PTR(-ENOMEM);
314
315 info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
316 *length = sizeof(*info);
317 *type = IOMMU_HW_INFO_TYPE_SELFTEST;
318
319 return info;
320 }
321
322 static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
323 bool enable)
324 {
325 struct mock_iommu_domain *mock = to_mock_domain(domain);
326 unsigned long flags = mock->flags;
327
328 if (enable && !domain->dirty_ops)
329 return -EINVAL;
330
331 /* No change? */
332 if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
333 return 0;
334
335 flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
336
337 mock->flags = flags;
338 return 0;
339 }
340
341 static struct mock_iommu_domain_nested *
342 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
343 {
344 struct mock_iommu_domain_nested *mock_nested;
345 struct iommu_hwpt_selftest user_cfg;
346 int rc, i;
347
348 if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
349 return ERR_PTR(-EOPNOTSUPP);
350
351 rc = iommu_copy_struct_from_user(&user_cfg, user_data,
352 IOMMU_HWPT_DATA_SELFTEST, iotlb);
353 if (rc)
354 return ERR_PTR(rc);
355
356 mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
357 if (!mock_nested)
358 return ERR_PTR(-ENOMEM);
359 mock_nested->domain.ops = &domain_nested_ops;
360 mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
361 for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
362 mock_nested->iotlb[i] = user_cfg.iotlb;
363 return mock_nested;
364 }
365
366 static struct iommu_domain *
367 mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
368 u32 flags, const struct iommu_user_data *user_data)
369 {
370 struct mock_iommu_domain_nested *mock_nested;
371 struct mock_iommu_domain *mock_parent;
372
373 if (flags & ~IOMMU_HWPT_ALLOC_PASID)
374 return ERR_PTR(-EOPNOTSUPP);
375 if (!parent || !(parent->type & __IOMMU_DOMAIN_PAGING))
376 return ERR_PTR(-EINVAL);
377
378 mock_parent = to_mock_domain(parent);
379 if (!mock_parent)
380 return ERR_PTR(-EINVAL);
381
382 mock_nested = __mock_domain_alloc_nested(user_data);
383 if (IS_ERR(mock_nested))
384 return ERR_CAST(mock_nested);
385 return &mock_nested->domain;
386 }
387
388 static void mock_domain_free(struct iommu_domain *domain)
389 {
390 struct mock_iommu_domain *mock = to_mock_domain(domain);
391
392 pt_iommu_deinit(&mock->iommu);
393 kfree(mock);
394 }
395
396 static void mock_iotlb_sync(struct iommu_domain *domain,
397 struct iommu_iotlb_gather *gather)
398 {
399 iommu_put_pages_list(&gather->freelist);
400 }
401
402 static const struct iommu_domain_ops amdv1_mock_ops = {
403 IOMMU_PT_DOMAIN_OPS(amdv1_mock),
404 .free = mock_domain_free,
405 .attach_dev = mock_domain_nop_attach,
406 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
407 .iotlb_sync = &mock_iotlb_sync,
408 };
409
410 static const struct iommu_domain_ops amdv1_mock_huge_ops = {
411 IOMMU_PT_DOMAIN_OPS(amdv1_mock),
412 .free = mock_domain_free,
413 .attach_dev = mock_domain_nop_attach,
414 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
415 .iotlb_sync = &mock_iotlb_sync,
416 };
417 #undef pt_iommu_amdv1_mock_map_pages
418
419 static const struct iommu_dirty_ops amdv1_mock_dirty_ops = {
420 IOMMU_PT_DIRTY_OPS(amdv1_mock),
421 .set_dirty_tracking = mock_domain_set_dirty_tracking,
422 };
423
424 static const struct iommu_domain_ops amdv1_ops = {
425 IOMMU_PT_DOMAIN_OPS(amdv1),
426 .free = mock_domain_free,
427 .attach_dev = mock_domain_nop_attach,
428 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
429 .iotlb_sync = &mock_iotlb_sync,
430 };
431
432 static const struct iommu_dirty_ops amdv1_dirty_ops = {
433 IOMMU_PT_DIRTY_OPS(amdv1),
434 .set_dirty_tracking = mock_domain_set_dirty_tracking,
435 };
436
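/*
 * Build the mock paging domain on top of generic-pt: MOCK_IOMMUPT_DEFAULT
 * and MOCK_IOMMUPT_HUGE use the "amdv1_mock" table format with its 2k page
 * size, MOCK_IOMMUPT_AMDV1 uses the real AMDv1 format, and the default type
 * additionally has its aperture overridden to the MOCK_APERTURE range.
 */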
437 static struct mock_iommu_domain *
438 mock_domain_alloc_pgtable(struct device *dev,
439 const struct iommu_hwpt_selftest *user_cfg, u32 flags)
440 {
441 struct mock_iommu_domain *mock;
442 int rc;
443
444 mock = kzalloc(sizeof(*mock), GFP_KERNEL);
445 if (!mock)
446 return ERR_PTR(-ENOMEM);
447 mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
448
449 mock->amdv1.iommu.nid = NUMA_NO_NODE;
450
451 switch (user_cfg->pagetable_type) {
452 case MOCK_IOMMUPT_DEFAULT:
453 case MOCK_IOMMUPT_HUGE: {
454 struct pt_iommu_amdv1_cfg cfg = {};
455
456 /* The mock version has a 2k page size */
457 cfg.common.hw_max_vasz_lg2 = 56;
458 cfg.common.hw_max_oasz_lg2 = 51;
459 cfg.starting_level = 2;
460 if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
461 mock->domain.ops = &amdv1_mock_huge_ops;
462 else
463 mock->domain.ops = &amdv1_mock_ops;
464 rc = pt_iommu_amdv1_mock_init(&mock->amdv1, &cfg, GFP_KERNEL);
465 if (rc)
466 goto err_free;
467
468 /*
469 * In huge mode userspace should only provide huge pages, but we
470 * have to include PAGE_SIZE for the domain to be accepted by
471 * iommufd.
472 */
473 if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
474 mock->domain.pgsize_bitmap = MOCK_HUGE_PAGE_SIZE |
475 PAGE_SIZE;
476 if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
477 mock->domain.dirty_ops = &amdv1_mock_dirty_ops;
478 break;
479 }
480
481 case MOCK_IOMMUPT_AMDV1: {
482 struct pt_iommu_amdv1_cfg cfg = {};
483
484 cfg.common.hw_max_vasz_lg2 = 64;
485 cfg.common.hw_max_oasz_lg2 = 52;
486 cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
487 BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
488 BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
489 cfg.starting_level = 2;
490 mock->domain.ops = &amdv1_ops;
491 rc = pt_iommu_amdv1_init(&mock->amdv1, &cfg, GFP_KERNEL);
492 if (rc)
493 goto err_free;
494 if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
495 mock->domain.dirty_ops = &amdv1_dirty_ops;
496 break;
497 }
498 default:
499 rc = -EOPNOTSUPP;
500 goto err_free;
501 }
502
503 /*
504 * Override the real aperture to the MOCK aperture for test purposes.
505 */
506 if (user_cfg->pagetable_type == MOCK_IOMMUPT_DEFAULT) {
507 WARN_ON(mock->domain.geometry.aperture_start != 0);
508 WARN_ON(mock->domain.geometry.aperture_end < MOCK_APERTURE_LAST);
509
510 mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
511 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
512 }
513
514 return mock;
515 err_free:
516 kfree(mock);
517 return ERR_PTR(rc);
518 }
519
520 static struct iommu_domain *
521 mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
522 const struct iommu_user_data *user_data)
523 {
524 bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
525 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
526 IOMMU_HWPT_ALLOC_NEST_PARENT |
527 IOMMU_HWPT_ALLOC_PASID;
528 struct mock_dev *mdev = to_mock_dev(dev);
529 bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
530 struct iommu_hwpt_selftest user_cfg = {};
531 struct mock_iommu_domain *mock;
532 int rc;
533
534 if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
535 return ERR_PTR(-EOPNOTSUPP);
536
537 if (user_data && (user_data->type != IOMMU_HWPT_DATA_SELFTEST &&
538 user_data->type != IOMMU_HWPT_DATA_NONE))
539 return ERR_PTR(-EOPNOTSUPP);
540
541 if (user_data) {
542 rc = iommu_copy_struct_from_user(
543 &user_cfg, user_data, IOMMU_HWPT_DATA_SELFTEST, iotlb);
544 if (rc)
545 return ERR_PTR(rc);
546 }
547
548 mock = mock_domain_alloc_pgtable(dev, &user_cfg, flags);
549 if (IS_ERR(mock))
550 return ERR_CAST(mock);
551 return &mock->domain;
552 }
553
554 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
555 {
556 struct mock_dev *mdev = to_mock_dev(dev);
557
558 switch (cap) {
559 case IOMMU_CAP_CACHE_COHERENCY:
560 return true;
561 case IOMMU_CAP_DIRTY_TRACKING:
562 return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
563 default:
564 break;
565 }
566
567 return false;
568 }
569
570 static struct iopf_queue *mock_iommu_iopf_queue;
571
572 static struct mock_iommu_device {
573 struct iommu_device iommu_dev;
574 struct completion complete;
575 refcount_t users;
576 } mock_iommu;
577
578 static struct iommu_device *mock_probe_device(struct device *dev)
579 {
580 if (dev->bus != &iommufd_mock_bus_type.bus)
581 return ERR_PTR(-ENODEV);
582 return &mock_iommu.iommu_dev;
583 }
584
585 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
586 struct iommu_page_response *msg)
587 {
588 }
589
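/*
 * IOPF enablement is refcounted per mock device so that attaching several
 * fault-capable domains (e.g. one per PASID) adds the device to the IOPF
 * queue only once and removes it when the last such domain is detached.
 */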
590 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
591 {
592 struct mock_dev *mdev = to_mock_dev(dev);
593 int ret;
594
595 if (!domain || !domain->iopf_handler)
596 return 0;
597
598 if (!mock_iommu_iopf_queue)
599 return -ENODEV;
600
601 if (mdev->iopf_refcount) {
602 mdev->iopf_refcount++;
603 return 0;
604 }
605
606 ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
607 if (ret)
608 return ret;
609
610 mdev->iopf_refcount = 1;
611
612 return 0;
613 }
614
615 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
616 {
617 struct mock_dev *mdev = to_mock_dev(dev);
618
619 if (!domain || !domain->iopf_handler)
620 return;
621
622 if (--mdev->iopf_refcount)
623 return;
624
625 iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
626 }
627
628 static void mock_viommu_destroy(struct iommufd_viommu *viommu)
629 {
630 struct mock_iommu_device *mock_iommu = container_of(
631 viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
632 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
633
634 if (refcount_dec_and_test(&mock_iommu->users))
635 complete(&mock_iommu->complete);
636 if (mock_viommu->mmap_offset)
637 iommufd_viommu_destroy_mmap(&mock_viommu->core,
638 mock_viommu->mmap_offset);
639 free_page((unsigned long)mock_viommu->page);
640 mutex_destroy(&mock_viommu->queue_mutex);
641
642 /* iommufd core frees mock_viommu and viommu */
643 }
644
645 static struct iommu_domain *
646 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
647 const struct iommu_user_data *user_data)
648 {
649 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
650 struct mock_iommu_domain_nested *mock_nested;
651
652 if (flags & ~IOMMU_HWPT_ALLOC_PASID)
653 return ERR_PTR(-EOPNOTSUPP);
654
655 mock_nested = __mock_domain_alloc_nested(user_data);
656 if (IS_ERR(mock_nested))
657 return ERR_CAST(mock_nested);
658 mock_nested->mock_viommu = mock_viommu;
659 return &mock_nested->domain;
660 }
661
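/*
 * Each selftest invalidation command names a vdev_id and a cache_id to
 * clear; IOMMU_TEST_INVALIDATE_FLAG_ALL clears every cache entry of the
 * target mock device instead. array->entry_num is set to the number of
 * commands actually consumed so userspace can detect partial progress.
 */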
662 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
663 struct iommu_user_data_array *array)
664 {
665 struct iommu_viommu_invalidate_selftest *cmds;
666 struct iommu_viommu_invalidate_selftest *cur;
667 struct iommu_viommu_invalidate_selftest *end;
668 int rc;
669
670 /* A zero-length array is allowed to validate the array type */
671 if (array->entry_num == 0 &&
672 array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) {
673 array->entry_num = 0;
674 return 0;
675 }
676
677 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
678 if (!cmds)
679 return -ENOMEM;
680 cur = cmds;
681 end = cmds + array->entry_num;
682
683 static_assert(sizeof(*cmds) == 3 * sizeof(u32));
684 rc = iommu_copy_struct_from_full_user_array(
685 cmds, sizeof(*cmds), array,
686 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST);
687 if (rc)
688 goto out;
689
690 while (cur != end) {
691 struct mock_dev *mdev;
692 struct device *dev;
693 int i;
694
695 if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
696 rc = -EOPNOTSUPP;
697 goto out;
698 }
699
700 if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
701 rc = -EINVAL;
702 goto out;
703 }
704
705 xa_lock(&viommu->vdevs);
706 dev = iommufd_viommu_find_dev(viommu,
707 (unsigned long)cur->vdev_id);
708 if (!dev) {
709 xa_unlock(&viommu->vdevs);
710 rc = -EINVAL;
711 goto out;
712 }
713 mdev = container_of(dev, struct mock_dev, dev);
714
715 if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
716 /* Invalidate all cache entries and ignore cache_id */
717 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
718 mdev->cache[i] = 0;
719 } else {
720 mdev->cache[cur->cache_id] = 0;
721 }
722 xa_unlock(&viommu->vdevs);
723
724 cur++;
725 }
726 out:
727 array->entry_num = cur - cmds;
728 kfree(cmds);
729 return rc;
730 }
731
732 static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
733 enum iommu_hw_queue_type queue_type)
734 {
735 if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
736 return 0;
737 return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
738 }
739
740 static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
741 {
742 struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
743 struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
744
745 mutex_lock(&mock_viommu->queue_mutex);
746 mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
747 if (mock_hw_queue->prev)
748 iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
749 core);
750 mutex_unlock(&mock_viommu->queue_mutex);
751 }
752
753 /* Test iommufd_hw_queue_depend/undepend() */
754 static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
755 phys_addr_t base_addr_pa)
756 {
757 struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
758 struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
759 struct mock_hw_queue *prev = NULL;
760 int rc = 0;
761
762 if (index >= IOMMU_TEST_HW_QUEUE_MAX)
763 return -EINVAL;
764
765 mutex_lock(&mock_viommu->queue_mutex);
766
767 if (mock_viommu->hw_queue[index]) {
768 rc = -EEXIST;
769 goto unlock;
770 }
771
772 if (index) {
773 prev = mock_viommu->hw_queue[index - 1];
774 if (!prev) {
775 rc = -EIO;
776 goto unlock;
777 }
778 }
779
780 /*
781 * Test to catch a kernel bug where the core converts the physical address
782 * incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
783 */
784 if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
785 hw_queue->base_addr)) {
786 rc = -EFAULT;
787 goto unlock;
788 }
789
790 if (prev) {
791 rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
792 if (rc)
793 goto unlock;
794 }
795
796 mock_hw_queue->prev = prev;
797 mock_hw_queue->mock_viommu = mock_viommu;
798 mock_viommu->hw_queue[index] = mock_hw_queue;
799
800 hw_queue->destroy = &mock_hw_queue_destroy;
801 unlock:
802 mutex_unlock(&mock_viommu->queue_mutex);
803 return rc;
804 }
805
806 static struct iommufd_viommu_ops mock_viommu_ops = {
807 .destroy = mock_viommu_destroy,
808 .alloc_domain_nested = mock_viommu_alloc_domain_nested,
809 .cache_invalidate = mock_viommu_cache_invalidate,
810 .get_hw_queue_size = mock_viommu_get_hw_queue_size,
811 .hw_queue_init_phys = mock_hw_queue_init_phys,
812 };
813
814 static size_t mock_get_viommu_size(struct device *dev,
815 enum iommu_viommu_type viommu_type)
816 {
817 if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST)
818 return 0;
819 return VIOMMU_STRUCT_SIZE(struct mock_viommu, core);
820 }
821
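/*
 * When user_data is provided, set up a two-page mmap window whose first u32
 * echoes in_data back to userspace, so the selftest can exercise both the
 * out_data loopback and the vIOMMU mmap path.
 */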
822 static int mock_viommu_init(struct iommufd_viommu *viommu,
823 struct iommu_domain *parent_domain,
824 const struct iommu_user_data *user_data)
825 {
826 struct mock_iommu_device *mock_iommu = container_of(
827 viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
828 struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
829 struct iommu_viommu_selftest data;
830 int rc;
831
832 if (user_data) {
833 rc = iommu_copy_struct_from_user(
834 &data, user_data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
835 if (rc)
836 return rc;
837
838 /* Allocate two pages */
839 mock_viommu->page =
840 (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
841 if (!mock_viommu->page)
842 return -ENOMEM;
843
844 rc = iommufd_viommu_alloc_mmap(&mock_viommu->core,
845 __pa(mock_viommu->page),
846 PAGE_SIZE * 2,
847 &mock_viommu->mmap_offset);
848 if (rc)
849 goto err_free_page;
850
851 /* For loopback tests on both the page and out_data */
852 *mock_viommu->page = data.in_data;
853 data.out_data = data.in_data;
854 data.out_mmap_length = PAGE_SIZE * 2;
855 data.out_mmap_offset = mock_viommu->mmap_offset;
856 rc = iommu_copy_struct_to_user(
857 user_data, &data, IOMMU_VIOMMU_TYPE_SELFTEST, out_data);
858 if (rc)
859 goto err_destroy_mmap;
860 }
861
862 refcount_inc(&mock_iommu->users);
863 mutex_init(&mock_viommu->queue_mutex);
864 mock_viommu->s2_parent = to_mock_domain(parent_domain);
865
866 viommu->ops = &mock_viommu_ops;
867 return 0;
868
869 err_destroy_mmap:
870 iommufd_viommu_destroy_mmap(&mock_viommu->core,
871 mock_viommu->mmap_offset);
872 err_free_page:
873 free_page((unsigned long)mock_viommu->page);
874 return rc;
875 }
876
877 static const struct iommu_ops mock_ops = {
878 /*
879 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
880 * because it is zero.
881 */
882 .default_domain = &mock_blocking_domain,
883 .blocked_domain = &mock_blocking_domain,
884 .owner = THIS_MODULE,
885 .hw_info = mock_domain_hw_info,
886 .domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
887 .domain_alloc_nested = mock_domain_alloc_nested,
888 .capable = mock_domain_capable,
889 .device_group = generic_device_group,
890 .probe_device = mock_probe_device,
891 .page_response = mock_domain_page_response,
892 .user_pasid_table = true,
893 .get_viommu_size = mock_get_viommu_size,
894 .viommu_init = mock_viommu_init,
895 };
896
897 static void mock_domain_free_nested(struct iommu_domain *domain)
898 {
899 kfree(to_mock_nested(domain));
900 }
901
902 static int
903 mock_domain_cache_invalidate_user(struct iommu_domain *domain,
904 struct iommu_user_data_array *array)
905 {
906 struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain);
907 struct iommu_hwpt_invalidate_selftest inv;
908 u32 processed = 0;
909 int i = 0, j;
910 int rc = 0;
911
912 if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
913 rc = -EINVAL;
914 goto out;
915 }
916
917 for ( ; i < array->entry_num; i++) {
918 rc = iommu_copy_struct_from_user_array(&inv, array,
919 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
920 i, iotlb_id);
921 if (rc)
922 break;
923
924 if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
925 rc = -EOPNOTSUPP;
926 break;
927 }
928
929 if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
930 rc = -EINVAL;
931 break;
932 }
933
934 if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
935 /* Invalidate all mock iotlb entries and ignore iotlb_id */
936 for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
937 mock_nested->iotlb[j] = 0;
938 } else {
939 mock_nested->iotlb[inv.iotlb_id] = 0;
940 }
941
942 processed++;
943 }
944
945 out:
946 array->entry_num = processed;
947 return rc;
948 }
949
950 static struct iommu_domain_ops domain_nested_ops = {
951 .free = mock_domain_free_nested,
952 .attach_dev = mock_domain_nop_attach,
953 .cache_invalidate_user = mock_domain_cache_invalidate_user,
954 .set_dev_pasid = mock_domain_set_dev_pasid_nop,
955 };
956
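/*
 * Translate a user-provided object ID into the mock paging or nested domain
 * it refers to, rejecting HWPTs not owned by this mock driver. Callers must
 * release the returned object with iommufd_put_object().
 */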
957 static inline struct iommufd_hw_pagetable *
958 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
959 {
960 struct iommufd_object *obj;
961
962 obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
963 if (IS_ERR(obj))
964 return ERR_CAST(obj);
965 return container_of(obj, struct iommufd_hw_pagetable, obj);
966 }
967
968 static inline struct iommufd_hw_pagetable *
969 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
970 struct mock_iommu_domain **mock)
971 {
972 struct iommufd_hw_pagetable *hwpt;
973
974 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
975 if (IS_ERR(hwpt))
976 return hwpt;
977 if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
978 hwpt->domain->owner != &mock_ops) {
979 iommufd_put_object(ucmd->ictx, &hwpt->obj);
980 return ERR_PTR(-EINVAL);
981 }
982 *mock = to_mock_domain(hwpt->domain);
983 return hwpt;
984 }
985
986 static inline struct iommufd_hw_pagetable *
987 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
988 struct mock_iommu_domain_nested **mock_nested)
989 {
990 struct iommufd_hw_pagetable *hwpt;
991
992 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
993 if (IS_ERR(hwpt))
994 return hwpt;
995 if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
996 hwpt->domain->ops != &domain_nested_ops) {
997 iommufd_put_object(ucmd->ictx, &hwpt->obj);
998 return ERR_PTR(-EINVAL);
999 }
1000 *mock_nested = to_mock_nested(hwpt->domain);
1001 return hwpt;
1002 }
1003
1004 static void mock_dev_release(struct device *dev)
1005 {
1006 struct mock_dev *mdev = to_mock_dev(dev);
1007
1008 ida_free(&mock_dev_ida, mdev->id);
1009 kfree(mdev);
1010 }
1011
1012 static struct mock_dev *mock_dev_create(unsigned long dev_flags)
1013 {
1014 struct property_entry prop[] = {
1015 PROPERTY_ENTRY_U32("pasid-num-bits", 0),
1016 {},
1017 };
1018 const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
1019 MOCK_FLAGS_DEVICE_PASID;
1020 struct mock_dev *mdev;
1021 int rc, i;
1022
1023 if (dev_flags & ~valid_flags)
1024 return ERR_PTR(-EINVAL);
1025
1026 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1027 if (!mdev)
1028 return ERR_PTR(-ENOMEM);
1029
1030 init_rwsem(&mdev->viommu_rwsem);
1031 device_initialize(&mdev->dev);
1032 mdev->flags = dev_flags;
1033 mdev->dev.release = mock_dev_release;
1034 mdev->dev.bus = &iommufd_mock_bus_type.bus;
1035 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++)
1036 mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT;
1037
1038 rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
1039 if (rc < 0)
1040 goto err_put;
1041 mdev->id = rc;
1042
1043 rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
1044 if (rc)
1045 goto err_put;
1046
1047 if (dev_flags & MOCK_FLAGS_DEVICE_PASID)
1048 prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH);
1049
1050 rc = device_create_managed_software_node(&mdev->dev, prop, NULL);
1051 if (rc) {
1052 dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc);
1053 goto err_put;
1054 }
1055
1056 rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
1057 if (rc)
1058 goto err_put;
1059 return mdev;
1060
1061 err_put:
1062 put_device(&mdev->dev);
1063 return ERR_PTR(rc);
1064 }
1065
1066 static void mock_dev_destroy(struct mock_dev *mdev)
1067 {
1068 device_unregister(&mdev->dev);
1069 }
1070
1071 bool iommufd_selftest_is_mock_dev(struct device *dev)
1072 {
1073 return dev->release == mock_dev_release;
1074 }
1075
1076 /* Create an hw_pagetable with the mock domain so we can test the domain ops */
1077 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
1078 struct iommu_test_cmd *cmd)
1079 {
1080 struct iommufd_device *idev;
1081 struct selftest_obj *sobj;
1082 u32 pt_id = cmd->id;
1083 u32 dev_flags = 0;
1084 u32 idev_id;
1085 int rc;
1086
1087 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
1088 if (IS_ERR(sobj))
1089 return PTR_ERR(sobj);
1090
1091 sobj->idev.ictx = ucmd->ictx;
1092 sobj->type = TYPE_IDEV;
1093
1094 if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
1095 dev_flags = cmd->mock_domain_flags.dev_flags;
1096
1097 sobj->idev.mock_dev = mock_dev_create(dev_flags);
1098 if (IS_ERR(sobj->idev.mock_dev)) {
1099 rc = PTR_ERR(sobj->idev.mock_dev);
1100 goto out_sobj;
1101 }
1102
1103 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
1104 &idev_id);
1105 if (IS_ERR(idev)) {
1106 rc = PTR_ERR(idev);
1107 goto out_mdev;
1108 }
1109 sobj->idev.idev = idev;
1110
1111 rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id);
1112 if (rc)
1113 goto out_unbind;
1114
1115 /* Userspace must destroy the device_id to destroy the object */
1116 cmd->mock_domain.out_hwpt_id = pt_id;
1117 cmd->mock_domain.out_stdev_id = sobj->obj.id;
1118 cmd->mock_domain.out_idev_id = idev_id;
1119 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1120 if (rc)
1121 goto out_detach;
1122 iommufd_object_finalize(ucmd->ictx, &sobj->obj);
1123 return 0;
1124
1125 out_detach:
1126 iommufd_device_detach(idev, IOMMU_NO_PASID);
1127 out_unbind:
1128 iommufd_device_unbind(idev);
1129 out_mdev:
1130 mock_dev_destroy(sobj->idev.mock_dev);
1131 out_sobj:
1132 iommufd_object_abort(ucmd->ictx, &sobj->obj);
1133 return rc;
1134 }
1135
1136 static struct selftest_obj *
1137 iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id)
1138 {
1139 struct iommufd_object *dev_obj;
1140 struct selftest_obj *sobj;
1141
1142 /*
1143 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
1144 * it doesn't race with detach, which is not allowed.
1145 */
1146 dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST);
1147 if (IS_ERR(dev_obj))
1148 return ERR_CAST(dev_obj);
1149
1150 sobj = to_selftest_obj(dev_obj);
1151 if (sobj->type != TYPE_IDEV) {
1152 iommufd_put_object(ictx, dev_obj);
1153 return ERR_PTR(-EINVAL);
1154 }
1155 return sobj;
1156 }
1157
1158 /* Replace the mock domain with a manually allocated hw_pagetable */
1159 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
1160 unsigned int device_id, u32 pt_id,
1161 struct iommu_test_cmd *cmd)
1162 {
1163 struct selftest_obj *sobj;
1164 int rc;
1165
1166 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id);
1167 if (IS_ERR(sobj))
1168 return PTR_ERR(sobj);
1169
1170 rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id);
1171 if (rc)
1172 goto out_sobj;
1173
1174 cmd->mock_domain_replace.pt_id = pt_id;
1175 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1176
1177 out_sobj:
1178 iommufd_put_object(ucmd->ictx, &sobj->obj);
1179 return rc;
1180 }
1181
1182 /* Add an additional reserved IOVA to the IOAS */
1183 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
1184 unsigned int mockpt_id,
1185 unsigned long start, size_t length)
1186 {
1187 unsigned long last;
1188 struct iommufd_ioas *ioas;
1189 int rc;
1190
1191 if (!length)
1192 return -EINVAL;
1193 if (check_add_overflow(start, length - 1, &last))
1194 return -EOVERFLOW;
1195
1196 ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
1197 if (IS_ERR(ioas))
1198 return PTR_ERR(ioas);
1199 down_write(&ioas->iopt.iova_rwsem);
1200 rc = iopt_reserve_iova(&ioas->iopt, start, last, NULL);
1201 up_write(&ioas->iopt.iova_rwsem);
1202 iommufd_put_object(ucmd->ictx, &ioas->obj);
1203 return rc;
1204 }
1205
1206 /* Check that every pfn under each iova matches the pfn under a user VA */
1207 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
1208 unsigned int mockpt_id, unsigned long iova,
1209 size_t length, void __user *uptr)
1210 {
1211 struct iommufd_hw_pagetable *hwpt;
1212 struct mock_iommu_domain *mock;
1213 unsigned int page_size;
1214 uintptr_t end;
1215 int rc;
1216
1217 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1218 if (IS_ERR(hwpt))
1219 return PTR_ERR(hwpt);
1220
1221 page_size = 1 << __ffs(mock->domain.pgsize_bitmap);
1222 if (iova % page_size || length % page_size ||
1223 (uintptr_t)uptr % page_size ||
1224 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) {
1225 rc = -EINVAL;
1226 goto out_put;
1227 }
1228
1229 for (; length; length -= page_size) {
1230 struct page *pages[1];
1231 phys_addr_t io_phys;
1232 unsigned long pfn;
1233 long npages;
1234
1235 npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
1236 pages);
1237 if (npages < 0) {
1238 rc = npages;
1239 goto out_put;
1240 }
1241 if (WARN_ON(npages != 1)) {
1242 rc = -EFAULT;
1243 goto out_put;
1244 }
1245 pfn = page_to_pfn(pages[0]);
1246 put_page(pages[0]);
1247
1248 io_phys = mock->domain.ops->iova_to_phys(&mock->domain, iova);
1249 if (io_phys !=
1250 pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
1251 rc = -EINVAL;
1252 goto out_put;
1253 }
1254 iova += page_size;
1255 uptr += page_size;
1256 }
1257 rc = 0;
1258
1259 out_put:
1260 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1261 return rc;
1262 }
1263
1264 /* Check that the page ref count matches, to look for missing pin/unpins */
1265 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
1266 void __user *uptr, size_t length,
1267 unsigned int refs)
1268 {
1269 uintptr_t end;
1270
1271 if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
1272 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
1273 return -EINVAL;
1274
1275 for (; length; length -= PAGE_SIZE) {
1276 struct page *pages[1];
1277 long npages;
1278
1279 npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
1280 if (npages < 0)
1281 return npages;
1282 if (WARN_ON(npages != 1))
1283 return -EFAULT;
1284 if (!PageCompound(pages[0])) {
1285 unsigned int count;
1286
1287 count = page_ref_count(pages[0]);
1288 if (count / GUP_PIN_COUNTING_BIAS != refs) {
1289 put_page(pages[0]);
1290 return -EIO;
1291 }
1292 }
1293 put_page(pages[0]);
1294 uptr += PAGE_SIZE;
1295 }
1296 return 0;
1297 }
1298
1299 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, u32 mockpt_id,
1300 unsigned int iotlb_id, u32 iotlb)
1301 {
1302 struct mock_iommu_domain_nested *mock_nested;
1303 struct iommufd_hw_pagetable *hwpt;
1304 int rc = 0;
1305
1306 hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
1307 if (IS_ERR(hwpt))
1308 return PTR_ERR(hwpt);
1309
1310 mock_nested = to_mock_nested(hwpt->domain);
1311
1312 if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
1313 mock_nested->iotlb[iotlb_id] != iotlb)
1314 rc = -EINVAL;
1315 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1316 return rc;
1317 }
1318
1319 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id,
1320 unsigned int cache_id, u32 cache)
1321 {
1322 struct iommufd_device *idev;
1323 struct mock_dev *mdev;
1324 int rc = 0;
1325
1326 idev = iommufd_get_device(ucmd, idev_id);
1327 if (IS_ERR(idev))
1328 return PTR_ERR(idev);
1329 mdev = container_of(idev->dev, struct mock_dev, dev);
1330
1331 if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache)
1332 rc = -EINVAL;
1333 iommufd_put_object(ucmd->ictx, &idev->obj);
1334 return rc;
1335 }
1336
1337 struct selftest_access {
1338 struct iommufd_access *access;
1339 struct file *file;
1340 struct mutex lock;
1341 struct list_head items;
1342 unsigned int next_id;
1343 bool destroying;
1344 };
1345
1346 struct selftest_access_item {
1347 struct list_head items_elm;
1348 unsigned long iova;
1349 size_t length;
1350 unsigned int id;
1351 };
1352
1353 static const struct file_operations iommfd_test_staccess_fops;
1354
1355 static struct selftest_access *iommufd_access_get(int fd)
1356 {
1357 struct file *file;
1358
1359 file = fget(fd);
1360 if (!file)
1361 return ERR_PTR(-EBADFD);
1362
1363 if (file->f_op != &iommfd_test_staccess_fops) {
1364 fput(file);
1365 return ERR_PTR(-EBADFD);
1366 }
1367 return file->private_data;
1368 }
1369
1370 static void iommufd_test_access_unmap(void *data, unsigned long iova,
1371 unsigned long length)
1372 {
1373 unsigned long iova_last = iova + length - 1;
1374 struct selftest_access *staccess = data;
1375 struct selftest_access_item *item;
1376 struct selftest_access_item *tmp;
1377
1378 mutex_lock(&staccess->lock);
1379 list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
1380 if (iova > item->iova + item->length - 1 ||
1381 iova_last < item->iova)
1382 continue;
1383 list_del(&item->items_elm);
1384 iommufd_access_unpin_pages(staccess->access, item->iova,
1385 item->length);
1386 kfree(item);
1387 }
1388 mutex_unlock(&staccess->lock);
1389 }
1390
1391 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
1392 unsigned int access_id,
1393 unsigned int item_id)
1394 {
1395 struct selftest_access_item *item;
1396 struct selftest_access *staccess;
1397
1398 staccess = iommufd_access_get(access_id);
1399 if (IS_ERR(staccess))
1400 return PTR_ERR(staccess);
1401
1402 mutex_lock(&staccess->lock);
1403 list_for_each_entry(item, &staccess->items, items_elm) {
1404 if (item->id == item_id) {
1405 list_del(&item->items_elm);
1406 iommufd_access_unpin_pages(staccess->access, item->iova,
1407 item->length);
1408 mutex_unlock(&staccess->lock);
1409 kfree(item);
1410 fput(staccess->file);
1411 return 0;
1412 }
1413 }
1414 mutex_unlock(&staccess->lock);
1415 fput(staccess->file);
1416 return -ENOENT;
1417 }
1418
1419 static int iommufd_test_staccess_release(struct inode *inode,
1420 struct file *filep)
1421 {
1422 struct selftest_access *staccess = filep->private_data;
1423
1424 if (staccess->access) {
1425 iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
1426 iommufd_access_destroy(staccess->access);
1427 }
1428 mutex_destroy(&staccess->lock);
1429 kfree(staccess);
1430 return 0;
1431 }
1432
1433 static const struct iommufd_access_ops selftest_access_ops_pin = {
1434 .needs_pin_pages = 1,
1435 .unmap = iommufd_test_access_unmap,
1436 };
1437
1438 static const struct iommufd_access_ops selftest_access_ops = {
1439 .unmap = iommufd_test_access_unmap,
1440 };
1441
1442 static const struct file_operations iommfd_test_staccess_fops = {
1443 .release = iommufd_test_staccess_release,
1444 };
1445
1446 static struct selftest_access *iommufd_test_alloc_access(void)
1447 {
1448 struct selftest_access *staccess;
1449 struct file *filep;
1450
1451 staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
1452 if (!staccess)
1453 return ERR_PTR(-ENOMEM);
1454 INIT_LIST_HEAD(&staccess->items);
1455 mutex_init(&staccess->lock);
1456
1457 filep = anon_inode_getfile("[iommufd_test_staccess]",
1458 &iommfd_test_staccess_fops, staccess,
1459 O_RDWR);
1460 if (IS_ERR(filep)) {
1461 kfree(staccess);
1462 return ERR_CAST(filep);
1463 }
1464 staccess->file = filep;
1465 return staccess;
1466 }
1467
1468 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
1469 unsigned int ioas_id, unsigned int flags)
1470 {
1471 struct iommu_test_cmd *cmd = ucmd->cmd;
1472 struct selftest_access *staccess;
1473 struct iommufd_access *access;
1474 u32 id;
1475 int fdno;
1476 int rc;
1477
1478 if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
1479 return -EOPNOTSUPP;
1480
1481 staccess = iommufd_test_alloc_access();
1482 if (IS_ERR(staccess))
1483 return PTR_ERR(staccess);
1484
1485 fdno = get_unused_fd_flags(O_CLOEXEC);
1486 if (fdno < 0) {
1487 rc = -ENOMEM;
1488 goto out_free_staccess;
1489 }
1490
1491 access = iommufd_access_create(
1492 ucmd->ictx,
1493 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
1494 &selftest_access_ops_pin :
1495 &selftest_access_ops,
1496 staccess, &id);
1497 if (IS_ERR(access)) {
1498 rc = PTR_ERR(access);
1499 goto out_put_fdno;
1500 }
1501 rc = iommufd_access_attach(access, ioas_id);
1502 if (rc)
1503 goto out_destroy;
1504 cmd->create_access.out_access_fd = fdno;
1505 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1506 if (rc)
1507 goto out_destroy;
1508
1509 staccess->access = access;
1510 fd_install(fdno, staccess->file);
1511 return 0;
1512
1513 out_destroy:
1514 iommufd_access_destroy(access);
1515 out_put_fdno:
1516 put_unused_fd(fdno);
1517 out_free_staccess:
1518 fput(staccess->file);
1519 return rc;
1520 }
1521
1522 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
1523 unsigned int access_id,
1524 unsigned int ioas_id)
1525 {
1526 struct selftest_access *staccess;
1527 int rc;
1528
1529 staccess = iommufd_access_get(access_id);
1530 if (IS_ERR(staccess))
1531 return PTR_ERR(staccess);
1532
1533 rc = iommufd_access_replace(staccess->access, ioas_id);
1534 fput(staccess->file);
1535 return rc;
1536 }
1537
1538 /* Check that the pages in a page array match the pages in the user VA */
1539 static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
1540 size_t npages)
1541 {
1542 for (; npages; npages--) {
1543 struct page *tmp_pages[1];
1544 long rc;
1545
1546 rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
1547 if (rc < 0)
1548 return rc;
1549 if (WARN_ON(rc != 1))
1550 return -EFAULT;
1551 put_page(tmp_pages[0]);
1552 if (tmp_pages[0] != *pages)
1553 return -EBADE;
1554 pages++;
1555 uptr += PAGE_SIZE;
1556 }
1557 return 0;
1558 }
1559
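/*
 * Pin the pages backing [iova, iova + length) through the access object and
 * record them as an item so a later destroy or unmap callback can unpin the
 * same range; when uptr is non-NULL the pinned pages are cross-checked
 * against the user VA.
 */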
1560 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
1561 unsigned int access_id, unsigned long iova,
1562 size_t length, void __user *uptr,
1563 u32 flags)
1564 {
1565 struct iommu_test_cmd *cmd = ucmd->cmd;
1566 struct selftest_access_item *item;
1567 struct selftest_access *staccess;
1568 struct page **pages;
1569 size_t npages;
1570 int rc;
1571
1572 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1573 if (length > 16 * 1024 * 1024)
1574 return -ENOMEM;
1575
1576 if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
1577 return -EOPNOTSUPP;
1578
1579 staccess = iommufd_access_get(access_id);
1580 if (IS_ERR(staccess))
1581 return PTR_ERR(staccess);
1582
1583 if (staccess->access->ops != &selftest_access_ops_pin) {
1584 rc = -EOPNOTSUPP;
1585 goto out_put;
1586 }
1587
1588 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1589 iova = iommufd_test_syz_conv_iova(staccess->access,
1590 &cmd->access_pages.iova);
1591
1592 npages = (ALIGN(iova + length, PAGE_SIZE) -
1593 ALIGN_DOWN(iova, PAGE_SIZE)) /
1594 PAGE_SIZE;
1595 pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
1596 if (!pages) {
1597 rc = -ENOMEM;
1598 goto out_put;
1599 }
1600
1601 /*
1602 * Drivers will need to think very carefully about this locking. The
1603 * core code can do multiple unmaps instantaneously after
1604 * iommufd_access_pin_pages() and *all* the unmaps must not return until
1605 * the range is unpinned. This simple implementation puts a global lock
1606 * around the pin, which may not suit drivers that want this to be a
1607 * performance path. Drivers that get this wrong will trigger WARN_ON
1608 * races and cause EDEADLOCK failures to userspace.
1609 */
1610 mutex_lock(&staccess->lock);
1611 rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
1612 flags & MOCK_FLAGS_ACCESS_WRITE);
1613 if (rc)
1614 goto out_unlock;
1615
1616 /* For syzkaller allow uptr to be NULL to skip this check */
1617 if (uptr) {
1618 rc = iommufd_test_check_pages(
1619 uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
1620 npages);
1621 if (rc)
1622 goto out_unaccess;
1623 }
1624
1625 item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
1626 if (!item) {
1627 rc = -ENOMEM;
1628 goto out_unaccess;
1629 }
1630
1631 item->iova = iova;
1632 item->length = length;
1633 item->id = staccess->next_id++;
1634 list_add_tail(&item->items_elm, &staccess->items);
1635
1636 cmd->access_pages.out_access_pages_id = item->id;
1637 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1638 if (rc)
1639 goto out_free_item;
1640 goto out_unlock;
1641
1642 out_free_item:
1643 list_del(&item->items_elm);
1644 kfree(item);
1645 out_unaccess:
1646 iommufd_access_unpin_pages(staccess->access, iova, length);
1647 out_unlock:
1648 mutex_unlock(&staccess->lock);
1649 kvfree(pages);
1650 out_put:
1651 fput(staccess->file);
1652 return rc;
1653 }
1654
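/*
 * Exercise iommufd_access_rw() by bouncing the user buffer through a kernel
 * buffer for either direction of the transfer.
 */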
1655 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
1656 unsigned int access_id, unsigned long iova,
1657 size_t length, void __user *ubuf,
1658 unsigned int flags)
1659 {
1660 struct iommu_test_cmd *cmd = ucmd->cmd;
1661 struct selftest_access *staccess;
1662 void *tmp;
1663 int rc;
1664
1665 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
1666 if (length > 16 * 1024 * 1024)
1667 return -ENOMEM;
1668
1669 if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
1670 MOCK_FLAGS_ACCESS_SYZ))
1671 return -EOPNOTSUPP;
1672
1673 staccess = iommufd_access_get(access_id);
1674 if (IS_ERR(staccess))
1675 return PTR_ERR(staccess);
1676
1677 tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
1678 if (!tmp) {
1679 rc = -ENOMEM;
1680 goto out_put;
1681 }
1682
1683 if (flags & MOCK_ACCESS_RW_WRITE) {
1684 if (copy_from_user(tmp, ubuf, length)) {
1685 rc = -EFAULT;
1686 goto out_free;
1687 }
1688 }
1689
1690 if (flags & MOCK_FLAGS_ACCESS_SYZ)
1691 iova = iommufd_test_syz_conv_iova(staccess->access,
1692 &cmd->access_rw.iova);
1693
1694 rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
1695 if (rc)
1696 goto out_free;
1697 if (!(flags & MOCK_ACCESS_RW_WRITE)) {
1698 if (copy_to_user(ubuf, tmp, length)) {
1699 rc = -EFAULT;
1700 goto out_free;
1701 }
1702 }
1703
1704 out_free:
1705 kvfree(tmp);
1706 out_put:
1707 fput(staccess->file);
1708 return rc;
1709 }
1710 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
1711 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
1712 __IOMMUFD_ACCESS_RW_SLOW_PATH);
1713
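/*
 * Mark pages dirty in the mock domain from a user-provided bitmap: bit i of
 * @uptr corresponds to the page at @iova + i * @page_size. The number of
 * bits that were set is returned via out_nr_dirty.
 */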
1714 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
1715 unsigned long iova, size_t length,
1716 unsigned long page_size, void __user *uptr,
1717 u32 flags)
1718 {
1719 unsigned long i, max;
1720 struct iommu_test_cmd *cmd = ucmd->cmd;
1721 struct iommufd_hw_pagetable *hwpt;
1722 struct mock_iommu_domain *mock;
1723 int rc, count = 0;
1724 void *tmp;
1725
1726 if (!page_size || !length || iova % page_size || length % page_size ||
1727 !uptr)
1728 return -EINVAL;
1729
1730 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
1731 if (IS_ERR(hwpt))
1732 return PTR_ERR(hwpt);
1733
1734 if (!(mock->flags & MOCK_DIRTY_TRACK) || !mock->iommu.ops->set_dirty) {
1735 rc = -EINVAL;
1736 goto out_put;
1737 }
1738
1739 max = length / page_size;
1740 tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
1741 GFP_KERNEL_ACCOUNT);
1742 if (!tmp) {
1743 rc = -ENOMEM;
1744 goto out_put;
1745 }
1746
1747 if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
1748 rc = -EFAULT;
1749 goto out_free;
1750 }
1751
1752 for (i = 0; i < max; i++) {
1753 if (!test_bit(i, (unsigned long *)tmp))
1754 continue;
1755 mock->iommu.ops->set_dirty(&mock->iommu, iova + i * page_size);
1756 count++;
1757 }
1758
1759 cmd->dirty.out_nr_dirty = count;
1760 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1761 out_free:
1762 kvfree(tmp);
1763 out_put:
1764 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1765 return rc;
1766 }
1767
1768 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
1769 struct iommu_test_cmd *cmd)
1770 {
1771 struct iopf_fault event = {};
1772 struct iommufd_device *idev;
1773
1774 idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
1775 if (IS_ERR(idev))
1776 return PTR_ERR(idev);
1777
1778 event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
1779 if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
1780 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1781 event.fault.type = IOMMU_FAULT_PAGE_REQ;
1782 event.fault.prm.addr = cmd->trigger_iopf.addr;
1783 event.fault.prm.pasid = cmd->trigger_iopf.pasid;
1784 event.fault.prm.grpid = cmd->trigger_iopf.grpid;
1785 event.fault.prm.perm = cmd->trigger_iopf.perm;
1786
1787 iommu_report_device_fault(idev->dev, &event);
1788 iommufd_put_object(ucmd->ictx, &idev->obj);
1789
1790 return 0;
1791 }
1792
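/*
 * Report a selftest vEVENT carrying the device's virtual ID; this fails
 * with -ENOENT unless the device is currently attached through a vIOMMU.
 */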
1793 static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd,
1794 struct iommu_test_cmd *cmd)
1795 {
1796 struct iommu_viommu_event_selftest test = {};
1797 struct iommufd_device *idev;
1798 struct mock_dev *mdev;
1799 int rc = -ENOENT;
1800
1801 idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id);
1802 if (IS_ERR(idev))
1803 return PTR_ERR(idev);
1804 mdev = to_mock_dev(idev->dev);
1805
1806 down_read(&mdev->viommu_rwsem);
1807 if (!mdev->viommu || !mdev->vdev_id)
1808 goto out_unlock;
1809
1810 test.virt_id = mdev->vdev_id;
1811 rc = iommufd_viommu_report_event(&mdev->viommu->core,
1812 IOMMU_VEVENTQ_TYPE_SELFTEST, &test,
1813 sizeof(test));
1814 out_unlock:
1815 up_read(&mdev->viommu_rwsem);
1816 iommufd_put_object(ucmd->ictx, &idev->obj);
1817
1818 return rc;
1819 }
1820
1821 static inline struct iommufd_hw_pagetable *
1822 iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id)
1823 {
1824 struct iommufd_object *pt_obj;
1825
1826 pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY);
1827 if (IS_ERR(pt_obj))
1828 return ERR_CAST(pt_obj);
1829
1830 if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED &&
1831 pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) {
1832 iommufd_put_object(ucmd->ictx, pt_obj);
1833 return ERR_PTR(-EINVAL);
1834 }
1835
1836 return container_of(pt_obj, struct iommufd_hw_pagetable, obj);
1837 }
1838
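/*
 * Check which domain, if any, is attached to the given PASID of the mock
 * device. pasid_check.hwpt_id names the HWPT the caller expects to find
 * attached; an id of 0 asserts that nothing is attached to the PASID.
 */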
1839 static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd,
1840 struct iommu_test_cmd *cmd)
1841 {
1842 u32 hwpt_id = cmd->pasid_check.hwpt_id;
1843 struct iommu_domain *attached_domain;
1844 struct iommu_attach_handle *handle;
1845 struct iommufd_hw_pagetable *hwpt;
1846 struct selftest_obj *sobj;
1847 struct mock_dev *mdev;
1848 int rc = 0;
1849
1850 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1851 if (IS_ERR(sobj))
1852 return PTR_ERR(sobj);
1853
1854 mdev = sobj->idev.mock_dev;
1855
1856 handle = iommu_attach_handle_get(mdev->dev.iommu_group,
1857 cmd->pasid_check.pasid, 0);
1858 if (IS_ERR(handle))
1859 attached_domain = NULL;
1860 else
1861 attached_domain = handle->domain;
1862
1863 /* hwpt_id == 0 means check that the pasid is detached */
1864 if (!hwpt_id) {
1865 if (attached_domain)
1866 rc = -EINVAL;
1867 goto out_sobj;
1868 }
1869
1870 hwpt = iommufd_get_hwpt(ucmd, hwpt_id);
1871 if (IS_ERR(hwpt)) {
1872 rc = PTR_ERR(hwpt);
1873 goto out_sobj;
1874 }
1875
1876 if (attached_domain != hwpt->domain)
1877 rc = -EINVAL;
1878
1879 iommufd_put_object(ucmd->ictx, &hwpt->obj);
1880 out_sobj:
1881 iommufd_put_object(ucmd->ictx, &sobj->obj);
1882 return rc;
1883 }
1884
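/*
 * Attach the selftest device to the HWPT/IOAS in pasid_attach.pt_id on the
 * given PASID. The attach is undone if responding to userspace fails, so a
 * failed ioctl leaves no attachment behind.
 */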
1885 static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd,
1886 struct iommu_test_cmd *cmd)
1887 {
1888 struct selftest_obj *sobj;
1889 int rc;
1890
1891 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1892 if (IS_ERR(sobj))
1893 return PTR_ERR(sobj);
1894
1895 rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid,
1896 &cmd->pasid_attach.pt_id);
1897 if (rc)
1898 goto out_sobj;
1899
1900 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1901 if (rc)
1902 iommufd_device_detach(sobj->idev.idev, cmd->pasid_attach.pasid);
1903
1904 out_sobj:
1905 iommufd_put_object(ucmd->ictx, &sobj->obj);
1906 return rc;
1907 }
1908
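/*
 * Replace whatever is attached on the PASID with the HWPT/IOAS given in
 * pasid_attach.pt_id. Unlike attach, there is no rollback if responding to
 * userspace fails, since the previous attachment is already gone.
 */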
1909 static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd,
1910 struct iommu_test_cmd *cmd)
1911 {
1912 struct selftest_obj *sobj;
1913 int rc;
1914
1915 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1916 if (IS_ERR(sobj))
1917 return PTR_ERR(sobj);
1918
1919 rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid,
1920 &cmd->pasid_attach.pt_id);
1921 if (rc)
1922 goto out_sobj;
1923
1924 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
1925
1926 out_sobj:
1927 iommufd_put_object(ucmd->ictx, &sobj->obj);
1928 return rc;
1929 }
1930
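/* Detach the given PASID of the selftest device from its HWPT/IOAS. */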
1931 static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd,
1932 struct iommu_test_cmd *cmd)
1933 {
1934 struct selftest_obj *sobj;
1935
1936 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id);
1937 if (IS_ERR(sobj))
1938 return PTR_ERR(sobj);
1939
1940 iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid);
1941 iommufd_put_object(ucmd->ictx, &sobj->obj);
1942 return 0;
1943 }
1944
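/*
 * Destroy callback for selftest objects: drop the mock device's non-PASID
 * (RID) attachment, unbind it from iommufd and then destroy the device.
 */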
1945 void iommufd_selftest_destroy(struct iommufd_object *obj)
1946 {
1947 struct selftest_obj *sobj = to_selftest_obj(obj);
1948
1949 switch (sobj->type) {
1950 case TYPE_IDEV:
1951 iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID);
1952 iommufd_device_unbind(sobj->idev.idev);
1953 mock_dev_destroy(sobj->idev.mock_dev);
1954 break;
1955 }
1956 }
1957
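/*
 * A minimal dma-buf exporter used to exercise iommufd's dma-buf import
 * path. The buffer is ordinary kernel memory, map_dma_buf is deliberately
 * unsupported so only the physical-vector lookup below can be used, and
 * "revoked" simulates the exporter pulling the memory away.
 */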
1958 struct iommufd_test_dma_buf {
1959 void *memory;
1960 size_t length;
1961 bool revoked;
1962 };
1963
1964 static int iommufd_test_dma_buf_attach(struct dma_buf *dmabuf,
1965 struct dma_buf_attachment *attachment)
1966 {
1967 return 0;
1968 }
1969
1970 static void iommufd_test_dma_buf_detach(struct dma_buf *dmabuf,
1971 struct dma_buf_attachment *attachment)
1972 {
1973 }
1974
1975 static struct sg_table *
1976 iommufd_test_dma_buf_map(struct dma_buf_attachment *attachment,
1977 enum dma_data_direction dir)
1978 {
1979 return ERR_PTR(-EOPNOTSUPP);
1980 }
1981
1982 static void iommufd_test_dma_buf_unmap(struct dma_buf_attachment *attachment,
1983 struct sg_table *sgt,
1984 enum dma_data_direction dir)
1985 {
1986 }
1987
1988 static void iommufd_test_dma_buf_release(struct dma_buf *dmabuf)
1989 {
1990 struct iommufd_test_dma_buf *priv = dmabuf->priv;
1991
1992 kfree(priv->memory);
1993 kfree(priv);
1994 }
1995
1996 static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
1997 .attach = iommufd_test_dma_buf_attach,
1998 .detach = iommufd_test_dma_buf_detach,
1999 .map_dma_buf = iommufd_test_dma_buf_map,
2000 .release = iommufd_test_dma_buf_release,
2001 .unmap_dma_buf = iommufd_test_dma_buf_unmap,
2002 };
2003
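/*
 * Called by iommufd, with the dma-buf reservation lock held, to learn the
 * physical range backing the mock dma-buf. Buffers from other exporters
 * are rejected and a revoked buffer reports -ENODEV.
 */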
2004 int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
2005 struct dma_buf_phys_vec *phys)
2006 {
2007 struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;
2008
2009 dma_resv_assert_held(attachment->dmabuf->resv);
2010
2011 if (attachment->dmabuf->ops != &iommufd_test_dmabuf_ops)
2012 return -EOPNOTSUPP;
2013
2014 if (priv->revoked)
2015 return -ENODEV;
2016
2017 phys->paddr = virt_to_phys(priv->memory);
2018 phys->len = priv->length;
2019 return 0;
2020 }
2021
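/*
 * Allocate a zeroed kernel buffer of up to 512 pages, wrap it in the mock
 * exporter above and return the new dma-buf file descriptor to userspace.
 */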
2022 static int iommufd_test_dmabuf_get(struct iommufd_ucmd *ucmd,
2023 unsigned int open_flags,
2024 size_t len)
2025 {
2026 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
2027 struct iommufd_test_dma_buf *priv;
2028 struct dma_buf *dmabuf;
2029 int rc;
2030
2031 len = ALIGN(len, PAGE_SIZE);
2032 if (len == 0 || len > PAGE_SIZE * 512)
2033 return -EINVAL;
2034
2035 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2036 if (!priv)
2037 return -ENOMEM;
2038
2039 priv->length = len;
2040 priv->memory = kzalloc(len, GFP_KERNEL);
2041 if (!priv->memory) {
2042 rc = -ENOMEM;
2043 goto err_free;
2044 }
2045
2046 exp_info.ops = &iommufd_test_dmabuf_ops;
2047 exp_info.size = len;
2048 exp_info.flags = open_flags;
2049 exp_info.priv = priv;
2050
2051 dmabuf = dma_buf_export(&exp_info);
2052 if (IS_ERR(dmabuf)) {
2053 rc = PTR_ERR(dmabuf);
2054 goto err_free;
2055 }
2056
2057 return dma_buf_fd(dmabuf, open_flags);
2058
2059 err_free:
2060 kfree(priv->memory);
2061 kfree(priv);
2062 return rc;
2063 }
2064
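/*
 * Set or clear the revoked state of a mock dma-buf under its reservation
 * lock and tell importers about it via dma_buf_move_notify(), mimicking an
 * exporter revoking access to the memory.
 */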
2065 static int iommufd_test_dmabuf_revoke(struct iommufd_ucmd *ucmd, int fd,
2066 bool revoked)
2067 {
2068 struct iommufd_test_dma_buf *priv;
2069 struct dma_buf *dmabuf;
2070 int rc = 0;
2071
2072 dmabuf = dma_buf_get(fd);
2073 if (IS_ERR(dmabuf))
2074 return PTR_ERR(dmabuf);
2075
2076 if (dmabuf->ops != &iommufd_test_dmabuf_ops) {
2077 rc = -EOPNOTSUPP;
2078 goto err_put;
2079 }
2080
2081 priv = dmabuf->priv;
2082 dma_resv_lock(dmabuf->resv, NULL);
2083 priv->revoked = revoked;
2084 dma_buf_move_notify(dmabuf);
2085 dma_resv_unlock(dmabuf->resv);
2086
2087 err_put:
2088 dma_buf_put(dmabuf);
2089 return rc;
2090 }
2091
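/*
 * Entry point for IOMMU_TEST_CMD: dispatch on cmd->op to the handlers
 * above. As a rough sketch of how the userspace selftest drives an op
 * (field names as consumed by the handlers here; the exact command macro
 * and any additional mandatory fields come from iommufd_test.h):
 *
 *	struct iommu_test_cmd cmd = {
 *		.op = IOMMU_TEST_OP_DIRTY,
 *		.id = hwpt_id,
 *		.dirty = { .iova = iova, .length = length,
 *			   .page_size = page_size,
 *			   .uptr = (uintptr_t)dirty_bitmap },
 *	};
 *	ioctl(iommufd_fd, IOMMU_TEST_CMD, &cmd);
 */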
2092 int iommufd_test(struct iommufd_ucmd *ucmd)
2093 {
2094 struct iommu_test_cmd *cmd = ucmd->cmd;
2095
2096 switch (cmd->op) {
2097 case IOMMU_TEST_OP_ADD_RESERVED:
2098 return iommufd_test_add_reserved(ucmd, cmd->id,
2099 cmd->add_reserved.start,
2100 cmd->add_reserved.length);
2101 case IOMMU_TEST_OP_MOCK_DOMAIN:
2102 case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
2103 return iommufd_test_mock_domain(ucmd, cmd);
2104 case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
2105 return iommufd_test_mock_domain_replace(
2106 ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
2107 case IOMMU_TEST_OP_MD_CHECK_MAP:
2108 return iommufd_test_md_check_pa(
2109 ucmd, cmd->id, cmd->check_map.iova,
2110 cmd->check_map.length,
2111 u64_to_user_ptr(cmd->check_map.uptr));
2112 case IOMMU_TEST_OP_MD_CHECK_REFS:
2113 return iommufd_test_md_check_refs(
2114 ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
2115 cmd->check_refs.length, cmd->check_refs.refs);
2116 case IOMMU_TEST_OP_MD_CHECK_IOTLB:
2117 return iommufd_test_md_check_iotlb(ucmd, cmd->id,
2118 cmd->check_iotlb.id,
2119 cmd->check_iotlb.iotlb);
2120 case IOMMU_TEST_OP_DEV_CHECK_CACHE:
2121 return iommufd_test_dev_check_cache(ucmd, cmd->id,
2122 cmd->check_dev_cache.id,
2123 cmd->check_dev_cache.cache);
2124 case IOMMU_TEST_OP_CREATE_ACCESS:
2125 return iommufd_test_create_access(ucmd, cmd->id,
2126 cmd->create_access.flags);
2127 case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
2128 return iommufd_test_access_replace_ioas(
2129 ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
2130 case IOMMU_TEST_OP_ACCESS_PAGES:
2131 return iommufd_test_access_pages(
2132 ucmd, cmd->id, cmd->access_pages.iova,
2133 cmd->access_pages.length,
2134 u64_to_user_ptr(cmd->access_pages.uptr),
2135 cmd->access_pages.flags);
2136 case IOMMU_TEST_OP_ACCESS_RW:
2137 return iommufd_test_access_rw(
2138 ucmd, cmd->id, cmd->access_rw.iova,
2139 cmd->access_rw.length,
2140 u64_to_user_ptr(cmd->access_rw.uptr),
2141 cmd->access_rw.flags);
2142 case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
2143 return iommufd_test_access_item_destroy(
2144 ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
2145 case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
2146 /* Protect _batch_init(): the limit cannot be less than elmsz */
2147 if (cmd->memory_limit.limit <
2148 sizeof(unsigned long) + sizeof(u32))
2149 return -EINVAL;
2150 iommufd_test_memory_limit = cmd->memory_limit.limit;
2151 return 0;
2152 case IOMMU_TEST_OP_DIRTY:
2153 return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
2154 cmd->dirty.length,
2155 cmd->dirty.page_size,
2156 u64_to_user_ptr(cmd->dirty.uptr),
2157 cmd->dirty.flags);
2158 case IOMMU_TEST_OP_TRIGGER_IOPF:
2159 return iommufd_test_trigger_iopf(ucmd, cmd);
2160 case IOMMU_TEST_OP_TRIGGER_VEVENT:
2161 return iommufd_test_trigger_vevent(ucmd, cmd);
2162 case IOMMU_TEST_OP_PASID_ATTACH:
2163 return iommufd_test_pasid_attach(ucmd, cmd);
2164 case IOMMU_TEST_OP_PASID_REPLACE:
2165 return iommufd_test_pasid_replace(ucmd, cmd);
2166 case IOMMU_TEST_OP_PASID_DETACH:
2167 return iommufd_test_pasid_detach(ucmd, cmd);
2168 case IOMMU_TEST_OP_PASID_CHECK_HWPT:
2169 return iommufd_test_pasid_check_hwpt(ucmd, cmd);
2170 case IOMMU_TEST_OP_DMABUF_GET:
2171 return iommufd_test_dmabuf_get(ucmd, cmd->dmabuf_get.open_flags,
2172 cmd->dmabuf_get.length);
2173 case IOMMU_TEST_OP_DMABUF_REVOKE:
2174 return iommufd_test_dmabuf_revoke(ucmd,
2175 cmd->dmabuf_revoke.dmabuf_fd,
2176 cmd->dmabuf_revoke.revoked);
2177 default:
2178 return -EOPNOTSUPP;
2179 }
2180 }
2181
2182 bool iommufd_should_fail(void)
2183 {
2184 return should_fail(&fail_iommufd, 1);
2185 }
2186
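/*
 * Selftest module init: create the fail_iommufd fault injection attribute
 * in debugfs, register a dummy platform device and the mock bus, then add
 * the mock IOMMU instance to sysfs and register it on that bus before
 * setting up the IOPF queue and PASID width.
 */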
2187 int __init iommufd_test_init(void)
2188 {
2189 struct platform_device_info pdevinfo = {
2190 .name = "iommufd_selftest_iommu",
2191 };
2192 int rc;
2193
2194 dbgfs_root =
2195 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
2196
2197 selftest_iommu_dev = platform_device_register_full(&pdevinfo);
2198 if (IS_ERR(selftest_iommu_dev)) {
2199 rc = PTR_ERR(selftest_iommu_dev);
2200 goto err_dbgfs;
2201 }
2202
2203 rc = bus_register(&iommufd_mock_bus_type.bus);
2204 if (rc)
2205 goto err_platform;
2206
2207 rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev,
2208 &selftest_iommu_dev->dev, NULL, "%s",
2209 dev_name(&selftest_iommu_dev->dev));
2210 if (rc)
2211 goto err_bus;
2212
2213 rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops,
2214 &iommufd_mock_bus_type.bus,
2215 &iommufd_mock_bus_type.nb);
2216 if (rc)
2217 goto err_sysfs;
2218
2219 refcount_set(&mock_iommu.users, 1);
2220 init_completion(&mock_iommu.complete);
2221
2222 mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
2223 mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);
2224
2225 return 0;
2226
2227 err_sysfs:
2228 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2229 err_bus:
2230 bus_unregister(&iommufd_mock_bus_type.bus);
2231 err_platform:
2232 platform_device_unregister(selftest_iommu_dev);
2233 err_dbgfs:
2234 debugfs_remove_recursive(dbgfs_root);
2235 return rc;
2236 }
2237
2238 static void iommufd_test_wait_for_users(void)
2239 {
2240 if (refcount_dec_and_test(&mock_iommu.users))
2241 return;
2242 /*
2243 * Time out waiting for iommu device user count to become 0.
2244 *
2245 * Note that this is only an example: the selftest is built into the
2246 * iommufd module, so the mock iommu device is only unplugged when the
2247 * module is unloaded, which cannot happen while any iommufd FD is still
2248 * open. Hence this WARN_ON is not expected to trigger.
2249 */
2250 WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
2251 msecs_to_jiffies(10000)));
2252 }
2253
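/*
 * Undo iommufd_test_init() in reverse order, after waiting for all users
 * of the mock IOMMU device to drop their references.
 */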
2254 void iommufd_test_exit(void)
2255 {
2256 if (mock_iommu_iopf_queue) {
2257 iopf_queue_free(mock_iommu_iopf_queue);
2258 mock_iommu_iopf_queue = NULL;
2259 }
2260
2261 iommufd_test_wait_for_users();
2262 iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
2263 iommu_device_unregister_bus(&mock_iommu.iommu_dev,
2264 &iommufd_mock_bus_type.bus,
2265 &iommufd_mock_bus_type.nb);
2266 bus_unregister(&iommufd_mock_bus_type.bus);
2267 platform_device_unregister(selftest_iommu_dev);
2268 debugfs_remove_recursive(dbgfs_root);
2269 }
2270
2271 MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
2272