/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#ifndef __SELFTEST_IOMMUFD_UTILS
#define __SELFTEST_IOMMUFD_UTILS

#include <unistd.h>
#include <stddef.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <assert.h>

#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"

/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}

static void *buffer;
static unsigned long BUFFER_SIZE;

static unsigned long PAGE_SIZE;

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
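
/*
 * For example, given a struct { __u32 a; __u64 b; }, offsetofend(T, a) is 4:
 * the first byte past 'a'. _test_cmd_get_hw_info() below uses this to decide
 * whether the kernel's reply was long enough to cover a given field.
 */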

/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs) \
	({ \
		struct iommu_test_cmd test_cmd = { \
			.size = sizeof(test_cmd), \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS, \
			.check_refs = { .length = _length, \
					.uptr = (uintptr_t)(_ptr), \
					.refs = _refs }, \
		}; \
		ASSERT_EQ(0, \
			  ioctl(self->fd, \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd)); \
	})
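
/*
 * A sketch of typical use, assuming the usual fixture (self->fd open on
 * /dev/iommu) and the global mmap'd buffer declared above:
 *
 *	check_refs(buffer, BUFFER_SIZE, 1);
 *
 * This fails the test unless every page backing the range has exactly the
 * requested refcount.
 */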

static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
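
/*
 * Example flow, as a sketch (ioas_id would come from test_ioctl_ioas_alloc()
 * further down): create a mock device attached to the IOAS and capture the
 * returned object IDs for later teardown with test_ioctl_destroy():
 *
 *	__u32 stdev_id, hwpt_id;
 *
 *	test_cmd_mock_domain(self->ioas_id, &stdev_id, &hwpt_id, NULL);
 */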

static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \
						 stdev_id, hwpt_id, idev_id))
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \
						 stdev_id, hwpt_id, NULL))

static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

#define test_cmd_mock_domain_replace(stdev_id, pt_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id) \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
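
/*
 * Sketch: swap the page table the mock device is attached to. As the
 * selftests use it, pt_id may name either an IOAS or an allocated hwpt
 * (new_ioas_id here stands for any other valid IOAS):
 *
 *	test_cmd_mock_domain_replace(stdev_id, new_ioas_id);
 */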

static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len) \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))

#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id, \
				 data_type, data, data_len) \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data, \
					  data_len))
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags, \
				 hwpt_id, data_type, data, data_len) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data, \
					  data_len))
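
/*
 * Sketch of allocating a nested hwpt with the mock driver's data format
 * (struct iommu_hwpt_selftest, IOMMU_TEST_IOTLB_DEFAULT and
 * IOMMU_HWPT_DATA_SELFTEST are assumed to come from the test/uapi headers):
 *
 *	struct iommu_hwpt_selftest data = { .iotlb = IOMMU_TEST_IOTLB_DEFAULT };
 *	__u32 parent_id, nested_id;
 *
 *	test_cmd_hwpt_alloc(idev_id, ioas_id, IOMMU_HWPT_ALLOC_NEST_PARENT,
 *			    &parent_id);
 *	test_cmd_hwpt_alloc_nested(idev_id, parent_id, 0, &nested_id,
 *				   IOMMU_HWPT_DATA_SELFTEST, &data,
 *				   sizeof(data));
 */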

#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected) \
	({ \
		struct iommu_test_cmd test_cmd = { \
			.size = sizeof(test_cmd), \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB, \
			.id = hwpt_id, \
			.check_iotlb = { \
				.id = iotlb_id, \
				.iotlb = expected, \
			}, \
		}; \
		ASSERT_EQ(0, \
			  ioctl(self->fd, \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd)); \
	})

#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected) \
	({ \
		int i; \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++) \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected); \
	})

static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs) \
	({ \
		ASSERT_EQ(0, \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs, \
						    data_type, lreq, nreqs)); \
	})
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs) \
	({ \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate( \
					     self->fd, hwpt_id, reqs, \
					     data_type, lreq, nreqs)); \
	})
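
/*
 * Note nreqs is in/out: pass the number of entries in the request array, and
 * on return it holds how many the kernel consumed (filled on failure too, to
 * report partial progress). A sketch with the mock driver's request format
 * (struct iommu_hwpt_invalidate_selftest and the two constants are assumed
 * to come from the test/uapi headers):
 *
 *	struct iommu_hwpt_invalidate_selftest inv = {
 *		.flags = IOMMU_TEST_INVALIDATE_FLAG_ALL,
 *	};
 *	uint32_t num = 1;
 *
 *	test_cmd_hwpt_invalidate(nested_id, &inv,
 *				 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
 *				 sizeof(inv), &num);
 */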

static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	return 0;
}
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))

static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return -errno;
	return 0;
}
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))

static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \
				  bitmap, flags) \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
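
/*
 * The caller sizes the bitmap: one bit per page_size unit of the range,
 * i.e. length / page_size bits, rounded up to unsigned long granularity.
 * A sketch:
 *
 *	__u64 nbits = length / page_size;
 *	__u64 *bitmap = calloc(DIV_ROUND_UP(nbits, BITS_PER_LONG),
 *			       sizeof(unsigned long));
 *
 *	test_cmd_get_dirty_bitmap(self->fd, hwpt_id, length, iova, page_size,
 *				  bitmap, 0);
 */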

static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		return -ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr) \
	ASSERT_EQ(0, \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))

static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes unless NO_CLEAR was set */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size, \
				bitmap, bitmap_size, flags, _metadata) \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap, \
					      bitmap_size, flags, _metadata))
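
/*
 * Sketch of driving the whole check above with one dirty-tracking PTE per
 * page (pte_size == page_size, so nbits is one bit per page of the mapped
 * range; _metadata comes from the harness):
 *
 *	test_mock_dirty_bitmaps(hwpt_id, BUFFER_SIZE, iova, PAGE_SIZE,
 *				PAGE_SIZE, bitmap, BUFFER_SIZE / PAGE_SIZE,
 *				0, _metadata);
 */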

static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
#define test_cmd_create_access(ioas_id, access_id, flags) \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))

static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))

static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
#define test_cmd_destroy_access_pages(access_id, access_pages_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages( \
				     self->fd, access_id, access_pages_id))

static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))

static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
#define test_ioctl_ioas_alloc(id) \
	({ \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id)); \
	})
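
/*
 * Sketch: allocate a fresh IOAS; the macro also asserts the returned ID is
 * non-zero (the selftests treat 0 as an invalid object ID):
 *
 *	__u32 ioas_id;
 *
 *	test_ioctl_ioas_alloc(&ioas_id);
 */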

static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
#define test_ioctl_ioas_map(buffer, length, iova_p) \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p, \
					  IOMMU_IOAS_MAP_WRITEABLE | \
					  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p) \
	EXPECT_ERRNO(_errno, \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p, \
					  IOMMU_IOAS_MAP_WRITEABLE | \
					  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p) \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p, \
					  IOMMU_IOAS_MAP_WRITEABLE | \
					  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_fixed(buffer, length, iova) \
	({ \
		__u64 __iova = iova; \
		ASSERT_EQ(0, _test_ioctl_ioas_map( \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova, \
				     IOMMU_IOAS_MAP_FIXED_IOVA | \
				     IOMMU_IOAS_MAP_WRITEABLE | \
				     IOMMU_IOAS_MAP_READABLE)); \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova) \
	({ \
		__u64 __iova = iova; \
		ASSERT_EQ(0, \
			  _test_ioctl_ioas_map( \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA | \
				  IOMMU_IOAS_MAP_WRITEABLE | \
				  IOMMU_IOAS_MAP_READABLE)); \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \
	({ \
		__u64 __iova = iova; \
		EXPECT_ERRNO(_errno, \
			     _test_ioctl_ioas_map( \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova, \
				     IOMMU_IOAS_MAP_FIXED_IOVA | \
				     IOMMU_IOAS_MAP_WRITEABLE | \
				     IOMMU_IOAS_MAP_READABLE)); \
	})

static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
#define test_ioctl_ioas_unmap(iova, length) \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

#define test_ioctl_ioas_unmap_id(ioas_id, iova, length) \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length) \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
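
/*
 * Sketch of a map/unmap round trip on the fixture's IOAS. Without
 * IOMMU_IOAS_MAP_FIXED_IOVA the kernel picks the IOVA and returns it
 * through the pointer:
 *
 *	__u64 iova;
 *
 *	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
 *	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
 */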

static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)

static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}

#define EXPECT_ERRNO(expected_errno, cmd) \
	({ \
		ASSERT_EQ(-1, cmd); \
		EXPECT_EQ(expected_errno, errno); \
	})

/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
				 size_t data_len, uint32_t *capabilities)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if user buffer is larger than
	 * the data that kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	if (info) {
		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
			assert(!info->flags);
	}

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}

#define test_cmd_get_hw_info(device_id, data, data_len) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL))

#define test_err_get_hw_info(_errno, device_id, data, data_len) \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL))

#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
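
/*
 * Sketch: query the mock device. A buffer smaller than the kernel's struct
 * is legal (the helper above tolerates it, and checks that any trailing
 * bytes of a larger buffer come back zeroed):
 *
 *	struct iommu_test_hw_info info;
 *
 *	test_cmd_get_hw_info(idev_id, &info, sizeof(info));
 */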

static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
	if (ret)
		return ret;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}

#define test_ioctl_fault_alloc(fault_id, fault_fd) \
	({ \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd)); \
		ASSERT_NE(0, *(fault_id)); \
		ASSERT_NE(0, *(fault_fd)); \
	})
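
/*
 * Sketch: allocate a fault queue, then hand fault_id to
 * test_cmd_hwpt_alloc_iopf() with IOMMU_HWPT_FAULT_ID_VALID so that page
 * faults on that hwpt are delivered through fault_fd:
 *
 *	__u32 fault_id, fault_fd;
 *
 *	test_ioctl_fault_alloc(&fault_id, &fault_fd);
 */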

static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = 0x1,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes <= 0)
		return -EIO;

	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes <= 0)
		return -EIO;

	return 0;
}
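
/*
 * Sketch using the macro below: inject a fault on the mock device; the
 * helper above then reads the fault back from fault_fd and acknowledges it,
 * so a zero return exercises the whole IOPF round trip:
 *
 *	test_cmd_trigger_iopf(idev_id, fault_fd);
 */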

#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))

#endif /* __SELFTEST_IOMMUFD_UTILS */