/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#ifndef __SELFTEST_IOMMUFD_UTILS
#define __SELFTEST_IOMMUFD_UTILS

#include <unistd.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <assert.h>

#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"

/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}

static void *buffer;
static unsigned long BUFFER_SIZE;

static unsigned long PAGE_SIZE;

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
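
/*
 * Illustrative sketch (not part of the original header): check_refs() is
 * normally run against user memory that has been mapped into an IOAS, such
 * as the global buffer above. The __u64 iova local and the expected
 * refcount value below are placeholders, not values the kernel guarantees.
 *
 *	__u64 iova;
 *
 *	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
 *	check_refs(buffer, BUFFER_SIZE, 2);
 */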

static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
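
/*
 * Illustrative sketch (not part of the original header): a typical caller
 * creates a mock domain bound to an existing IOAS and later destroys the
 * returned selftest device object. The fixture fields (self->fd,
 * self->ioas_id) are assumed to be provided by the test program that
 * includes this header.
 *
 *	__u32 stdev_id, hwpt_id;
 *
 *	test_cmd_mock_domain(self->ioas_id, &stdev_id, &hwpt_id, NULL);
 *	...
 *	test_ioctl_destroy(stdev_id);
 */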

static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))

static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))

static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                  \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags,   \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)   \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                      \
				     self->fd, device_id, pt_id, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,         \
				   data_type, data, data_len)                \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len)                \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \
					  hwpt_id, data_type, data, data_len))
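
/*
 * Illustrative sketch (not part of the original header): with an idev_id
 * returned by test_cmd_mock_domain(), a caller can allocate a
 * kernel-managed HWPT on top of the IOAS and free it again. The flags value
 * 0 and the destroy step are assumptions about the caller, not requirements
 * of the macros.
 *
 *	__u32 hwpt_id;
 *
 *	test_cmd_hwpt_alloc(idev_id, self->ioas_id, 0, &hwpt_id);
 *	test_ioctl_destroy(hwpt_id);
 */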

#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                       \
	({                                                                     \
		int i;                                                         \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)             \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected);       \
	})

static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})
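
/*
 * Illustrative sketch (not part of the original header): invalidation
 * requests are passed as an array whose element layout must match the
 * chosen data_type. The selftest request struct and its iotlb_id field come
 * from iommufd_test.h; treating index 0 as a valid IOTLB slot of a nested
 * HWPT is an assumption made only for this example.
 *
 *	struct iommu_hwpt_invalidate_selftest reqs[1] = {
 *		{ .iotlb_id = 0 },
 *	};
 *	uint32_t num = 1;
 *
 *	test_cmd_hwpt_invalidate(nested_hwpt_id, reqs,
 *				 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
 *				 sizeof(*reqs), &num);
 */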

static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	return 0;
}
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))

static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return -errno;
	return 0;
}
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))

static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
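
/*
 * Illustrative sketch (not part of the original header): callers are
 * expected to size the bitmap with one bit per page of the range being
 * read, and to enable dirty tracking on the HWPT first. The calloc()-based
 * allocation and the assumption that length is a multiple of
 * page_size * BITS_PER_BYTE are choices made only for this example.
 *
 *	size_t bitmap_size = length / page_size / BITS_PER_BYTE;
 *	__u64 *bitmap = calloc(1, bitmap_size);
 *
 *	test_cmd_set_dirty_tracking(hwpt_id, true);
 *	test_cmd_get_dirty_bitmap(self->fd, hwpt_id, length, iova, page_size,
 *				  bitmap, 0);
 */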

static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		return -ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))

static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 bitmap_size, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	unsigned long nbits = bitmap_size * BITS_PER_BYTE;
	unsigned long j, i, nr = nbits / pteset ?: 1;
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
				bitmap, bitmap_size, flags, _metadata)     \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))

static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))

static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))

static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))

static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))

static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})

static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
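
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * maps a buffer at an allocator-chosen IOVA and later unmaps exactly that
 * range. The __u64 iova local is an assumption about the caller.
 *
 *	__u64 iova;
 *
 *	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
 *	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
 */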

static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
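
/*
 * Illustrative sketch (not part of the original header): a test can lower
 * the temporary memory limit to force the kernel to split its work into
 * small batches, then restore the default afterwards. The value 32 is only
 * an example, not a required setting.
 *
 *	test_ioctl_set_temp_memory_limit(32);
 *	... exercise map/unmap paths that must split batches ...
 *	test_ioctl_set_default_memory_limit();
 */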

static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}

#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
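
/*
 * Illustrative sketch (not part of the original header): EXPECT_ERRNO()
 * wraps any helper that returns -1 and sets errno on failure. The specific
 * errno below is only an example, not a value guaranteed by the kernel.
 *
 *	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, self->ioas_id));
 */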
/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
				 size_t data_len, uint32_t *capabilities)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if the user buffer is larger than
	 * the data that the kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	if (info) {
		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
			assert(!info->flags);
	}

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}

#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL))

#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL))
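
/*
 * Illustrative sketch (not part of the original header): a caller normally
 * passes a struct iommu_test_hw_info so the helper can validate the
 * selftest report. The idev_id is assumed to come from
 * test_cmd_mock_domain().
 *
 *	struct iommu_test_hw_info info;
 *
 *	test_cmd_get_hw_info(idev_id, &info, sizeof(info));
 */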

#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))

#endif /* __SELFTEST_IOMMUFD_UTILS */
687