/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#ifndef __SELFTEST_IOMMUFD_UTILS
#define __SELFTEST_IOMMUFD_UTILS

#include <unistd.h>
#include <stddef.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

#include "../kselftest_harness.h"
#include "../../../../drivers/iommu/iommufd/iommufd_test.h"

/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

static inline bool test_bit(unsigned int nr, unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
}

static void *buffer;
static unsigned long BUFFER_SIZE;

static void *mfd_buffer;
static int mfd;

static unsigned long PAGE_SIZE;

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
{
	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
	int mfd = memfd_create("buffer", mfd_flags);

	if (mfd < 0)
		return MAP_FAILED;
	if (ftruncate(mfd, length)) {
		close(mfd);
		return MAP_FAILED;
	}
	*mfd_p = mfd;
	return mmap(0, length, prot, flags, mfd, 0);
}

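/*
 * Example usage of memfd_mmap() (an illustrative sketch, not called by the
 * tests; the 2MB size and error handling are assumptions for the example):
 *
 *	int example_mfd;
 *	size_t len = 2UL * 1024 * 1024;
 *	void *buf = memfd_mmap(len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED | MAP_HUGETLB, &example_mfd);
 *
 *	if (buf == MAP_FAILED)
 *		return -1;
 *	munmap(buf, len);
 *	close(example_mfd);
 */
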
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})

static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))

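/*
 * Typical FIXTURE_SETUP flow (sketch only; assumes a fixture with ->fd,
 * ->ioas_id, ->stdev_id, and ->hwpt_id members, mirroring iommufd.c):
 *
 *	self->fd = open("/dev/iommu", O_RDWR);
 *	ASSERT_NE(-1, self->fd);
 *	test_ioctl_ioas_alloc(&self->ioas_id);
 *	test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
 *			     &self->hwpt_id, NULL);
 */
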
static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))

static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))

static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                  \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags,   \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)   \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                      \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,         \
				   data_type, data, data_len)                \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len)                \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))

#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,    \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,     \
				 hwpt_id, data_type, data, data_len)            \
	EXPECT_ERRNO(_errno,                                                    \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))

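/*
 * Example: build a two-stage (nested) configuration (sketch; device_id and
 * ioas_id are assumed to come from the fixture, and the iotlb seed value
 * is arbitrary test data):
 *
 *	struct iommu_hwpt_selftest data = { .iotlb = 0xbadbeef };
 *	__u32 parent_hwpt_id, nested_hwpt_id;
 *
 *	test_cmd_hwpt_alloc(device_id, ioas_id,
 *			    IOMMU_HWPT_ALLOC_NEST_PARENT, &parent_hwpt_id);
 *	test_cmd_hwpt_alloc_nested(device_id, parent_hwpt_id, 0,
 *				   &nested_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
 *				   &data, sizeof(data));
 */
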
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                       \
	({                                                                     \
		int i;                                                         \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)             \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected);       \
	})

#define test_cmd_dev_check_cache(device_id, cache_id, expected)                \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,                   \
			.id = device_id,                                       \
			.check_dev_cache = {                                   \
				.id = cache_id,                                \
				.cache = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0, ioctl(self->fd,                                   \
				   _IOMMU_TEST_CMD(                            \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE),     \
				   &test_cmd));                                \
	})

#define test_cmd_dev_check_cache_all(device_id, expected)                      \
	({                                                                     \
		int c;                                                         \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)                       \
			test_cmd_dev_check_cache(device_id, c, expected);      \
	})

static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})

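/*
 * Example: flush one entry of a nested domain's mock IOTLB (sketch; the
 * request layout is struct iommu_hwpt_invalidate_selftest from
 * iommufd_test.h, and nested_hwpt_id is assumed to be a nested HWPT):
 *
 *	struct iommu_hwpt_invalidate_selftest inv_req = {
 *		.flags = 0,
 *		.iotlb_id = 0,
 *	};
 *	uint32_t num = 1;
 *
 *	test_cmd_hwpt_invalidate(nested_hwpt_id, &inv_req,
 *				 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
 *				 sizeof(inv_req), &num);
 *
 * On return, num holds the number of entries the kernel consumed.
 */
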
static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                  \
	({                                                                     \
		ASSERT_EQ(0,                                                   \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,  \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, \
					lreq, nreqs));                         \
	})
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq,   \
				 nreqs)                                        \
	({                                                                     \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(              \
					     self->fd, viommu_id, reqs,        \
					     data_type, lreq, nreqs));         \
	})

static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	return 0;
}
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))

static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return -errno;
	return 0;
}
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))

static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))

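/*
 * Example: size and read back a dirty bitmap (sketch; assumes hwpt_id was
 * allocated with IOMMU_HWPT_ALLOC_DIRTY_TRACKING and that iova/length
 * describe an existing mapping):
 *
 *	size_t nbits = length / page_size;
 *	__u64 *bitmap = calloc(DIV_ROUND_UP(nbits, BITS_PER_LONG),
 *			       sizeof(*bitmap));
 *
 *	test_cmd_set_dirty_tracking(hwpt_id, true);
 *	... run DMA ...
 *	test_cmd_get_dirty_bitmap(self->fd, hwpt_id, length, iova,
 *				  page_size, bitmap, 0);
 *	free(bitmap);
 */
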
static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		return ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))

static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes unless NO_CLEAR is set */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
				bitmap, bitmap_size, flags, _metadata)     \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))

static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))

static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))

static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))

static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))

static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})

static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
						  IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

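/*
 * Example: map one page at a fixed IOVA and unmap it again (sketch;
 * MOCK_APERTURE_START is assumed to be an IOVA inside the mock domain's
 * aperture, as defined by the test programs):
 *
 *	__u64 iova = MOCK_APERTURE_START;
 *
 *	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, iova);
 *	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
 *
 * The unmap helpers are defined just below.
 */
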
static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))

static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
	*iova = cmd.iova;
	return ret;
}

#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                   \
	ASSERT_EQ(0,                                                           \
		  _test_ioctl_ioas_map_file(                                   \
			  self->fd, self->ioas_id, mfd, start, length, iova_p, \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)     \
	EXPECT_ERRNO(                                                        \
		_errno,                                                      \
		_test_ioctl_ioas_map_file(                                   \
			self->fd, self->ioas_id, mfd, start, length, iova_p, \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)     \
	ASSERT_EQ(0,                                                         \
		  _test_ioctl_ioas_map_file(                                 \
			  self->fd, ioas_id, mfd, start, length, iova_p,     \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

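/*
 * Example: back an IOAS mapping with a memfd instead of anonymous memory
 * (sketch; pairs memfd_mmap() above with the file-mapping path):
 *
 *	__u64 iova;
 *	int example_mfd;
 *	void *buf = memfd_mmap(PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, &example_mfd);
 *
 *	ASSERT_NE(MAP_FAILED, buf);
 *	test_ioctl_ioas_map_file(example_mfd, 0, PAGE_SIZE, &iova);
 */
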
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)

static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}

#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
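
/*
 * EXPECT_ERRNO() takes any expression that fails with -1 and sets errno,
 * so it works with raw ioctls as well as the helpers above. A sketch:
 *
 *	EXPECT_ERRNO(EBADF, ioctl(-1, IOMMU_DESTROY, NULL));
 */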

/* @data can be NULL */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
				 size_t data_len, uint32_t *capabilities)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if user buffer is larger than
	 * the data that kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	if (info) {
		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
			assert(!info->flags);
	}

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}

#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL))

#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL))

/* The mask argument is currently unused; callers may pass 0 */
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))

static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);
	if (ret)
		return ret;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}

#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})

static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = 0x1,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	/* Read the injected fault, then complete it with a success response */
	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes <= 0)
		return -EIO;

	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes <= 0)
		return -EIO;

	return 0;
}

#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))

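/*
 * Example: full I/O page fault round trip (sketch; assumes idev_id,
 * stdev_id, and a nesting parent parent_hwpt_id from the fixture, as in
 * iommufd.c):
 *
 *	__u32 fault_id, fault_fd, iopf_hwpt_id;
 *	struct iommu_hwpt_selftest data = { .iotlb = 0 };
 *
 *	test_ioctl_fault_alloc(&fault_id, &fault_fd);
 *	test_cmd_hwpt_alloc_iopf(idev_id, parent_hwpt_id, fault_id,
 *				 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
 *				 IOMMU_HWPT_DATA_SELFTEST, &data,
 *				 sizeof(data));
 *	test_cmd_mock_domain_replace(stdev_id, iopf_hwpt_id);
 *	test_cmd_trigger_iopf(idev_id, fault_fd);
 */
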
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 type, __u32 flags, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.type = type,
		.dev_id = device_id,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);
	if (ret)
		return ret;
	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}

#define test_cmd_viommu_alloc(device_id, hwpt_id, type, viommu_id)        \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
					    type, 0, viommu_id))
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, viommu_id) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id,  \
					    type, 0, viommu_id))

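/*
 * Example: instantiate a vIOMMU on a nesting parent HWPT and bind a
 * virtual device to it with the vdevice helpers below (sketch; the
 * virt_id 0x99 is an arbitrary guest-visible device ID):
 *
 *	__u32 viommu_id, vdev_id;
 *
 *	test_cmd_viommu_alloc(device_id, parent_hwpt_id,
 *			      IOMMU_VIOMMU_TYPE_SELFTEST, &viommu_id);
 *	test_cmd_vdevice_alloc(viommu_id, idev_id, 0x99, &vdev_id);
 */
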
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.viommu_id = viommu_id,
		.virt_id = virt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);
	if (ret)
		return ret;
	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}

#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))

#endif /* __SELFTEST_IOMMUFD_UTILS */
939