xref: /linux/tools/testing/selftests/iommu/iommufd_utils.h (revision ddb7a62af2e766eabb4ab7080e6ed8d6b8915302)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #ifndef __SELFTEST_IOMMUFD_UTILS
4 #define __SELFTEST_IOMMUFD_UTILS
5 
6 #include <unistd.h>
7 #include <stddef.h>
8 #include <sys/fcntl.h>
9 #include <sys/ioctl.h>
10 #include <stdint.h>
11 #include <assert.h>
12 #include <poll.h>
13 
14 #include "../kselftest_harness.h"
15 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
16 
/*
 * Hack to make assertions more readable: callers write
 * _IOMMU_TEST_CMD(IOMMU_TEST_OP_FOO) so the op name is visible at the call
 * site, but every test op is multiplexed through the single IOMMU_TEST_CMD
 * ioctl number, so the argument is deliberately discarded.
 */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
/* Mask selecting bit 'nr' within its containing word */
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
/* Index of the unsigned long word that contains bit 'nr' */
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

/*
 * Page-accounting modes used by the test commands; values presumably must
 * match the iommufd driver's internal iopt_pages accounting enum — confirm
 * against drivers/iommu/iommufd when changing.
 */
enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

/* Integer ceiling division; both operands are expected to be positive */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
33 
/* Non-atomic: set bit @nr in the bitmap at @addr (long-granular words). */
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	const unsigned int lbits = 8 * sizeof(unsigned long);

	addr[nr / lbits] |= 1UL << (nr % lbits);
}
41 
/*
 * Non-atomic: return whether bit @nr is set in the bitmap at @addr.
 * The bitmap is only read, so the pointer is const-qualified (callers
 * passing non-const pointers are unaffected).
 */
static inline bool test_bit(unsigned int nr, const unsigned long *addr)
{
	const unsigned int lbits = 8 * sizeof(unsigned long);

	return 1UL & (addr[nr / lbits] >> (nr % lbits));
}
46 
/* Shared test buffer; filled in by the test setup code, not visible here */
static void *buffer;
static unsigned long BUFFER_SIZE;

/* memfd-backed buffer and its file descriptor (see memfd_mmap()) */
static void *mfd_buffer;
static int mfd;

/* Runtime page size; presumably set from sysconf by the setup — confirm */
static unsigned long PAGE_SIZE;

/* Size of a single struct member without needing an object */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/* Offset of the first byte past MEMBER within TYPE */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

/* Expect mmap() of self->fd at (length, offset) to fail with _errno */
#define test_err_mmap(_errno, length, offset)                                 \
	EXPECT_ERRNO(_errno, (long)mmap(NULL, length, PROT_READ | PROT_WRITE, \
					MAP_SHARED, self->fd, offset))
62 
memfd_mmap(size_t length,int prot,int flags,int * mfd_p)63 static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
64 {
65 	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
66 	int mfd = memfd_create("buffer", mfd_flags);
67 	void *buf = MAP_FAILED;
68 
69 	if (mfd <= 0)
70 		return MAP_FAILED;
71 	if (ftruncate(mfd, length))
72 		goto out;
73 	*mfd_p = mfd;
74 	buf = mmap(0, length, prot, flags, mfd, 0);
75 out:
76 	if (buf == MAP_FAILED)
77 		close(mfd);
78 	return buf;
79 }
80 
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
99 
/*
 * Create a mock device + domain attached to @ioas_id via
 * IOMMU_TEST_OP_MOCK_DOMAIN. Returns 0 or the failing ioctl result.
 * Any of @stdev_id/@hwpt_id/@idev_id may be NULL when the caller does
 * not need that output object id.
 */
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	/* the kernel is expected to have filled in a non-zero id */
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
/* Assert-on-failure / expect-errno wrappers for use inside harness tests */
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
129 
/*
 * Same as _test_cmd_mock_domain() but passes @stdev_flags as the mock
 * device's dev_flags. Output id pointers may be NULL.
 */
static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (stdev_id)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	/* the kernel is expected to have filled in a non-zero id */
	assert(cmd.id != 0);
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
/* Assert-on-failure / expect-errno wrappers for use inside harness tests */
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))
161 
/*
 * Replace the page table that mock device @stdev_id is attached to with
 * @pt_id. On success the kernel writes the resulting hwpt id back into
 * cmd.mock_domain_replace.pt_id, optionally returned via @hwpt_id.
 */
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = {
			.pt_id = pt_id,
		},
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}

/* Assert-on-failure / expect-errno wrappers for use inside harness tests */
#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
189 
/*
 * Allocate a HWPT for @device_id on top of @pt_id via IOMMU_HWPT_ALLOC.
 * @ft_id: fault object id (0 for none); @data/@data_len: optional
 * driver-specific data blob of @data_type. On success the new object id
 * is stored through @hwpt_id (may be NULL).
 */
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);
	if (ret)
		return ret;
	if (hwpt_id)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}

/* Plain allocation: no nesting data, no fault object */
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                  \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags,   \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)   \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                      \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

/* Nested allocation carrying a driver-specific @data blob */
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,         \
				   data_type, data, data_len)                \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len)                \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))

/* Allocation wired to an IO page fault object @fault_id */
#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,    \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,     \
				 hwpt_id, data_type, data, data_len)            \
	EXPECT_ERRNO(_errno,                                                    \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
244 
/* Assert that IOTLB entry @iotlb_id of @hwpt_id holds @expected */
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

/* Assert every mock-domain IOTLB slot holds @expected */
#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                       \
	({                                                                     \
		int i;                                                         \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)             \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected);       \
	})

/* Assert that device cache slot @cache_id of @device_id holds @expected */
#define test_cmd_dev_check_cache(device_id, cache_id, expected)                \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,                   \
			.id = device_id,                                       \
			.check_dev_cache = {                                   \
				.id = cache_id,                                \
				.cache = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0, ioctl(self->fd,                                   \
				   _IOMMU_TEST_CMD(                            \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE),     \
				   &test_cmd));                                \
	})

/* Assert every mock device cache slot holds @expected */
#define test_cmd_dev_check_cache_all(device_id, expected)                      \
	({                                                                     \
		int c;                                                         \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)                       \
			test_cmd_dev_check_cache(device_id, c, expected);      \
	})
292 
/*
 * Issue IOMMU_HWPT_INVALIDATE against @hwpt_id with an array of requests.
 * @reqs: request array, each entry @lreq bytes, interpreted per @data_type.
 * @nreqs: in/out — entry count on input; updated with the number the kernel
 *         consumed, even when the ioctl fails. Must not be NULL (it is
 *         dereferenced unconditionally).
 */
static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

/* Assert-on-failure / expect-errno wrappers for use inside harness tests */
#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})
323 
/*
 * Identical to _test_cmd_hwpt_invalidate() except the target id names a
 * vIOMMU object; the same IOMMU_HWPT_INVALIDATE ioctl services both.
 * @nreqs is in/out and must not be NULL.
 */
static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}

/* Success wrapper hard-codes the selftest invalidation data format */
#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                  \
	({                                                                     \
		ASSERT_EQ(0,                                                   \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,  \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, \
					lreq, nreqs));                         \
	})
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq,   \
				 nreqs)                                        \
	({                                                                     \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(              \
					     self->fd, viommu_id, reqs,        \
					     data_type, lreq, nreqs));         \
	})
355 
/* Switch access object @access_id over to a different IOAS, @ioas_id. */
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};

	/* ioctl() returns 0 on success; its result is the function result */
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* Assert-on-failure wrapper for use inside harness tests */
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
374 
/*
 * Enable or disable dirty tracking on @hwpt_id.
 * Note: returns -errno on failure, unlike most wrappers here which return
 * the raw ioctl result; callers only compare against 0 so either works.
 */
static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0,
		.hwpt_id = hwpt_id,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
	if (ret)
		return -errno;
	return 0;
}
/* Assert-on-failure wrapper for use inside harness tests */
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
391 
/*
 * Read back the dirty bitmap for [iova, iova + length) of @hwpt_id at
 * @page_size granularity into the user-supplied @bitmap. @flags can carry
 * e.g. IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR.
 */
static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};
	int ret;

	ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
	if (ret)
		return ret;
	return 0;
}

/* Assert-on-failure wrapper; takes an explicit fd unlike most test_cmd_* */
#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
417 
/*
 * Ask the mock domain to mark IOVAs dirty according to the user-supplied
 * @bitmap over [iova, iova + length) at @page_size granularity. On success
 * the number of pages actually marked is optionally stored via @dirty.
 *
 * Returns the raw ioctl result, consistent with the sibling wrappers
 * (previously the error was returned negated — "-ret" — for no reason;
 * callers only ever compare the result against 0).
 */
static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);
	if (ret)
		return ret;
	if (dirty)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}

/* Assert-on-failure wrapper; takes an explicit fd unlike most test_cmd_* */
#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))
448 
/*
 * End-to-end dirty-tracking exercise against the mock domain:
 *   1) mark every pteset-th IOVA dirty via IOMMU_TEST_OP_DIRTY,
 *   2) read the bitmap back and check exactly the expected bits are set,
 *   3) read again and check the bits were cleared by the first read,
 *      unless IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR was requested.
 * Asserts via the harness on any mismatch; returns 0 otherwise.
 */
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	/* pteset = one dirty run plus one clean run of npte PTEs each */
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	/* GNU "?:" elvis: at least one dirty page is always expected */
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It was read already -- expect all zeroes unless NO_CLEAR kept them */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
/* Assert-on-failure wrapper for use inside harness tests */
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
				bitmap, bitmap_size, flags, _metadata)     \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))
504 
/*
 * Create an access object against @ioas_id. The value stored through
 * @access_id is actually a file descriptor (out_access_fd), which is why
 * destroying it below is simply a close().
 */
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int ret;

	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
	if (ret)
		return ret;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
/* Assert-on-failure wrapper for use inside harness tests */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))

/* The access "id" is an fd, so destroying the object is a close() */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
532 
/* Release the pinned-pages sub-object @access_pages_id of @access_id */
static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
					  unsigned int access_pages_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
		.id = access_id,
		.destroy_access_pages = { .access_pages_id = access_pages_id },
	};
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* Assert-on-failure / expect-errno wrappers for use inside harness tests */
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
550 
/* Destroy any iommufd object by id via IOMMU_DESTROY */
static int _test_ioctl_destroy(int fd, unsigned int id)
{
	struct iommu_destroy cmd = {
		.size = sizeof(cmd),
		.id = id,
	};
	return ioctl(fd, IOMMU_DESTROY, &cmd);
}
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))

/* Allocate a fresh IOAS, storing its object id through @id */
static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = {
		.size = sizeof(cmd),
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
	if (ret)
		return ret;
	*id = cmd.out_ioas_id;
	return 0;
}
/* Wrapper also asserts the kernel never hands out object id 0 */
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
579 
/*
 * Map @length bytes at @buffer into @ioas_id. @iova is in/out: consumed as
 * the requested address only with IOMMU_IOAS_MAP_FIXED_IOVA, and always
 * updated with the IOVA the kernel chose. Must not be NULL.
 */
static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
	*iova = cmd.iova;
	return ret;
}
/* RW map into the fixture's default IOAS; kernel picks the IOVA */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* Same, but against an explicit IOAS rather than the fixture default */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
						  IOMMU_IOAS_MAP_READABLE))

/* Fixed-IOVA variants; __iova copies the argument so it can be an rvalue */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
651 
/*
 * Unmap [iova, iova + length) from @ioas_id. If @out_len is non-NULL it
 * receives the byte count the kernel reports as actually unmapped, even
 * when the ioctl fails.
 */
static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
	if (out_len)
		*out_len = cmd.length;
	return ret;
}
/* Assert-on-failure / expect-errno wrappers for use inside harness tests */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
679 
/*
 * Map @length bytes of file @mfd, beginning at file offset @start, into
 * @ioas_id via IOMMU_IOAS_MAP_FILE. @iova is in/out exactly as in
 * _test_ioctl_ioas_map() and must not be NULL.
 */
static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
	};
	int ret;

	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		cmd.iova = *iova;

	ret = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);
	*iova = cmd.iova;
	return ret;
}

/* RW file-backed map into the fixture's default IOAS */
#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                   \
	ASSERT_EQ(0,                                                           \
		  _test_ioctl_ioas_map_file(                                   \
			  self->fd, self->ioas_id, mfd, start, length, iova_p, \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)     \
	EXPECT_ERRNO(                                                        \
		_errno,                                                      \
		_test_ioctl_ioas_map_file(                                   \
			self->fd, self->ioas_id, mfd, start, length, iova_p, \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* Same, but against an explicit IOAS rather than the fixture default */
#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)     \
	ASSERT_EQ(0,                                                         \
		  _test_ioctl_ioas_map_file(                                 \
			  self->fd, ioas_id, mfd, start, length, iova_p,     \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
720 
/* Cap the iommufd driver's temporary allocation size to exercise paths
 * that split work into batches. */
static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
{
	struct iommu_test_cmd memlimit_cmd = {
		.size = sizeof(memlimit_cmd),
		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
		.memory_limit = { .limit = limit },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
		     &memlimit_cmd);
}

#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* 65536 restores the normal (effectively unconstrained) limit */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
738 
/*
 * Close @fd and then, via a fresh /dev/iommu fd, verify the test did not
 * leak any page references against the global test buffer.
 */
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	/* reopen so the check runs with none of the test's objects alive */
	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}

/*
 * Run @cmd expecting it to fail with -1 and errno == expected_errno.
 * Defined after its first textual uses above; that is harmless because
 * macro bodies only expand at call sites in the including .c files.
 */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})

/* NOTE(review): definitions continue below this #endif in the captured
 * file — looks like a misplaced include-guard end; confirm against the
 * full tree before relying on the guard. */
#endif
767 
/* @data can be NULL */
/*
 * Issue IOMMU_GET_HW_INFO for @device_id and validate the reply against
 * the selftest mock driver's contract.
 *
 * @data/@data_len: optional user buffer for the type-specific info;
 *	interpreted here as struct iommu_test_hw_info.
 * @capabilities: optional out — cmd.out_capabilities.
 * @max_pasid: optional out — cmd.out_max_pasid_log2.
 *
 * Returns 0 on success or the ioctl() result on failure. Asserts (not
 * returns) when the kernel's reply violates the selftest contract.
 */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, __u32 data_type,
				 void *data, size_t data_len,
				 uint32_t *capabilities, uint8_t *max_pasid)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.in_data_type = data_type,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	/* A non-default type must be flagged so the kernel reads in_data_type */
	if (data_type != IOMMU_HW_INFO_TYPE_DEFAULT)
		cmd.flags |= IOMMU_HW_INFO_FLAG_INPUT_TYPE;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	/* The mock driver always reports the selftest info type */
	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if user buffer is larger than
	 * the data that kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	/* Per-field checks, guarded so short buffers are not over-read */
	if (info) {
		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
			assert(!info->flags);
	}

	if (max_pasid)
		*max_pasid = cmd.out_max_pasid_log2;

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}
828 
/* ASSERT wrapper: fetch hw info into @data (caps/pasid outputs unused) */
#define test_cmd_get_hw_info(device_id, data_type, data, data_len)         \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data_type, \
					   data, data_len, NULL, NULL))

/* Expect IOMMU_GET_HW_INFO to fail with @_errno */
#define test_err_get_hw_info(_errno, device_id, data_type, data, data_len) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_get_hw_info(self->fd, device_id, data_type, \
					   data, data_len, NULL, NULL))

/* Fetch only the out_capabilities word for @device_id */
#define test_cmd_get_hw_capabilities(device_id, caps)                        \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id,              \
					   IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \
					   0, &caps, NULL))

/* Fetch only out_max_pasid_log2 for @device_id */
#define test_cmd_get_hw_info_pasid(device_id, max_pasid)                     \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id,              \
					   IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \
					   0, NULL, max_pasid))
847 
/*
 * Allocate an IOPF fault queue and return its object ID and event fd
 * through @fault_id/@fault_fd. Returns 0 on success, otherwise the
 * ioctl() result.
 */
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = { .size = sizeof(cmd) };
	int rc = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);

	if (rc)
		return rc;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}
862 
/*
 * Allocate a fault queue and sanity check that both returned handles
 * are non-zero.
 */
#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})
870 
/*
 * Inject a read/write page fault on @device_id/@pasid through the mock
 * driver, consume it from @fault_fd, and write back a SUCCESS response
 * carrying the fault's cookie. Returns 0 on success, the trigger
 * ioctl()'s result, or -EIO on a short read/write of the fault fd.
 */
static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 pasid,
				  __u32 fault_fd)
{
	struct iommu_hwpt_page_response resp = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = pasid,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ |
				IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_pgfault fault = {};
	int rc;

	rc = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &cmd);
	if (rc)
		return rc;

	if (read(fault_fd, &fault, sizeof(fault)) <= 0)
		return -EIO;

	/* The response must echo the cookie of the fault it answers */
	resp.cookie = fault.cookie;
	if (write(fault_fd, &resp, sizeof(resp)) <= 0)
		return -EIO;

	return 0;
}
908 
/* Trigger and respond to an IOPF using the default PASID 0x1 */
#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, 0x1, fault_fd))
/* Trigger and respond to an IOPF on an explicit @pasid */
#define test_cmd_trigger_iopf_pasid(device_id, pasid, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, \
					    pasid, fault_fd))
914 
/*
 * Allocate a vIOMMU of @type for @device_id on top of the nesting
 * parent @hwpt_id, passing @data/@data_len as driver-specific data.
 * @viommu_id is optional. Returns 0 or the ioctl() result.
 */
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 flags, __u32 type, void *data,
				  __u32 data_len, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.hwpt_id = hwpt_id,
		.flags = flags,
		.type = type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
	};
	int rc = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);

	if (rc)
		return rc;
	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}
937 
/* Allocate a vIOMMU with no flags; ASSERT success */
#define test_cmd_viommu_alloc(device_id, hwpt_id, type, data, data_len,      \
			      viommu_id)                                     \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, 0, \
					    type, data, data_len, viommu_id))
/* Expect vIOMMU allocation to fail with @_errno */
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, data,        \
			      data_len, viommu_id)                           \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, 0, \
					    type, data, data_len, viommu_id))
947 
/*
 * Create a virtual device binding @idev_id to @viommu_id under the
 * guest-visible id @virt_id. @vdev_id is optional. Returns 0 or the
 * ioctl() result.
 */
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.dev_id = idev_id,
		.virt_id = virt_id,
	};
	int rc = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);

	if (rc)
		return rc;
	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}
966 
/* Create a vDEVICE; ASSERT success */
#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
/* Expect vDEVICE creation to fail with @_errno */
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))
974 
/*
 * Allocate a vIOMMU hardware queue of @type at slot @idx, backed by
 * [@base_addr, @base_addr + @length) in the nesting parent's IOVA
 * space. @hw_queue_id is optional. Returns 0 or the ioctl() result.
 */
static int _test_cmd_hw_queue_alloc(int fd, __u32 viommu_id, __u32 type,
				    __u32 idx, __u64 base_addr, __u64 length,
				    __u32 *hw_queue_id)
{
	struct iommu_hw_queue_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.index = idx,
		.type = type,
		.length = length,
		.nesting_parent_iova = base_addr,
	};
	int rc = ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd);

	if (rc)
		return rc;
	if (hw_queue_id)
		*hw_queue_id = cmd.out_hw_queue_id;
	return 0;
}
996 
/* Allocate a hardware queue; ASSERT success */
#define test_cmd_hw_queue_alloc(viommu_id, type, idx, base_addr, len, out_qid) \
	ASSERT_EQ(0, _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx,  \
					      base_addr, len, out_qid))
/* Expect hardware queue allocation to fail with @_errno */
#define test_err_hw_queue_alloc(_errno, viommu_id, type, idx, base_addr, len, \
				out_qid)                                      \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
					      base_addr, len, out_qid))
1005 
/*
 * Allocate a virtual event queue of @type (fixed depth 2) on
 * @viommu_id. Both out-pointers are optional: @veventq_id receives the
 * object ID, @veventq_fd the readable event fd. Returns 0 or the
 * ioctl() result.
 */
static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
				   __u32 *veventq_id, __u32 *veventq_fd)
{
	struct iommu_veventq_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.type = type,
		.veventq_depth = 2,
	};
	int rc = ioctl(fd, IOMMU_VEVENTQ_ALLOC, &cmd);

	if (rc)
		return rc;
	if (veventq_id)
		*veventq_id = cmd.out_veventq_id;
	if (veventq_fd)
		*veventq_fd = cmd.out_veventq_fd;
	return 0;
}
1026 
/* Allocate a vEVENTQ; ASSERT success */
#define test_cmd_veventq_alloc(viommu_id, type, veventq_id, veventq_fd) \
	ASSERT_EQ(0, _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
					     veventq_id, veventq_fd))
/* Expect vEVENTQ allocation to fail with @_errno */
#define test_err_veventq_alloc(_errno, viommu_id, type, veventq_id,     \
			       veventq_fd)                              \
	EXPECT_ERRNO(_errno,                                            \
		     _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
					     veventq_id, veventq_fd))
1035 
/*
 * Ask the mock driver to generate @nvevents virtual events for @dev_id,
 * one trigger ioctl() per event. Returns 0 on success (including
 * nvevents == 0), -1 if any trigger fails.
 */
static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
{
	struct iommu_test_cmd trigger_vevent_cmd = {
		.size = sizeof(trigger_vevent_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_VEVENT,
		.trigger_vevent = {
			.dev_id = dev_id,
		},
	};
	/*
	 * Fix: must be initialized. Previously 'ret' was returned
	 * uninitialized (UB) when nvevents == 0 and the loop never ran.
	 */
	int ret = 0;

	while (nvevents--) {
		ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
			    &trigger_vevent_cmd);
		if (ret < 0)
			return -1;
	}
	return ret;
}
1055 
/* Trigger @nvevents virtual events; ASSERT success */
#define test_cmd_trigger_vevents(dev_id, nvevents) \
	ASSERT_EQ(0, _test_cmd_trigger_vevents(self->fd, dev_id, nvevents))
1058 
/*
 * Read and validate @nvevents virtual events from @event_fd. Each event
 * on the wire is a struct iommufd_vevent_header followed immediately by
 * a struct iommu_viommu_event_selftest payload.
 *
 * Fails with errno = EOVERFLOW on a lost-events flag or a sequence gap,
 * errno = EINVAL when a payload's virt_id differs from @virt_id, and
 * errno = EFAULT/ENOMEM on read/allocation failure. *prev_seq carries
 * the last seen sequence number across calls. Returns 0 on success,
 * -1 with errno set otherwise.
 */
static int _test_cmd_read_vevents(int fd, __u32 event_fd, __u32 nvevents,
				  __u32 virt_id, int *prev_seq)
{
	struct pollfd pollfd = { .fd = event_fd, .events = POLLIN };
	struct iommu_viommu_event_selftest *event;
	struct iommufd_vevent_header *hdr;
	ssize_t bytes;
	void *data;
	int ret, i;

	ret = poll(&pollfd, 1, 1000);
	if (ret < 0)
		return -1;

	data = calloc(nvevents, sizeof(*hdr) + sizeof(*event));
	if (!data) {
		errno = ENOMEM;
		return -1;
	}

	bytes = read(event_fd, data,
		     nvevents * (sizeof(*hdr) + sizeof(*event)));
	if (bytes <= 0) {
		errno = EFAULT;
		ret = -1;
		goto out_free;
	}

	for (i = 0; i < nvevents; i++) {
		hdr = data + i * (sizeof(*hdr) + sizeof(*event));

		if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS ||
		    hdr->sequence - *prev_seq > 1) {
			*prev_seq = hdr->sequence;
			errno = EOVERFLOW;
			ret = -1;
			goto out_free;
		}
		*prev_seq = hdr->sequence;
		/*
		 * Fix: validate the i-th event's payload, which follows its
		 * own header. The previous 'data + sizeof(*hdr)' always
		 * pointed at the FIRST event's payload, so events past the
		 * first were never actually checked against @virt_id.
		 */
		event = (void *)hdr + sizeof(*hdr);
		if (event->virt_id != virt_id) {
			errno = EINVAL;
			ret = -1;
			goto out_free;
		}
	}

	ret = 0;
out_free:
	free(data);
	return ret;
}
1111 
/* Read and validate @nvevents events; ASSERT success */
#define test_cmd_read_vevents(event_fd, nvevents, virt_id, prev_seq)      \
	ASSERT_EQ(0, _test_cmd_read_vevents(self->fd, event_fd, nvevents, \
					    virt_id, prev_seq))
/* Expect the event read/validation to fail with @_errno */
#define test_err_read_vevents(_errno, event_fd, nvevents, virt_id, prev_seq) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_read_vevents(self->fd, event_fd, nvevents,    \
					    virt_id, prev_seq))
1119 
/*
 * Attach @pasid of the mock device @stdev_id to the page table object
 * @pt_id. Returns the raw ioctl() result.
 */
static int _test_cmd_pasid_attach(int fd, __u32 stdev_id, __u32 pasid,
				  __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_ATTACH,
		.id = stdev_id,
		.pasid_attach = {
			.pt_id = pt_id,
			.pasid = pasid,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_ATTACH), &cmd);
}
1136 
/* Attach @pasid to @hwpt_id on the fixture's stdev; ASSERT success */
#define test_cmd_pasid_attach(pasid, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))

/* Expect the PASID attach to fail with @_errno */
#define test_err_pasid_attach(_errno, pasid, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))
1145 
/*
 * Replace the page table currently attached to @pasid of mock device
 * @stdev_id with @pt_id. Returns the raw ioctl() result.
 */
static int _test_cmd_pasid_replace(int fd, __u32 stdev_id, __u32 pasid,
				   __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_REPLACE,
		.id = stdev_id,
		.pasid_replace = {
			.pt_id = pt_id,
			.pasid = pasid,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_REPLACE), &cmd);
}
1162 
/* Replace @pasid's page table with @hwpt_id; ASSERT success */
#define test_cmd_pasid_replace(pasid, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))

/* Expect the PASID replace to fail with @_errno */
#define test_err_pasid_replace(_errno, pasid, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))
1171 
/*
 * Detach whatever is attached to @pasid of mock device @stdev_id.
 * Returns the raw ioctl() result.
 */
static int _test_cmd_pasid_detach(int fd, __u32 stdev_id, __u32 pasid)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_DETACH,
		.id = stdev_id,
		.pasid_detach = { .pasid = pasid },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_DETACH), &cmd);
}
1186 
/* Detach @pasid on the fixture's stdev; ASSERT success */
#define test_cmd_pasid_detach(pasid) \
	ASSERT_EQ(0, _test_cmd_pasid_detach(self->fd, self->stdev_id, pasid))
1189 
/*
 * Ask the mock driver whether @pasid of @stdev_id is currently attached
 * to @hwpt_id. Returns the raw ioctl() result (0 when it matches).
 */
static int test_cmd_pasid_check_hwpt(int fd, __u32 stdev_id, __u32 pasid,
				     __u32 hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_PASID_CHECK_HWPT,
		.id = stdev_id,
		.pasid_check = {
			.hwpt_id = hwpt_id,
			.pasid = pasid,
		},
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_CHECK_HWPT),
		     &cmd);
}
1206