xref: /linux/tools/testing/selftests/iommu/iommufd_utils.h (revision 3c894cb29bbf4e36c5f2497cf8ea6fb09e157920)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #ifndef __SELFTEST_IOMMUFD_UTILS
4 #define __SELFTEST_IOMMUFD_UTILS
5 
6 #include <unistd.h>
7 #include <stddef.h>
8 #include <sys/fcntl.h>
9 #include <sys/ioctl.h>
10 #include <stdint.h>
11 #include <assert.h>
12 #include <poll.h>
13 
14 #include "../kselftest_harness.h"
15 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
16 
/* Hack to make assertions more readable */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD

/* Imported from include/asm-generic/bitops/generic-non-atomic.h */
#define BITS_PER_BYTE 8
#define BITS_PER_LONG __BITS_PER_LONG
/* Mask selecting bit @nr within its word, and the word index of @nr. */
#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG)

/* Mirrors the kernel-internal iopt_pages accounting modes. */
enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

/* Integer ceiling division. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
33 
/* Non-atomic: set bit @nr in the bitmap starting at @addr. */
static inline void set_bit(unsigned int nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}
41 
test_bit(unsigned int nr,unsigned long * addr)42 static inline bool test_bit(unsigned int nr, unsigned long *addr)
43 {
44 	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
45 }
46 
/* Shared anonymous test buffer; filled in elsewhere by the test setup. */
static void *buffer;
static unsigned long BUFFER_SIZE;

/* memfd-backed buffer and its descriptor (set up via memfd_mmap()). */
static void *mfd_buffer;
static int mfd;

/* Runtime page size discovered by the tests (not the compile-time macro). */
static unsigned long PAGE_SIZE;

/* Size of one struct member, and the offset one-past-the-end of a member. */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
58 
memfd_mmap(size_t length,int prot,int flags,int * mfd_p)59 static inline void *memfd_mmap(size_t length, int prot, int flags, int *mfd_p)
60 {
61 	int mfd_flags = (flags & MAP_HUGETLB) ? MFD_HUGETLB : 0;
62 	int mfd = memfd_create("buffer", mfd_flags);
63 	void *buf = MAP_FAILED;
64 
65 	if (mfd <= 0)
66 		return MAP_FAILED;
67 	if (ftruncate(mfd, length))
68 		goto out;
69 	*mfd_p = mfd;
70 	buf = mmap(0, length, prot, flags, mfd, 0);
71 out:
72 	if (buf == MAP_FAILED)
73 		close(mfd);
74 	return buf;
75 }
76 
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 */
/* Assert every page in [_ptr, _ptr + _length) has refcount == _refs. */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
95 
/*
 * Create a mock domain on @ioas_id.  Each out pointer is optional and,
 * when non-NULL, receives the id of the created selftest device, its
 * hwpt, or the idev object.  Returns the raw ioctl result (0 on success).
 */
static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id,
				 __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
		.mock_domain = {},
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc != 0)
		return rc;
	if (stdev_id != NULL)
		*stdev_id = cmd.mock_domain.out_stdev_id;
	/* A successful command must never leave a zero object id. */
	assert(cmd.id != 0);
	if (hwpt_id != NULL)
		*hwpt_id = cmd.mock_domain.out_hwpt_id;
	if (idev_id != NULL)
		*idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
/* Create a mock domain via the fixture fd (self->fd); asserts success. */
#define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id, idev_id)       \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, \
					   hwpt_id, idev_id))
/* Same, but expect failure with errno == _errno. */
#define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id)      \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   stdev_id, hwpt_id, NULL))
125 
/*
 * Like _test_cmd_mock_domain() but passes @stdev_flags through to the
 * mock device.  Optional out pointers as in _test_cmd_mock_domain().
 */
static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id,
				       __u32 stdev_flags, __u32 *stdev_id,
				       __u32 *hwpt_id, __u32 *idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
		.id = ioas_id,
		.mock_domain_flags = { .dev_flags = stdev_flags },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc != 0)
		return rc;
	if (stdev_id != NULL)
		*stdev_id = cmd.mock_domain_flags.out_stdev_id;
	/* A successful command must never leave a zero object id. */
	assert(cmd.id != 0);
	if (hwpt_id != NULL)
		*hwpt_id = cmd.mock_domain_flags.out_hwpt_id;
	if (idev_id != NULL)
		*idev_id = cmd.mock_domain_flags.out_idev_id;
	return 0;
}
/* Mock-domain creation with extra device flags; asserts success. */
#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \
	ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,     \
						 stdev_id, hwpt_id, idev_id))
/* Same, but expect failure with errno == _errno. */
#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \
	EXPECT_ERRNO(_errno,                                                  \
		     _test_cmd_mock_domain_flags(self->fd, ioas_id, flags,    \
						 stdev_id, hwpt_id, NULL))
157 
/*
 * Attach the mock device @stdev_id to page table @pt_id.  If @hwpt_id
 * is non-NULL it receives the id the kernel reports back.
 */
static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id,
					 __u32 *hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE,
		.id = stdev_id,
		.mock_domain_replace = { .pt_id = pt_id },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc != 0)
		return rc;
	if (hwpt_id != NULL)
		*hwpt_id = cmd.mock_domain_replace.pt_id;
	return 0;
}
178 
/* Replace the page table stdev_id is attached to; asserts success. */
#define test_cmd_mock_domain_replace(stdev_id, pt_id)                         \
	ASSERT_EQ(0, _test_cmd_mock_domain_replace(self->fd, stdev_id, pt_id, \
						   NULL))
/* Same, but expect failure with errno == _errno. */
#define test_err_mock_domain_replace(_errno, stdev_id, pt_id)                  \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \
							   pt_id, NULL))
185 
/*
 * Allocate a hw page table for @device_id on top of @pt_id, optionally
 * bound to fault queue @ft_id and carrying driver-specific @data of
 * @data_len bytes.  *hwpt_id (if non-NULL) receives the new object id.
 */
static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id,
				__u32 flags, __u32 *hwpt_id, __u32 data_type,
				void *data, size_t data_len)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.dev_id = device_id,
		.pt_id = pt_id,
		.data_type = data_type,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.fault_id = ft_id,
	};
	int rc = ioctl(fd, IOMMU_HWPT_ALLOC, &cmd);

	if (rc != 0)
		return rc;
	if (hwpt_id != NULL)
		*hwpt_id = cmd.out_hwpt_id;
	return 0;
}
209 
/* Allocate a plain hwpt (no driver data, no fault queue); asserts success. */
#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id)                  \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags,   \
					  hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \
					  0))
/* Same, but expect failure with errno == _errno. */
#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id)   \
	EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(                      \
				     self->fd, device_id, pt_id, 0, flags, \
				     hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0))

/* Allocate a nested hwpt carrying driver-specific data. */
#define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id,         \
				   data_type, data, data_len)                \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))
/* Same, but expect failure with errno == _errno. */
#define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \
				   data_type, data, data_len)                \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \
					  hwpt_id, data_type, data, data_len))

/* Allocate a hwpt bound to an I/O page fault queue (fault_id). */
#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id,    \
				   data_type, data, data_len)                   \
	ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
/* Same, but expect failure with errno == _errno. */
#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags,     \
				 hwpt_id, data_type, data, data_len)            \
	EXPECT_ERRNO(_errno,                                                    \
		     _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \
					  flags, hwpt_id, data_type, data,      \
					  data_len))
240 
/* Assert IOTLB slot @iotlb_id of nested hwpt @hwpt_id holds @expected. */
#define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected)                 \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_MD_CHECK_IOTLB,                    \
			.id = hwpt_id,                                         \
			.check_iotlb = {                                       \
				.id = iotlb_id,                                \
				.iotlb = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0,                                                   \
			  ioctl(self->fd,                                      \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_IOTLB), \
				&test_cmd));                                   \
	})

/* Check every IOTLB slot of the mock nested domain against @expected. */
#define test_cmd_hwpt_check_iotlb_all(hwpt_id, expected)                       \
	({                                                                     \
		int i;                                                         \
		for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)             \
			test_cmd_hwpt_check_iotlb(hwpt_id, i, expected);       \
	})

/* Assert device-cache slot @cache_id of device @device_id holds @expected. */
#define test_cmd_dev_check_cache(device_id, cache_id, expected)                \
	({                                                                     \
		struct iommu_test_cmd test_cmd = {                             \
			.size = sizeof(test_cmd),                              \
			.op = IOMMU_TEST_OP_DEV_CHECK_CACHE,                   \
			.id = device_id,                                       \
			.check_dev_cache = {                                   \
				.id = cache_id,                                \
				.cache = expected,                             \
			},                                                     \
		};                                                             \
		ASSERT_EQ(0, ioctl(self->fd,                                   \
				   _IOMMU_TEST_CMD(                            \
					   IOMMU_TEST_OP_DEV_CHECK_CACHE),     \
				   &test_cmd));                                \
	})

/* Check every mock device cache slot against @expected. */
#define test_cmd_dev_check_cache_all(device_id, expected)                      \
	({                                                                     \
		int c;                                                         \
		for (c = 0; c < MOCK_DEV_CACHE_NUM; c++)                       \
			test_cmd_dev_check_cache(device_id, c, expected);      \
	})
288 
/*
 * Issue IOMMU_HWPT_INVALIDATE for @hwpt_id with an array of @*nreqs
 * requests of @lreq bytes each at @reqs.  *nreqs is in/out: on return it
 * holds how many entries the kernel consumed, even on error.
 */
static int _test_cmd_hwpt_invalidate(int fd, __u32 hwpt_id, void *reqs,
				     uint32_t data_type, uint32_t lreq,
				     uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc;

	rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}
305 
/* Invalidate hwpt entries; nreqs is in/out (consumed count). */
#define test_cmd_hwpt_invalidate(hwpt_id, reqs, data_type, lreq, nreqs)       \
	({                                                                    \
		ASSERT_EQ(0,                                                  \
			  _test_cmd_hwpt_invalidate(self->fd, hwpt_id, reqs,  \
						    data_type, lreq, nreqs)); \
	})
/* Same, but expect failure with errno == _errno. */
#define test_err_hwpt_invalidate(_errno, hwpt_id, reqs, data_type, lreq, \
				 nreqs)                                  \
	({                                                               \
		EXPECT_ERRNO(_errno, _test_cmd_hwpt_invalidate(          \
					     self->fd, hwpt_id, reqs,    \
					     data_type, lreq, nreqs));   \
	})
319 
/*
 * Same wire format as _test_cmd_hwpt_invalidate(), but targeting a
 * viommu object id.  *nreqs is in/out (entries consumed by the kernel).
 */
static int _test_cmd_viommu_invalidate(int fd, __u32 viommu_id, void *reqs,
				       uint32_t data_type, uint32_t lreq,
				       uint32_t *nreqs)
{
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = viommu_id,
		.data_type = data_type,
		.data_uptr = (uint64_t)reqs,
		.entry_len = lreq,
		.entry_num = *nreqs,
	};
	int rc;

	rc = ioctl(fd, IOMMU_HWPT_INVALIDATE, &cmd);
	*nreqs = cmd.entry_num;
	return rc;
}
336 
/* Invalidate viommu entries using the selftest data format. */
#define test_cmd_viommu_invalidate(viommu, reqs, lreq, nreqs)                  \
	({                                                                     \
		ASSERT_EQ(0,                                                   \
			  _test_cmd_viommu_invalidate(self->fd, viommu, reqs,  \
					IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST, \
					lreq, nreqs));                         \
	})
/* Same, with explicit data_type, expecting failure with errno == _errno. */
#define test_err_viommu_invalidate(_errno, viommu_id, reqs, data_type, lreq,   \
				 nreqs)                                        \
	({                                                                     \
		EXPECT_ERRNO(_errno, _test_cmd_viommu_invalidate(              \
					     self->fd, viommu_id, reqs,        \
					     data_type, lreq, nreqs));         \
	})
351 
/* Point access object @access_id at a different IOAS, @ioas_id. */
static int _test_cmd_access_replace_ioas(int fd, __u32 access_id,
					 unsigned int ioas_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
		.id = access_id,
		.access_replace_ioas = { .ioas_id = ioas_id },
	};

	/* No outputs to copy back; the raw ioctl result is the result. */
	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
}
/* Switch the IOAS an access object operates on; asserts success. */
#define test_cmd_access_replace_ioas(access_id, ioas_id) \
	ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id))
370 
/*
 * Enable or disable dirty tracking on @hwpt_id.  Returns 0 on success
 * and -errno on failure (note: negative errno, unlike most helpers
 * here which return the raw ioctl result).
 */
static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled)
{
	struct iommu_hwpt_set_dirty_tracking cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
	};

	cmd.flags = enabled ? IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0;
	if (ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd))
		return -errno;
	return 0;
}
/* Toggle dirty tracking on a hwpt; asserts success. */
#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \
	ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled))
387 
/*
 * Fetch the dirty bitmap for [iova, iova + length) at @page_size
 * granularity into @bitmap.  The kernel writes @bitmap directly, so
 * there is nothing to copy out here.
 */
static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length,
				      __u64 iova, size_t page_size,
				      __u64 *bitmap, __u32 flags)
{
	struct iommu_hwpt_get_dirty_bitmap cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.flags = flags,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	return ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd);
}
408 
/* Read the dirty bitmap for a range; asserts success. */
#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size,    \
				  bitmap, flags)                           \
	ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \
						page_size, bitmap, flags))
413 
/*
 * Tell the mock domain to mark pages dirty according to @bitmap over
 * [iova, iova + length).  If @dirty is non-NULL it receives the number
 * of PTEs the kernel actually dirtied.  On ioctl failure the negated
 * ioctl result is returned (matches the existing callers).
 */
static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
					   __u64 iova, size_t page_size,
					   __u64 *bitmap, __u64 *dirty)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_DIRTY,
		.id = hwpt_id,
		.dirty = {
			.iova = iova,
			.length = length,
			.page_size = page_size,
			.uptr = (uintptr_t)bitmap,
		}
	};
	int rc = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd);

	if (rc != 0)
		return -rc;
	if (dirty != NULL)
		*dirty = cmd.dirty.out_nr_dirty;
	return 0;
}
438 
/* Mark pages dirty in the mock domain per @bitmap; asserts success. */
#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \
				       bitmap, nr)                           \
	ASSERT_EQ(0,                                                         \
		  _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \
						  page_size, bitmap, nr))
444 
/*
 * Round-trip dirty tracking through the mock domain:
 *  1) mark the first bit of every @pteset-sized group as dirty,
 *  2) read the bitmap back and verify only the first @npte bits of each
 *     group are reported dirty,
 *  3) read again: unless NO_CLEAR was given, the first read should have
 *     cleared everything.
 * @pte_page_size is the range covered by one mock PTE; @page_size is
 * the granularity of the user-visible bitmap.  Always returns 0; the
 * ASSERT_EQ harness macros abort the test on mismatch.
 */
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
				    __u64 iova, size_t page_size,
				    size_t pte_page_size, __u64 *bitmap,
				    __u64 nbits, __u32 flags,
				    struct __test_metadata *_metadata)
{
	unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
	/* "x ?: y" is a GNU extension; always test at least one group. */
	unsigned long j, i, nr = nbits / pteset ?: 1;
	unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE);
	__u64 out_dirty = 0;

	/* Mark all even bits as dirty in the mock domain */
	memset(bitmap, 0, bitmap_size);
	for (i = 0; i < nbits; i += pteset)
		set_bit(i, (unsigned long *)bitmap);

	test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
				       bitmap, &out_dirty);
	ASSERT_EQ(nr, out_dirty);

	/* Expect all even bits as dirty in the user bitmap */
	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);
	/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(j < npte,
				  test_bit(i + j, (unsigned long *)bitmap));
		}
		ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
	}

	memset(bitmap, 0, bitmap_size);
	test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
				  flags);

	/* It as read already -- expect all zeroes */
	for (i = 0; i < nbits; i += pteset) {
		for (j = 0; j < pteset; j++) {
			ASSERT_EQ(
				(j < npte) &&
					(flags &
					 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
				test_bit(i + j, (unsigned long *)bitmap));
		}
	}

	return 0;
}
/* Full dirty-tracking round trip (see _test_mock_dirty_bitmaps()). */
#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
				bitmap, bitmap_size, flags, _metadata)     \
	ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
					      page_size, pte_size, bitmap,     \
					      bitmap_size, flags, _metadata))
500 
/*
 * Create an access object against @ioas_id.  On success *access_id
 * receives the fd representing the access (see also
 * _test_cmd_destroy_access(), which simply closes it).
 */
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
				   __u32 *access_id, unsigned int flags)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_CREATE_ACCESS,
		.id = ioas_id,
		.create_access = { .flags = flags },
	};
	int rc = ioctl(fd, IOMMU_TEST_CMD, &cmd);

	if (rc != 0)
		return rc;
	*access_id = cmd.create_access.out_access_fd;
	return 0;
}
/* Create an access object; *access_id receives its fd. Asserts success. */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))
521 
/* An access object is represented by an fd; destroying it is closing it. */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	return close(access_id);
}
/* Close an access object's fd; asserts success. */
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
528 
_test_cmd_destroy_access_pages(int fd,unsigned int access_id,unsigned int access_pages_id)529 static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
530 					  unsigned int access_pages_id)
531 {
532 	struct iommu_test_cmd cmd = {
533 		.size = sizeof(cmd),
534 		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
535 		.id = access_id,
536 		.destroy_access_pages = { .access_pages_id = access_pages_id },
537 	};
538 	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
539 }
/* Unpin a previously pinned page range; asserts success. */
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
/* Same, but expect failure with errno == _errno. */
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
546 
_test_ioctl_destroy(int fd,unsigned int id)547 static int _test_ioctl_destroy(int fd, unsigned int id)
548 {
549 	struct iommu_destroy cmd = {
550 		.size = sizeof(cmd),
551 		.id = id,
552 	};
553 	return ioctl(fd, IOMMU_DESTROY, &cmd);
554 }
/* Destroy an iommufd object; asserts success. */
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
556 
/* Allocate a fresh IOAS; on success *id receives the new object id. */
static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
{
	struct iommu_ioas_alloc cmd = { .size = sizeof(cmd) };
	int rc = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);

	if (rc != 0)
		return rc;
	*id = cmd.out_ioas_id;
	return 0;
}
/* Allocate an IOAS and sanity-check the returned id is non-zero. */
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
575 
/*
 * Map @length bytes at @buffer into @ioas_id.  When FIXED_IOVA is set
 * the mapping is placed at *iova; either way *iova is updated with the
 * final (possibly kernel-chosen) IOVA.  Returns the raw ioctl result.
 */
static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
				size_t length, __u64 *iova, unsigned int flags)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buffer,
		.length = length,
		/* Only meaningful when FIXED_IOVA is requested. */
		.iova = (flags & IOMMU_IOAS_MAP_FIXED_IOVA) ? *iova : 0,
	};
	int rc = ioctl(fd, IOMMU_IOAS_MAP, &cmd);

	*iova = cmd.iova;
	return rc;
}
/* Map into the fixture's IOAS (RW); *iova_p receives the chosen IOVA. */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* Same, but expect failure with errno == _errno. */
#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* Map into an explicit IOAS id rather than the fixture's. */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
						  IOMMU_IOAS_MAP_READABLE))

/* Map at a caller-chosen fixed IOVA into the fixture's IOAS. */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

/* Fixed-IOVA map against an explicit IOAS id. */
#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0,                                                  \
			  _test_ioctl_ioas_map(                               \
				  self->fd, ioas_id, buffer, length, &__iova, \
				  IOMMU_IOAS_MAP_FIXED_IOVA |                 \
					  IOMMU_IOAS_MAP_WRITEABLE |          \
					  IOMMU_IOAS_MAP_READABLE));          \
	})

/* Fixed-IOVA map expected to fail with errno == _errno. */
#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
647 
/*
 * Unmap [iova, iova + length) from @ioas_id.  If @out_len is non-NULL
 * it receives the number of bytes the kernel reports as unmapped.
 */
static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
				  size_t length, uint64_t *out_len)
{
	struct iommu_ioas_unmap cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		.iova = iova,
		.length = length,
	};
	int rc = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);

	if (out_len != NULL)
		*out_len = cmd.length;
	return rc;
}
/* Unmap from the fixture's IOAS; asserts success. */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

/* Unmap from an explicit IOAS id; asserts success. */
#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

/* Expect the unmap to fail with errno == _errno. */
#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
675 
/*
 * Map @length bytes of file @mfd (from offset @start) into @ioas_id.
 * Mirrors _test_ioctl_ioas_map(): *iova is the requested address when
 * FIXED_IOVA is set and always receives the final IOVA on return.
 */
static int _test_ioctl_ioas_map_file(int fd, unsigned int ioas_id, int mfd,
				     size_t start, size_t length, __u64 *iova,
				     unsigned int flags)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = flags,
		.ioas_id = ioas_id,
		.fd = mfd,
		.start = start,
		.length = length,
		/* Only meaningful when FIXED_IOVA is requested. */
		.iova = (flags & IOMMU_IOAS_MAP_FIXED_IOVA) ? *iova : 0,
	};
	int rc = ioctl(fd, IOMMU_IOAS_MAP_FILE, &cmd);

	*iova = cmd.iova;
	return rc;
}
697 
/* Map a memfd range into the fixture's IOAS (RW); asserts success. */
#define test_ioctl_ioas_map_file(mfd, start, length, iova_p)                   \
	ASSERT_EQ(0,                                                           \
		  _test_ioctl_ioas_map_file(                                   \
			  self->fd, self->ioas_id, mfd, start, length, iova_p, \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* Same, but expect failure with errno == _errno. */
#define test_err_ioctl_ioas_map_file(_errno, mfd, start, length, iova_p)     \
	EXPECT_ERRNO(                                                        \
		_errno,                                                      \
		_test_ioctl_ioas_map_file(                                   \
			self->fd, self->ioas_id, mfd, start, length, iova_p, \
			IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))

/* File mapping against an explicit IOAS id; asserts success. */
#define test_ioctl_ioas_map_id_file(ioas_id, mfd, start, length, iova_p)     \
	ASSERT_EQ(0,                                                         \
		  _test_ioctl_ioas_map_file(                                 \
			  self->fd, ioas_id, mfd, start, length, iova_p,     \
			  IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))
716 
_test_ioctl_set_temp_memory_limit(int fd,unsigned int limit)717 static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
718 {
719 	struct iommu_test_cmd memlimit_cmd = {
720 		.size = sizeof(memlimit_cmd),
721 		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
722 		.memory_limit = { .limit = limit },
723 	};
724 
725 	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
726 		     &memlimit_cmd);
727 }
728 
/* Set the temporary allocation limit; asserts success. */
#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* Restore the default 64KiB limit. */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
734 
/*
 * Close @fd and then, via a fresh /dev/iommu handle, ask the kernel to
 * verify the refcount on the global test buffer (expected refs field is
 * zero-initialized), i.e. that tearing down the old fd released every
 * pin it held.
 */
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	/* Fixture never opened the device; nothing to tear down. */
	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	/* Reopen so the check runs with no objects from the old fd alive. */
	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}
755 
/*
 * Expect @cmd to fail (return -1) with errno == expected_errno.  Note
 * this expands to two statements; see the braces warning above.
 */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
761 
762 #endif
763 
/*
 * Issue IOMMU_GET_HW_INFO for @device_id and sanity-check the reply.
 *
 * @data can be NULL.  When non-NULL, @data_len bytes of it are handed to
 * the kernel; any trailing bytes beyond what the kernel filled in are
 * asserted to read back as zero.
 * @capabilities: if non-NULL, receives cmd.out_capabilities.
 * @max_pasid: if non-NULL, receives cmd.out_max_pasid_log2.
 *
 * Returns 0 on success or the failing ioctl()'s return value.
 */
static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data,
				 size_t data_len, uint32_t *capabilities,
				 uint8_t *max_pasid)
{
	struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data;
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = device_id,
		.data_len = data_len,
		.data_uptr = (uint64_t)data,
		.out_capabilities = 0,
	};
	int ret;

	ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd);
	if (ret)
		return ret;

	/* The selftest mock driver must report its own data type */
	assert(cmd.out_data_type == IOMMU_HW_INFO_TYPE_SELFTEST);

	/*
	 * The struct iommu_test_hw_info should be the one defined
	 * by the current kernel.
	 */
	assert(cmd.data_len == sizeof(struct iommu_test_hw_info));

	/*
	 * Trailing bytes should be 0 if user buffer is larger than
	 * the data that kernel reports.
	 */
	if (data_len > cmd.data_len) {
		char *ptr = (char *)(data + cmd.data_len);
		int idx = 0;

		while (idx < data_len - cmd.data_len) {
			assert(!*(ptr + idx));
			idx++;
		}
	}

	if (info) {
		/* Only validate fields the caller's buffer actually covers */
		if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))
			assert(info->test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL);
		if (data_len >= offsetofend(struct iommu_test_hw_info, flags))
			assert(!info->flags);
	}

	if (max_pasid)
		*max_pasid = cmd.out_max_pasid_log2;

	if (capabilities)
		*capabilities = cmd.out_capabilities;

	return 0;
}
820 
/* Fetch hw_info into @data via the fixture fd and assert success */
#define test_cmd_get_hw_info(device_id, data, data_len)               \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \
					   data_len, NULL, NULL))

/* Expect the hw_info ioctl to fail with @_errno */
#define test_err_get_hw_info(_errno, device_id, data, data_len)               \
	EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \
						   data_len, NULL, NULL))

/* Fetch only the capability bits into @caps (@mask is not referenced) */
#define test_cmd_get_hw_capabilities(device_id, caps, mask) \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
					   0, &caps, NULL))

/* Fetch only out_max_pasid_log2 into *@max_pasid */
#define test_cmd_get_hw_info_pasid(device_id, max_pasid)              \
	ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, \
					   0, NULL, max_pasid))
836 
/*
 * Allocate an IOPF fault queue.  On success *@fault_id gets the new
 * object ID and *@fault_fd the fd used to read faults / write responses.
 */
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd)
{
	struct iommu_fault_alloc cmd = {
		.size = sizeof(cmd),
	};
	int rc = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd);

	if (rc)
		return rc;
	*fault_id = cmd.out_fault_id;
	*fault_fd = cmd.out_fault_fd;
	return 0;
}
851 
/* Allocate a fault queue; assert success and non-zero id/fd handles */
#define test_ioctl_fault_alloc(fault_id, fault_fd)                       \
	({                                                               \
		ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \
						     fault_fd));         \
		ASSERT_NE(0, *(fault_id));                               \
		ASSERT_NE(0, *(fault_fd));                               \
	})
859 
/*
 * Round-trip one I/O page fault: inject a fault for (@device_id, @pasid)
 * via the mock driver, read the generated fault record from @fault_fd,
 * then write back a SUCCESS page response carrying the fault's cookie.
 *
 * Returns 0 on success, the trigger ioctl's error, or -EIO if reading
 * the fault or writing the response fails.
 */
static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 pasid,
				  __u32 fault_fd)
{
	struct iommu_test_cmd trigger_iopf_cmd = {
		.size = sizeof(trigger_iopf_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_IOPF,
		.trigger_iopf = {
			.dev_id = device_id,
			.pasid = pasid,
			.grpid = 0x2,
			.perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE,
			.addr = 0xdeadbeaf,
		},
	};
	struct iommu_hwpt_page_response response = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};
	struct iommu_hwpt_pgfault fault = {};
	ssize_t bytes;
	int ret;

	ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd);
	if (ret)
		return ret;

	bytes = read(fault_fd, &fault, sizeof(fault));
	if (bytes <= 0)
		return -EIO;

	/* The response must echo the kernel-assigned cookie of this fault */
	response.cookie = fault.cookie;

	bytes = write(fault_fd, &response, sizeof(response));
	if (bytes <= 0)
		return -EIO;

	return 0;
}
897 
/* Trigger an IOPF with the default PASID 0x1 and assert it completes */
#define test_cmd_trigger_iopf(device_id, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, 0x1, fault_fd))
/* Same, but with a caller-chosen @pasid */
#define test_cmd_trigger_iopf_pasid(device_id, pasid, fault_fd) \
	ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, \
					    pasid, fault_fd))
903 
/*
 * Allocate a vIOMMU object of @type on top of @hwpt_id for @device_id.
 * *@viommu_id (optional) receives the new object ID.
 */
static int _test_cmd_viommu_alloc(int fd, __u32 device_id, __u32 hwpt_id,
				  __u32 type, __u32 flags, __u32 *viommu_id)
{
	struct iommu_viommu_alloc cmd = {
		.size = sizeof(cmd),
		.hwpt_id = hwpt_id,
		.dev_id = device_id,
		.type = type,
		.flags = flags,
	};
	int rc = ioctl(fd, IOMMU_VIOMMU_ALLOC, &cmd);

	if (rc)
		return rc;
	if (viommu_id)
		*viommu_id = cmd.out_viommu_id;
	return 0;
}
923 
/* Allocate a vIOMMU (no flags) and assert success */
#define test_cmd_viommu_alloc(device_id, hwpt_id, type, viommu_id)        \
	ASSERT_EQ(0, _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id, \
					    type, 0, viommu_id))
/* Expect vIOMMU allocation to fail with @_errno */
#define test_err_viommu_alloc(_errno, device_id, hwpt_id, type, viommu_id) \
	EXPECT_ERRNO(_errno,                                               \
		     _test_cmd_viommu_alloc(self->fd, device_id, hwpt_id,  \
					    type, 0, viommu_id))
931 
/*
 * Create a virtual device binding @idev_id into @viommu_id under the
 * guest-visible ID @virt_id.  *@vdev_id (optional) gets the object ID.
 */
static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
				   __u64 virt_id, __u32 *vdev_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.dev_id = idev_id,
		.virt_id = virt_id,
	};
	int rc = ioctl(fd, IOMMU_VDEVICE_ALLOC, &cmd);

	if (rc)
		return rc;
	if (vdev_id)
		*vdev_id = cmd.out_vdevice_id;
	return 0;
}
950 
/* Allocate a vdevice and assert success */
#define test_cmd_vdevice_alloc(viommu_id, idev_id, virt_id, vdev_id)       \
	ASSERT_EQ(0, _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, \
					     virt_id, vdev_id))
/* Expect vdevice allocation to fail with @_errno */
#define test_err_vdevice_alloc(_errno, viommu_id, idev_id, virt_id, vdev_id) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
					     virt_id, vdev_id))
958 
/*
 * Allocate a virtual event queue of @type on @viommu_id.  The depth is
 * fixed at 2 entries, the value the selftests rely on.  *@veventq_id and
 * *@veventq_fd are each optional outputs.
 */
static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
				   __u32 *veventq_id, __u32 *veventq_fd)
{
	struct iommu_veventq_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.type = type,
		.veventq_depth = 2,
	};
	int rc = ioctl(fd, IOMMU_VEVENTQ_ALLOC, &cmd);

	if (rc)
		return rc;
	if (veventq_id)
		*veventq_id = cmd.out_veventq_id;
	if (veventq_fd)
		*veventq_fd = cmd.out_veventq_fd;
	return 0;
}
979 
/* Allocate a veventq and assert success */
#define test_cmd_veventq_alloc(viommu_id, type, veventq_id, veventq_fd) \
	ASSERT_EQ(0, _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
					     veventq_id, veventq_fd))
/* Expect veventq allocation to fail with @_errno */
#define test_err_veventq_alloc(_errno, viommu_id, type, veventq_id,     \
			       veventq_fd)                              \
	EXPECT_ERRNO(_errno,                                            \
		     _test_cmd_veventq_alloc(self->fd, viommu_id, type, \
					     veventq_id, veventq_fd))
988 
/*
 * Fire @nvevents virtual events from mock device @dev_id.
 *
 * Returns 0 on success (including nvevents == 0) or -1 if any trigger
 * ioctl fails.
 */
static int _test_cmd_trigger_vevents(int fd, __u32 dev_id, __u32 nvevents)
{
	struct iommu_test_cmd trigger_vevent_cmd = {
		.size = sizeof(trigger_vevent_cmd),
		.op = IOMMU_TEST_OP_TRIGGER_VEVENT,
		.trigger_vevent = {
			.dev_id = dev_id,
		},
	};
	/*
	 * Fix: ret was uninitialized; with nvevents == 0 the loop never
	 * runs and the old code returned an indeterminate value (UB).
	 */
	int ret = 0;

	while (nvevents--) {
		ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_VEVENT),
			    &trigger_vevent_cmd);
		if (ret < 0)
			return -1;
	}
	return ret;
}
1008 
/* Fire @nvevents vevents from @dev_id and assert every trigger succeeds */
#define test_cmd_trigger_vevents(dev_id, nvevents) \
	ASSERT_EQ(0, _test_cmd_trigger_vevents(self->fd, dev_id, nvevents))
1011 
/*
 * Drain @nvevents records from the veventq @event_fd and validate them.
 * Each record is a struct iommufd_vevent_header immediately followed by
 * a struct iommu_viommu_event_selftest.
 *
 * Fails (-1) with errno set to:
 *   ETIMEDOUT - no event became ready within 1s
 *   ENOMEM    - buffer allocation failed
 *   EFAULT    - the read() itself failed
 *   EOVERFLOW - lost-events flag set or a sequence number was skipped
 *   EINVAL    - an event carries an unexpected virt_id
 *
 * @prev_seq carries the last-seen sequence number across calls.
 */
static int _test_cmd_read_vevents(int fd, __u32 event_fd, __u32 nvevents,
				  __u32 virt_id, int *prev_seq)
{
	struct pollfd pollfd = { .fd = event_fd, .events = POLLIN };
	struct iommu_viommu_event_selftest *event;
	struct iommufd_vevent_header *hdr;
	ssize_t bytes;
	void *data;
	int ret, i;

	ret = poll(&pollfd, 1, 1000);
	if (ret < 0)
		return -1;
	if (!ret) {
		/* Fix: don't fall into a potentially blocking read() */
		errno = ETIMEDOUT;
		return -1;
	}

	data = calloc(nvevents, sizeof(*hdr) + sizeof(*event));
	if (!data) {
		errno = ENOMEM;
		return -1;
	}

	bytes = read(event_fd, data,
		     nvevents * (sizeof(*hdr) + sizeof(*event)));
	if (bytes <= 0) {
		errno = EFAULT;
		ret = -1;
		goto out_free;
	}

	for (i = 0; i < nvevents; i++) {
		hdr = data + i * (sizeof(*hdr) + sizeof(*event));

		if (hdr->flags & IOMMU_VEVENTQ_FLAG_LOST_EVENTS ||
		    hdr->sequence - *prev_seq > 1) {
			*prev_seq = hdr->sequence;
			errno = EOVERFLOW;
			ret = -1;
			goto out_free;
		}
		*prev_seq = hdr->sequence;
		/*
		 * Fix: validate the i-th event's payload; the old code
		 * computed "data + sizeof(*hdr)" and so re-checked the
		 * first event's virt_id on every iteration.
		 */
		event = (struct iommu_viommu_event_selftest *)(hdr + 1);
		if (event->virt_id != virt_id) {
			errno = EINVAL;
			ret = -1;
			goto out_free;
		}
	}

	ret = 0;
out_free:
	free(data);
	return ret;
}
1064 
/* Read and validate @nvevents events, asserting success */
#define test_cmd_read_vevents(event_fd, nvevents, virt_id, prev_seq)      \
	ASSERT_EQ(0, _test_cmd_read_vevents(self->fd, event_fd, nvevents, \
					    virt_id, prev_seq))
/* Expect the vevent read path to fail with @_errno */
#define test_err_read_vevents(_errno, event_fd, nvevents, virt_id, prev_seq) \
	EXPECT_ERRNO(_errno,                                                 \
		     _test_cmd_read_vevents(self->fd, event_fd, nvevents,    \
					    virt_id, prev_seq))
1072 
/* Attach the page table @pt_id to @pasid of mock device @stdev_id. */
static int _test_cmd_pasid_attach(int fd, __u32 stdev_id, __u32 pasid,
				  __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.op = IOMMU_TEST_OP_PASID_ATTACH,
		.size = sizeof(cmd),
		.id = stdev_id,
		.pasid_attach = { .pt_id = pt_id, .pasid = pasid },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_ATTACH), &cmd);
}
1089 
/* Attach @hwpt_id to @pasid on the fixture's stdev, asserting success */
#define test_cmd_pasid_attach(pasid, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))

/* Expect the PASID attach to fail with @_errno */
#define test_err_pasid_attach(_errno, pasid, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_pasid_attach(self->fd, self->stdev_id, \
					    pasid, hwpt_id))
1098 
/* Replace the page table already attached to @pasid with @pt_id. */
static int _test_cmd_pasid_replace(int fd, __u32 stdev_id, __u32 pasid,
				   __u32 pt_id)
{
	struct iommu_test_cmd cmd = {
		.op = IOMMU_TEST_OP_PASID_REPLACE,
		.size = sizeof(cmd),
		.id = stdev_id,
		.pasid_replace = { .pt_id = pt_id, .pasid = pasid },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_REPLACE), &cmd);
}
1115 
/* Replace @pasid's attachment with @hwpt_id, asserting success */
#define test_cmd_pasid_replace(pasid, hwpt_id) \
	ASSERT_EQ(0, _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))

/* Expect the PASID replace to fail with @_errno */
#define test_err_pasid_replace(_errno, pasid, hwpt_id) \
	EXPECT_ERRNO(_errno, \
		     _test_cmd_pasid_replace(self->fd, self->stdev_id, \
					     pasid, hwpt_id))
1124 
/* Detach whatever is currently attached to @pasid of @stdev_id. */
static int _test_cmd_pasid_detach(int fd, __u32 stdev_id, __u32 pasid)
{
	struct iommu_test_cmd cmd = {
		.op = IOMMU_TEST_OP_PASID_DETACH,
		.size = sizeof(cmd),
		.id = stdev_id,
		.pasid_detach = { .pasid = pasid },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_DETACH), &cmd);
}
1139 
/* Detach @pasid on the fixture's stdev, asserting success */
#define test_cmd_pasid_detach(pasid) \
	ASSERT_EQ(0, _test_cmd_pasid_detach(self->fd, self->stdev_id, pasid))
1142 
/*
 * Ask the mock driver whether @pasid of @stdev_id is currently backed by
 * @hwpt_id; returns the ioctl result directly (0 when it matches).
 */
static int test_cmd_pasid_check_hwpt(int fd, __u32 stdev_id, __u32 pasid,
				     __u32 hwpt_id)
{
	struct iommu_test_cmd cmd = {
		.op = IOMMU_TEST_OP_PASID_CHECK_HWPT,
		.size = sizeof(cmd),
		.id = stdev_id,
		.pasid_check = { .hwpt_id = hwpt_id, .pasid = pasid },
	};

	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_PASID_CHECK_HWPT),
		     &cmd);
}
1159