/* xref: /linux/tools/testing/selftests/iommu/iommufd.c (revision 7f71507851fc7764b36a3221839607d3a45c2025) */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#include <asm/unistd.h>
#include <stdlib.h>
#include <sys/capability.h>
#include <sys/mman.h>
#include <sys/eventfd.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static unsigned long HUGEPAGE_SIZE;

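/*
 * The mock IOMMU advertises a page size of half the CPU page size, so the
 * tests below can exercise mappings that are not CPU-page aligned.
 */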
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)

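/*
 * Read the THP PMD size from sysfs (the file holds a decimal byte count,
 * e.g. "2097152"), falling back to 2MiB when it is unavailable.
 */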
static unsigned long get_huge_page_size(void)
{
	char buf[80];
	int ret;
	int fd;

	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
		  O_RDONLY);
	if (fd < 0)
		return 2 * 1024 * 1024;

	ret = read(fd, buf, sizeof(buf));
	close(fd);
	if (ret <= 0 || ret == sizeof(buf))
		return 2 * 1024 * 1024;
	buf[ret] = 0;
	return strtoul(buf, NULL, 10);
}

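/*
 * Constructor that runs before the harness: buffer, mfd_buffer, mfd,
 * PAGE_SIZE and BUFFER_SIZE are globals declared in iommufd_utils.h. The
 * anonymous buffer is remapped MAP_SHARED so that forked children (e.g. the
 * fork_* tests) operate on the same pages as the parent.
 */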
static __attribute__((constructor)) void setup_sizes(void)
{
	void *vrc;
	int rc;

	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
	HUGEPAGE_SIZE = get_huge_page_size();

	BUFFER_SIZE = PAGE_SIZE * 16;
	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
	assert(!rc);
	assert(buffer);
	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(vrc == buffer);

	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
				&mfd);
	/* memfd_mmap() returns MAP_FAILED on any failure; treat it as fatal */
	assert(mfd_buffer != MAP_FAILED);
}

FIXTURE(iommufd)
{
	int fd;
};

FIXTURE_SETUP(iommufd)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
}

FIXTURE_TEARDOWN(iommufd)
{
	teardown_iommufd(self->fd, _metadata);
}

TEST_F(iommufd, simple_close)
{
}

TEST_F(iommufd, cmd_fail)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };

	/* Object ID is invalid */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
	/* Bad pointer */
	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
	/* Unknown ioctl */
	EXPECT_ERRNO(ENOTTY,
		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
			   &cmd));
}

TEST_F(iommufd, cmd_length)
{
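	/*
	 * Probe the ioctl size contract for each command: a size shorter
	 * than the last documented field fails with EINVAL, a longer size
	 * with non-zero trailing bytes fails with E2BIG, and a longer size
	 * whose trailing bytes are all zero must behave exactly like the
	 * exact-size call (forward compatibility). For IOMMU_DESTROY, e.g.:
	 *   size <  offsetofend(struct iommu_destroy, id)  -> EINVAL
	 *   size == sizeof(struct) + 1, extra byte != 0    -> E2BIG
	 *   size == sizeof(struct) + 1, extra byte == 0    -> same as exact
	 */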
#define TEST_LENGTH(_struct, _ioctl, _last)                              \
	{                                                                \
		size_t min_size = offsetofend(struct _struct, _last);    \
		struct {                                                 \
			struct _struct cmd;                              \
			uint8_t extra;                                   \
		} cmd = { .cmd = { .size = min_size - 1 },               \
			  .extra = UINT8_MAX };                          \
		int old_errno;                                           \
		int rc;                                                  \
									 \
		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
		cmd.cmd.size = sizeof(struct _struct);                   \
		rc = ioctl(self->fd, _ioctl, &cmd);                      \
		old_errno = errno;                                       \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		cmd.extra = 0;                                           \
		if (rc) {                                                \
			EXPECT_ERRNO(old_errno,                          \
				     ioctl(self->fd, _ioctl, &cmd));     \
		} else {                                                 \
			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
		}                                                        \
	}

	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
		    out_iova_alignment);
	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
		    allowed_iovas);
	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
	TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
	TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
	TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
	TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
		    __reserved);
#undef TEST_LENGTH
}

TEST_F(iommufd, cmd_ex_fail)
{
	struct {
		struct iommu_destroy cmd;
		__u64 future;
	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };

	/* Object ID is invalid and the command is longer */
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Future area is non-zero */
	cmd.future = 1;
	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Original command "works" */
	cmd.cmd.size = sizeof(cmd.cmd);
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Short command fails */
	cmd.cmd.size = sizeof(cmd.cmd) - 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}

TEST_F(iommufd, global_options)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_RLIMIT_MODE,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 1,
	};

	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* This requires root */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.op = IOMMU_OPTION_OP_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
}

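/*
 * Without CAP_IPC_LOCK the kernel actually enforces the locked-memory
 * limits on pinned pages, which is what the accounting tests below need to
 * observe.
 */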
static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
{
	cap_t caps;
	cap_value_t cap_list[1] = { CAP_IPC_LOCK };

	caps = cap_get_proc();
	ASSERT_NE(caps, NULL);
	ASSERT_NE(-1,
		  cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
	ASSERT_NE(-1, cap_set_proc(caps));
	cap_free(caps);
}

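/*
 * Scan /proc/<pid>/status for a "Tag: value" line and return the value (in
 * kB), or -1 on failure. Note the loop alternates fgets() and fscanf() on
 * the same stream, so each pass consumes one line and parses the start of
 * the next; coarse, but sufficient for the VmPin:/VmLck: fields used here.
 */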
static long get_proc_status_value(pid_t pid, const char *var)
{
	FILE *fp;
	char buf[80], tag[80];
	long val = -1;

	snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
	fp = fopen(buf, "r");
	if (!fp)
		return val;

	while (fgets(buf, sizeof(buf), fp))
		if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
			break;

	fclose(fp);
	return val;
}

static long get_vm_pinned(pid_t pid)
{
	return get_proc_status_value(pid, "VmPin:");
}

static long get_vm_locked(pid_t pid)
{
	return get_proc_status_value(pid, "VmLck:");
}

FIXTURE(change_process)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_VARIANT(change_process)
{
	int accounting;
};

FIXTURE_SETUP(change_process)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	drop_cap_ipc_lock(_metadata);
	if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
		struct iommu_option set_limit_cmd = {
			.size = sizeof(set_limit_cmd),
			.option_id = IOMMU_OPTION_RLIMIT_MODE,
			.op = IOMMU_OPTION_OP_SET,
			.val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
		};
		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
	}

	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
}

FIXTURE_TEARDOWN(change_process)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(change_process, account_none)
{
	.accounting = IOPT_PAGES_ACCOUNT_NONE,
};

FIXTURE_VARIANT_ADD(change_process, account_user)
{
	.accounting = IOPT_PAGES_ACCOUNT_USER,
};

FIXTURE_VARIANT_ADD(change_process, account_mm)
{
	.accounting = IOPT_PAGES_ACCOUNT_MM,
};

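/*
 * The variants cover the three pinned-page accounting modes: no rlimit
 * accounting, charging the user (the default), or charging the owning mm,
 * which additionally shows up as VmLck: in /proc/<pid>/status.
 */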
TEST_F(change_process, basic)
{
	pid_t parent = getpid();
	pid_t child;
	__u64 iova;
	struct iommu_ioas_change_process cmd = {
		.size = sizeof(cmd),
	};

	/* Expect failure if non-file maps exist */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
	test_ioctl_ioas_unmap(iova, PAGE_SIZE);

	/* Change process works in the current process */
	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));

	/* Change process works in another process */
	child = fork();
	if (!child) {
		int nlock = PAGE_SIZE / 1024;

		/* The parent accounts for the locked memory before */
		ASSERT_EQ(nlock, get_vm_pinned(parent));
		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
			ASSERT_EQ(nlock, get_vm_locked(parent));
		ASSERT_EQ(0, get_vm_pinned(getpid()));
		ASSERT_EQ(0, get_vm_locked(getpid()));

		ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));

		/* The child accounts for the locked memory after */
		ASSERT_EQ(0, get_vm_pinned(parent));
		ASSERT_EQ(0, get_vm_locked(parent));
		ASSERT_EQ(nlock, get_vm_pinned(getpid()));
		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
			ASSERT_EQ(nlock, get_vm_locked(getpid()));

		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));
}

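/*
 * The iommufd_ioas tests run against an IOAS backed by zero, one or two
 * mock domains and, for mock_domain_limit, an artificially small memory
 * limit, as selected by the fixture variants below.
 */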
FIXTURE(iommufd_ioas)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;
	uint32_t hwpt_id;
	uint32_t device_id;
	uint64_t base_iova;
};

FIXTURE_VARIANT(iommufd_ioas)
{
	unsigned int mock_domains;
	unsigned int memory_limit;
};

FIXTURE_SETUP(iommufd_ioas)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	if (!variant->memory_limit) {
		test_ioctl_set_default_memory_limit();
	} else {
		test_ioctl_set_temp_memory_limit(variant->memory_limit);
	}

	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
				     &self->hwpt_id, &self->device_id);
		test_cmd_dev_check_cache_all(self->device_id,
					     IOMMU_TEST_DEV_CACHE_DEFAULT);
		self->base_iova = MOCK_APERTURE_START;
	}
}

FIXTURE_TEARDOWN(iommufd_ioas)
{
	test_ioctl_set_default_memory_limit();
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
	.mock_domains = 1,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
	.mock_domains = 2,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
	.mock_domains = 1,
	.memory_limit = 16,
};

TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}

TEST_F(iommufd_ioas, ioas_destroy)
{
	if (self->stdev_id) {
		/* IOAS cannot be freed while a device has a HWPT using it */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	} else {
		/* Can allocate and manually free an IOAS table */
		test_ioctl_destroy(self->ioas_id);
	}
}

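/*
 * Nesting: a parent HWPT allocated with IOMMU_HWPT_ALLOC_NEST_PARENT can
 * host nested HWPTs carrying driver-specific data (here the selftest's
 * iommu_hwpt_selftest, which models a small IOTLB); invalidations are then
 * driven through IOMMU_HWPT_INVALIDATE with an array of requests.
 */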
TEST_F(iommufd_ioas, alloc_hwpt_nested)
{
	const uint32_t min_data_len =
		offsetofend(struct iommu_hwpt_selftest, iotlb);
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
	uint32_t nested_hwpt_id[2] = {};
	uint32_t num_inv;
	uint32_t parent_hwpt_id = 0;
	uint32_t parent_hwpt_id_not_work = 0;
	uint32_t test_hwpt_id = 0;
	uint32_t iopf_hwpt_id;
	uint32_t fault_id;
	uint32_t fault_fd;

	if (self->device_id) {
		/* Negative tests */
		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
				    &test_hwpt_id);
		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
				    &test_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &parent_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id_not_work);

		/* Negative nested tests */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_NONE, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   min_data_len - 1);
		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, NULL,
					   sizeof(data));
		test_err_hwpt_alloc_nested(
			EOPNOTSUPP, self->device_id, parent_hwpt_id,
			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id_not_work, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));

		/* Allocate two nested hwpts sharing one common parent hwpt */
		test_ioctl_fault_alloc(&fault_id, &fault_fd);
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
					 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
					 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
					 &data, sizeof(data));
		test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
					 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
					 IOMMU_HWPT_DATA_SELFTEST, &data,
					 sizeof(data));
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
					      IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
					      IOMMU_TEST_IOTLB_DEFAULT);

		/* Negative test: a nested hwpt on top of a nested hwpt */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   nested_hwpt_id[0], 0, &test_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		/* Negative test: parent hwpt now cannot be freed */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, parent_hwpt_id));

		/* hwpt_invalidate does not support a parent hwpt */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Check data_type by passing a zero-length array */
		num_inv = 0;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_type */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: structure size sanity */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs) + 1, &num_inv);
		assert(!num_inv);

		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 1, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid flag is passed */
		num_inv = 1;
		inv_reqs[0].flags = 0xffffffff;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_uptr when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid entry_len when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 0, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid iotlb_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to an invalid flags configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0xffffffff;
		inv_reqs[1].iotlb_id = 1;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to an invalid iotlb_id configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 2nd iotlb entry and verify */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 1;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 3rd and 4th iotlb entries and verify */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 2;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = 3;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 2);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);

		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
		num_inv = 1;
		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);

		/* Attach the device to nested_hwpt_id[0], which then becomes busy */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));

		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
		test_ioctl_destroy(nested_hwpt_id[0]);

		/* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
		/* Trigger an IOPF on the device */
		test_cmd_trigger_iopf(self->device_id, fault_fd);

		/* Detach by moving back to the parent, then destroy the remaining nested HWPTs */
		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
		test_ioctl_destroy(nested_hwpt_id[1]);
		test_ioctl_destroy(iopf_hwpt_id);

		/* Detach from the parent hw_pagetable and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(parent_hwpt_id);
		test_ioctl_destroy(parent_hwpt_id_not_work);
		close(fault_fd);
		test_ioctl_destroy(fault_id);
	} else {
		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id);
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[0]);
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[1]);
	}
}

TEST_F(iommufd_ioas, hwpt_attach)
{
	/* Create a device attached directly to a hwpt */
	if (self->stdev_id) {
		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
	} else {
		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
	}
}

TEST_F(iommufd_ioas, ioas_area_destroy)
{
	/* Adding an area does not change the ability to destroy */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
	if (self->stdev_id)
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	else
		test_ioctl_destroy(self->ioas_id);
}

TEST_F(iommufd_ioas, ioas_area_auto_destroy)
{
	int i;

	/* Can allocate and automatically free an IOAS table with many areas */
	for (i = 0; i != 10; i++) {
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	}
}

TEST_F(iommufd_ioas, get_hw_info)
{
	struct iommu_test_hw_info buffer_exact;
	struct iommu_test_hw_info_buffer_larger {
		struct iommu_test_hw_info info;
		uint64_t trailing_bytes;
	} buffer_larger;
	struct iommu_test_hw_info_buffer_smaller {
		__u32 flags;
	} buffer_smaller;

	if (self->device_id) {
		/* Provide a zero-size user_buffer */
		test_cmd_get_hw_info(self->device_id, NULL, 0);
		/* Provide a user_buffer with the exact size */
		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
		/*
		 * Provide a user_buffer larger than the exact size to check
		 * that the kernel zeroes the trailing bytes.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
		/*
		 * Provide a user_buffer smaller than the exact size to check
		 * that the fields within the size range still get updated.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
	} else {
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_exact, sizeof(buffer_exact));
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_larger, sizeof(buffer_larger));
	}
}

TEST_F(iommufd_ioas, area)
{
	int i;

	/* Unmap fails if nothing is mapped */
	for (i = 0; i != 10; i++)
		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);

	/* Unmap works */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
				      PAGE_SIZE);

	/* Splitting an area fails */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
				  self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
				  PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
				  PAGE_SIZE);

	/* Overmapping fails */
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 17 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 15 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
				      self->base_iova + 15 * PAGE_SIZE);

	/* Unmap all works */
	test_ioctl_ioas_unmap(0, UINT64_MAX);

	/* Unmap all succeeds on an empty IOAS */
	test_ioctl_ioas_unmap(0, UINT64_MAX);
}

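/* IOMMU_IOAS_UNMAP only tears down areas that lie wholly inside the range */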
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
	uint64_t unmap_len;
	int i;

	/* Give no_domain some space to rewind base_iova */
	self->base_iova += 4 * PAGE_SIZE;

	for (i = 0; i != 4; i++)
		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
					  self->base_iova + i * 16 * PAGE_SIZE);

	/* Unmapping a partially contained area doesn't work */
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT,
				  self->base_iova + 3 * 16 * PAGE_SIZE +
					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);

	/* Unmapping fully contained areas works */
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
					    self->base_iova - 4 * PAGE_SIZE,
					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
						    4 * PAGE_SIZE,
					    &unmap_len));
	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}

TEST_F(iommufd_ioas, area_auto_iova)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};
	__u64 iovas[10];
	int i;

	/* Simple 4k pages */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);

	/* Kernel automatically aligns IOVAs properly */
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		if (self->stdev_id) {
			test_ioctl_ioas_map(buffer, length, &iovas[i]);
		} else {
			test_ioctl_ioas_map((void *)(1UL << 31), length,
					    &iovas[i]);
		}
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Avoids a reserved region */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(false,
			  iovas[i] > test_cmd.add_reserved.start &&
				  iovas[i] <
					  test_cmd.add_reserved.start +
						  test_cmd.add_reserved.length);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Allowed region intersects with a reserved region */
	ranges[0].start = PAGE_SIZE;
	ranges[0].last = PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allocate from an allowed region */
	if (self->stdev_id) {
		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
	} else {
		ranges[0].start = PAGE_SIZE * 200;
		ranges[0].last = PAGE_SIZE * 600 - 1;
	}
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}

TEST_F(iommufd_ioas, area_allowed)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Reserved intersects an allowed */
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
	test_cmd.add_reserved.length = PAGE_SIZE;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			   &test_cmd));
	allow_cmd.num_iovas = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allowed intersects a reserved */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}

TEST_F(iommufd_ioas, copy_area)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.dst_ioas_id = self->ioas_id,
		.src_ioas_id = self->ioas_id,
		.length = PAGE_SIZE,
	};

	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);

	/* Copy inside a single IOAS */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));

	/* Copy between IOASes */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = 0;
	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
}

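/*
 * IOMMU_IOAS_IOVA_RANGES fills as many entries as the caller's num_iovas
 * allows, always writes back the required count, and fails with EMSGSIZE
 * when the supplied array was too small.
 */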
TEST_F(iommufd_ioas, iova_ranges)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
	};
	struct iommu_iova_range *ranges = buffer;
	struct iommu_ioas_iova_ranges ranges_cmd = {
		.size = sizeof(ranges_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Range can be read */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	if (!self->stdev_id) {
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(SIZE_MAX, ranges[0].last);
		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
	} else {
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 0;
	EXPECT_ERRNO(EMSGSIZE,
		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	EXPECT_EQ(0, ranges[0].start);
	EXPECT_EQ(0, ranges[0].last);

	/* 2 ranges */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	if (!self->stdev_id) {
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
		EXPECT_EQ(SIZE_MAX, ranges[1].last);
	} else {
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 1;
	if (!self->stdev_id) {
		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
					     &ranges_cmd));
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
	} else {
		ASSERT_EQ(0,
			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}
	EXPECT_EQ(0, ranges[1].start);
	EXPECT_EQ(0, ranges[1].last);
}

TEST_F(iommufd_ioas, access_domain_destroy)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
				  .length = PAGE_SIZE},
	};
	size_t buf_size = 2 * HUGEPAGE_SIZE;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));

	/* Causes a complicated unpin across a huge page boundary */
	if (self->stdev_id)
		test_ioctl_destroy(self->stdev_id);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
	ASSERT_EQ(0, munmap(buf, buf_size));
}

TEST_F(iommufd_ioas, access_pin)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_test_cmd check_map_cmd = {
		.size = sizeof(check_map_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
		.check_map = { .iova = MOCK_APERTURE_START,
			       .length = BUFFER_SIZE,
			       .uptr = (uintptr_t)buffer },
	};
	uint32_t access_pages_id;
	unsigned int npages;

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
		uint32_t mock_stdev_id;
		uint32_t mock_hwpt_id;

		access_cmd.access_pages.length = npages * PAGE_SIZE;

		/* Single map/unmap */
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		/* Double user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);
		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);

		/* Add/remove a domain with a user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
				     &mock_hwpt_id, NULL);
		check_map_cmd.id = mock_hwpt_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
				   &check_map_cmd));

		test_ioctl_destroy(mock_stdev_id);
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
	}
	test_cmd_destroy_access(access_cmd.id);
}

TEST_F(iommufd_ioas, access_pin_unmap)
{
	struct iommu_test_cmd access_pages_cmd = {
		.size = sizeof(access_pages_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_pages_cmd));

	/* Trigger the unmap op */
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);

	/* The kernel removed the item for us */
	test_err_destroy_access_pages(
		ENOENT, access_pages_cmd.id,
		access_pages_cmd.access_pages.out_access_pages_id);
}

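/*
 * Sweep emulated-access reads and writes across a page boundary at every
 * offset and length up to sizeof(tmp), verifying the data against the
 * shared buffer, then finish with one large multi-page transfer.
 */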
static void check_access_rw(struct __test_metadata *_metadata, int fd,
			    unsigned int access_id, uint64_t iova,
			    unsigned int def_flags)
{
	uint16_t tmp[32];
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_RW,
		.id = access_id,
		.access_rw = { .uptr = (uintptr_t)tmp },
	};
	uint16_t *buffer16 = buffer;
	unsigned int i;
	void *tmp2;

	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
		buffer16[i] = rand();

	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
	     access_cmd.access_rw.iova++) {
		for (access_cmd.access_rw.length = 1;
		     access_cmd.access_rw.length < sizeof(tmp);
		     access_cmd.access_rw.length++) {
			access_cmd.access_rw.flags = def_flags;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));

			for (i = 0; i != ARRAY_SIZE(tmp); i++)
				tmp[i] = rand();
			access_cmd.access_rw.flags = def_flags |
						     MOCK_ACCESS_RW_WRITE;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));
		}
	}

	/* Multi-page test */
	tmp2 = malloc(BUFFER_SIZE);
	ASSERT_NE(NULL, tmp2);
	access_cmd.access_rw.iova = iova;
	access_cmd.access_rw.length = BUFFER_SIZE;
	access_cmd.access_rw.flags = def_flags;
	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			   &access_cmd));
	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
	free(tmp2);
}

TEST_F(iommufd_ioas, access_rw)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);
	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	check_access_rw(_metadata, self->fd, access_id, iova,
			MOCK_ACCESS_RW_SLOW_PATH);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, access_rw_unaligned)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Unaligned pages */
	iova = self->base_iova + MOCK_PAGE_SIZE;
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}

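/*
 * Pinning is performed against the mm that issued the map ioctl; the fork
 * tests below check behaviour when that mm has already exited (fork_gone)
 * versus when it is kept alive (fork_present).
 */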
TEST_F(iommufd_ioas, fork_gone)
{
	__u32 access_id;
	pid_t child;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	if (self->stdev_id) {
		/*
		 * If a domain already existed then everything was pinned within
		 * the fork, so this copies from one domain to another.
		 */
		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
		check_access_rw(_metadata, self->fd, access_id,
				MOCK_APERTURE_START, 0);

	} else {
		/*
		 * Otherwise we need to actually pin pages which can't happen
		 * since the fork is gone.
		 */
		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
	}

	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, fork_present)
{
	__u32 access_id;
	int pipefds[2];
	uint64_t tmp;
	pid_t child;
	int efd;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
	efd = eventfd(0, EFD_CLOEXEC);
	ASSERT_NE(-1, efd);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		__u64 iova;
		uint64_t one = 1;

		close(pipefds[1]);
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		if (write(efd, &one, sizeof(one)) != sizeof(one))
			exit(100);
		if (read(pipefds[0], &iova, 1) != 1)
			exit(100);
		exit(0);
	}
	close(pipefds[0]);
	ASSERT_NE(-1, child);
	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));

	/* Read pages from the remote process */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);

	ASSERT_EQ(0, close(pipefds[1]));
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.val64 = 3;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}

TEST_F(iommufd_ioas, ioas_iova_alloc)
{
	unsigned int length;
	__u64 iova;

	for (length = 1; length != PAGE_SIZE * 2; length++) {
		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
		} else {
			test_ioctl_ioas_map(buffer, length, &iova);
			test_ioctl_ioas_unmap(iova, length);
		}
	}
}

TEST_F(iommufd_ioas, ioas_align_change)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = self->ioas_id,
		/* 0 means everything must be aligned to PAGE_SIZE */
		.val64 = 0,
	};

	/*
	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
	 * and map are present.
	 */
	if (variant->mock_domains)
		return;

	/*
	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
	 */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Misalignment is rejected at map time */
	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
				      PAGE_SIZE,
				      MOCK_APERTURE_START + PAGE_SIZE);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Reduce alignment */
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Confirm misalignment is rejected during alignment upgrade */
	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
				  MOCK_APERTURE_START + PAGE_SIZE);
	cmd.val64 = 0;
	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}

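/*
 * Slide a small copy window across the whole mapped range; the copy must
 * fail with ENOENT whenever the source window is not fully covered by the
 * mapping.
 */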
TEST_F(iommufd_ioas, copy_sweep)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.src_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = MOCK_PAGE_SIZE,
	};
	unsigned int dst_ioas_id;
	uint64_t last_iova;
	uint64_t iova;

	test_ioctl_ioas_alloc(&dst_ioas_id);
	copy_cmd.dst_ioas_id = dst_ioas_id;

	if (variant->mock_domains)
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
	else
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;

	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
				  MOCK_APERTURE_START);

	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
	     iova += 511) {
		copy_cmd.src_iova = iova;
		if (iova < MOCK_APERTURE_START ||
		    iova + copy_cmd.length - 1 > last_iova) {
			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
						   &copy_cmd));
		} else {
			ASSERT_EQ(0,
				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
						 copy_cmd.length);
		}
	}

	test_ioctl_destroy(dst_ioas_id);
}

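/*
 * These tests map through one or two real mock iommu_domains; the variants
 * select anonymous vs. memfd-backed memory and whether the buffer is
 * huge-page backed.
 */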
FIXTURE(iommufd_mock_domain)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t hwpt_ids[2];
	uint32_t stdev_ids[2];
	uint32_t idev_ids[2];
	int mmap_flags;
	size_t mmap_buf_size;
};

FIXTURE_VARIANT(iommufd_mock_domain)
{
	unsigned int mock_domains;
	bool hugepages;
	bool file;
};

FIXTURE_SETUP(iommufd_mock_domain)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);

	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
				     &self->hwpt_ids[i], &self->idev_ids[i]);
		test_cmd_dev_check_cache_all(self->idev_ids[i],
					     IOMMU_TEST_DEV_CACHE_DEFAULT);
	}
	self->hwpt_id = self->hwpt_ids[0];

	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
	self->mmap_buf_size = PAGE_SIZE * 8;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
	}
}

FIXTURE_TEARDOWN(iommufd_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
	.mock_domains = 1,
	.hugepages = false,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
	.mock_domains = 2,
	.hugepages = false,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
	.mock_domains = 2,
	.hugepages = true,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
{
	.mock_domains = 1,
	.hugepages = false,
	.file = true,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
	.file = true,
};

/* Have the kernel check that the user pages made it to the iommu_domain */
#define check_mock_iova(_ptr, _iova, _length)                                \
	({                                                                   \
		struct iommu_test_cmd check_map_cmd = {                      \
			.size = sizeof(check_map_cmd),                       \
			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
			.id = self->hwpt_id,                                 \
			.check_map = { .iova = _iova,                        \
				       .length = _length,                    \
				       .uptr = (uintptr_t)(_ptr) },          \
		};                                                           \
		ASSERT_EQ(0,                                                 \
			  ioctl(self->fd,                                    \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
				&check_map_cmd));                            \
		if (self->hwpt_ids[1]) {                                     \
			check_map_cmd.id = self->hwpt_ids[1];                \
			ASSERT_EQ(0,                                         \
				  ioctl(self->fd,                            \
					_IOMMU_TEST_CMD(                     \
						IOMMU_TEST_OP_MD_CHECK_MAP), \
					&check_map_cmd));                    \
		}                                                            \
	})

static void
test_basic_mmap(struct __test_metadata *_metadata,
		struct _test_data_iommufd_mock_domain *self,
		const struct _fixture_variant_iommufd_mock_domain *variant)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;

	/* Simple one page map */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	check_mock_iova(buffer, iova, PAGE_SIZE);

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);

	/* EFAULT half way through mapping */
	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);

	/* EFAULT on first page */
	ASSERT_EQ(0, munmap(buf, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}

static void
test_basic_file(struct __test_metadata *_metadata,
		struct _test_data_iommufd_mock_domain *self,
		const struct _fixture_variant_iommufd_mock_domain *variant)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;
	int mfd_tmp;
	int prot = PROT_READ | PROT_WRITE;

	/* Simple one page map */
	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);

	buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
	ASSERT_NE(MAP_FAILED, buf);

	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);

	ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);

	close(mfd_tmp);
}

TEST_F(iommufd_mock_domain, basic)
{
	if (variant->file)
		test_basic_file(_metadata, self, variant);
	else
		test_basic_mmap(_metadata, self, variant);
}

TEST_F(iommufd_mock_domain, ro_unshare)
{
	uint8_t *buf;
	__u64 iova;
	int fd;

	fd = open("/proc/self/exe", O_RDONLY);
	ASSERT_NE(-1, fd);

	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	ASSERT_NE(MAP_FAILED, buf);
	close(fd);

	/*
	 * There have been lots of changes to the "unshare" mechanism in
	 * get_user_pages(); make sure it works right. The write to the page
	 * after we map it for reading should not change the assigned PFN.
	 */
	ASSERT_EQ(0,
		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
				       &iova, IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(buf, iova, PAGE_SIZE);
	memset(buf, 1, PAGE_SIZE);
	check_mock_iova(buf, iova, PAGE_SIZE);
	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}

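/*
 * Sweep start/end combinations at MOCK_PAGE_SIZE granularity and check
 * both the IOMMU mapping and the per-page refcounts for each one.
 */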
TEST_F(iommufd_mock_domain, all_aligns)
{
	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;
	int prot = PROT_READ | PROT_WRITE;
	int mfd;

	if (variant->file)
		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
	else
		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
	ASSERT_NE(MAP_FAILED, buf);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big
	 * region; test fewer combinations in the hugepage case since it
	 * takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			__u64 iova;

			if (variant->file) {
				test_ioctl_ioas_map_file(mfd, start, length,
							 &iova);
			} else {
				test_ioctl_ioas_map(buf + start, length, &iova);
			}
			check_mock_iova(buf + start, iova, length);
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
	if (variant->file)
		close(mfd);
}

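/*
 * Same sweep as all_aligns, but a second mock domain is attached and torn
 * down while each area is mapped, so the pages also get copied into it.
 */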
TEST_F(iommufd_mock_domain, all_aligns_copy)
{
	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;
	int prot = PROT_READ | PROT_WRITE;
	int mfd;

	if (variant->file)
		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
	else
		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
	ASSERT_NE(MAP_FAILED, buf);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big
	 * region; test fewer combinations in the hugepage case since it
	 * takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			unsigned int old_id;
			uint32_t mock_stdev_id;
			__u64 iova;

			if (variant->file) {
				test_ioctl_ioas_map_file(mfd, start, length,
							 &iova);
			} else {
				test_ioctl_ioas_map(buf + start, length, &iova);
			}

			/* Add and destroy a domain while the area exists */
			old_id = self->hwpt_ids[1];
			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
					     &self->hwpt_ids[1], NULL);

			check_mock_iova(buf + start, iova, length);
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_destroy(mock_stdev_id);
			self->hwpt_ids[1] = old_id;

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
	if (variant->file)
		close(mfd);
}

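/*
 * Exercise IOMMU_IOAS_COPY: pin pages via an access in a domain-less IOAS,
 * copy them into the fixture's IOAS, then redo the copy after replacing
 * the access's IOAS with a new one.
 */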
TEST_F(iommufd_mock_domain, user_copy)
{
	void *buf = variant->file ? mfd_buffer : buffer;
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buf },
	};
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.dst_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	struct iommu_ioas_unmap unmap_cmd = {
		.size = sizeof(unmap_cmd),
		.ioas_id = self->ioas_id,
		.iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	unsigned int new_ioas_id, ioas_id;

	/* Pin the pages in an IOAS with no domains, then copy to an IOAS with domains */
	test_ioctl_ioas_alloc(&ioas_id);
	if (variant->file) {
		test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
					    &copy_cmd.src_iova);
	} else {
		test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
				       &copy_cmd.src_iova);
	}
	test_cmd_create_access(ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);

	/* Now replace the ioas with a new one */
	test_ioctl_ioas_alloc(&new_ioas_id);
	if (variant->file) {
		test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
					    &copy_cmd.src_iova);
	} else {
		test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
				       &copy_cmd.src_iova);
	}
	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);

	/* Destroy the old ioas and clean up the copied mapping */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
	test_ioctl_destroy(ioas_id);

	/* Then run the same test again with the new ioas */
	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = new_ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);

	test_ioctl_destroy(new_ioas_id);
}

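/*
 * Cycle the mock device between its original IOAS, a fresh IOAS, and the
 * existing HWPTs to exercise the attachment replace path.
 */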
TEST_F(iommufd_mock_domain, replace)
{
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);

	/*
	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
	 * should get ENOENT when we try to use it.
	 */
	if (variant->mock_domains == 1)
		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
					     self->hwpt_ids[0]);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
	if (variant->mock_domains >= 2) {
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[0]);
	}

	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
	test_ioctl_destroy(ioas_id);
}

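/*
 * Allocate regular and nesting-parent HWPTs for each mock device, rotate
 * the device across them, and check the EBUSY/destroy ordering rules.
 */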
TEST_F(iommufd_mock_domain, alloc_hwpt)
{
	int i;

	for (i = 0; i != variant->mock_domains; i++) {
		uint32_t hwpt_id[2];
		uint32_t stddev_id;

		test_err_hwpt_alloc(EOPNOTSUPP,
				    self->idev_ids[i], self->ioas_id,
				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    0, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);

		/* Do a hw_pagetable rotation test */
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
		test_ioctl_destroy(hwpt_id[1]);

		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
		test_ioctl_destroy(stddev_id);
		test_ioctl_destroy(hwpt_id[0]);
	}
}

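/*
 * Dirty tracking tests: each variant maps a buffer of buffer_size bytes
 * and reads dirty bits back through a bitmap with one bit per mock page.
 */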
FIXTURE(iommufd_dirty_tracking)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t stdev_id;
	uint32_t idev_id;
	unsigned long page_size;
	unsigned long bitmap_size;
	void *bitmap;
	void *buffer;
};

FIXTURE_VARIANT(iommufd_dirty_tracking)
{
	unsigned long buffer_size;
	bool hugepages;
};

FIXTURE_SETUP(iommufd_dirty_tracking)
{
	unsigned long size;
	int mmap_flags;
	void *vrc;
	int rc;

	if (variant->buffer_size < MOCK_PAGE_SIZE) {
		SKIP(return,
		     "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
		     variant->buffer_size, MOCK_PAGE_SIZE);
	}

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
	if (rc || !self->buffer) {
		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
		     variant->buffer_size, rc);
	}

	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
	}
	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
		   mmap_flags, -1, 0);
	assert(vrc == self->buffer);

	self->page_size = MOCK_PAGE_SIZE;
	self->bitmap_size = variant->buffer_size / self->page_size;

	/* Provision an extra PAGE_SIZE for the unaligned cases */
	size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
	rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
	assert(!rc);
	assert(self->bitmap);
	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);

	test_ioctl_ioas_alloc(&self->ioas_id);
	/* Enable 1M mock IOMMU hugepages */
	if (variant->hugepages) {
		test_cmd_mock_domain_flags(self->ioas_id,
					   MOCK_FLAGS_DEVICE_HUGE_IOVA,
					   &self->stdev_id, &self->hwpt_id,
					   &self->idev_id);
	} else {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
				     &self->hwpt_id, &self->idev_id);
	}
}

FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
	munmap(self->buffer, variant->buffer_size);
	munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
{
	/* half of a u8 index bitmap */
	.buffer_size = 8UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
{
	/* one u8 index bitmap */
	.buffer_size = 16UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
{
	/* one u32 index bitmap */
	.buffer_size = 64UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
{
	/* one u64 index bitmap */
	.buffer_size = 128UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
{
	/* two u64 indexes and a trailing end bitmap */
	.buffer_size = 320UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
{
	/* 4K bitmap (64M IOVA range) */
	.buffer_size = 64UL * 1024UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
{
	/* 4K bitmap (64M IOVA range) */
	.buffer_size = 64UL * 1024UL * 1024UL,
	.hugepages = true,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
{
	/* 8K bitmap (128M IOVA range) */
	.buffer_size = 128UL * 1024UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
{
	/* 8K bitmap (128M IOVA range) */
	.buffer_size = 128UL * 1024UL * 1024UL,
	.hugepages = true,
};

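/*
 * A dirty-tracking HWPT must reject devices that cannot enforce dirty
 * tracking, and HWPT allocation must fail for such devices.
 */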
TEST_F(iommufd_dirty_tracking, enforce_dirty)
{
	uint32_t ioas_id, stddev_id, idev_id;
	uint32_t hwpt_id, _hwpt_id;
	uint32_t dev_flags;

	/* Regular case */
	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
				   NULL);
	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);

	/* IOMMU device does not support dirty tracking */
	test_ioctl_ioas_alloc(&ioas_id);
	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
				   &idev_id);
	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_ioctl_destroy(stddev_id);
}

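/* Dirty tracking can be turned on and off on a dirty-capable HWPT */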
TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
{
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_set_dirty_tracking(hwpt_id, true);
	test_cmd_set_dirty_tracking(hwpt_id, false);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}

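/* The mock device must report IOMMU_HW_CAP_DIRTY_TRACKING */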
TEST_F(iommufd_dirty_tracking, device_dirty_capability)
{
	uint32_t caps = 0;
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_get_hw_capabilities(self->idev_id, caps,
				     IOMMU_HW_CAP_DIRTY_TRACKING);
	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}

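/*
 * Read-and-clear the dirty bitmap for the whole buffer, including bitmaps
 * placed at PAGE_SIZE-unaligned and u64-unaligned user addresses.
 */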
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
	uint32_t page_size = MOCK_PAGE_SIZE;
	uint32_t hwpt_id;
	uint32_t ioas_id;

	if (variant->hugepages)
		page_size = MOCK_HUGE_PAGE_SIZE;

	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap, self->bitmap_size, 0, _metadata);

	/* PAGE_SIZE-unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size, 0, _metadata);

	/* u64-unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + 0xff1, self->bitmap_size, 0,
				_metadata);

	test_ioctl_destroy(hwpt_id);
}

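/*
 * Same as get_dirty_bitmap, but with IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR
 * set, so reading the bitmap must not clear the IOPTE dirty bits.
 */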
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
	uint32_t page_size = MOCK_PAGE_SIZE;
	uint32_t hwpt_id;
	uint32_t ioas_id;

	if (variant->hugepages)
		page_size = MOCK_HUGE_PAGE_SIZE;

	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* Unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* u64-unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + 0xff1, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	test_ioctl_destroy(hwpt_id);
}

/* VFIO compatibility IOCTLs */

TEST_F(iommufd, simple_ioctls)
{
	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
}

TEST_F(iommufd, unmap_cmd)
{
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
	};

	unmap_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.argsz = sizeof(unmap_cmd);
	unmap_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.flags = 0;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}

TEST_F(iommufd, map_cmd)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
		.vaddr = (__u64)buffer,
	};

	map_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	map_cmd.argsz = sizeof(map_cmd);
	map_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Requires a domain to be attached */
	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}

TEST_F(iommufd, info_cmd)
{
	struct vfio_iommu_type1_info info_cmd = {};

	/* Invalid argsz */
	info_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));

	info_cmd.argsz = sizeof(info_cmd);
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
}

TEST_F(iommufd, set_iommu_cmd)
{
	/* Requires a domain to be attached */
	EXPECT_ERRNO(ENODEV,
		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
}

TEST_F(iommufd, vfio_ioas)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_GET,
	};
	__u32 ioas_id;

	/* ENODEV if there is no compat ioas */
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Invalid id for set */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Valid id for set */
	test_ioctl_ioas_alloc(&ioas_id);
	vfio_ioas_cmd.ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Same id comes back from get */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);

	/* Clear works */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
}

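/*
 * Attach a mock domain to the VFIO compat IOAS and drive it through the
 * VFIO type1/type1v2 ioctls.
 */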
FIXTURE(vfio_compat_mock_domain)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_VARIANT(vfio_compat_mock_domain)
{
	unsigned int version;
};

FIXTURE_SETUP(vfio_compat_mock_domain)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_SET,
	};

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Create what VFIO would consider a group */
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);

	/* Attach it to the vfio compat */
	vfio_ioas_cmd.ioas_id = self->ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}

FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
	.version = VFIO_TYPE1v2_IOMMU,
};

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
	.version = VFIO_TYPE1_IOMMU,
};

TEST_F(vfio_compat_mock_domain, simple_close)
{
}

TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	if (variant->version == VFIO_TYPE1_IOMMU) {
		ASSERT_EQ(0, cmd.val64);
	} else {
		ASSERT_EQ(1, cmd.val64);
	}
}

/* Return true if every one of the len bytes at buf equals c */
static bool is_filled(const void *buf, uint8_t c, size_t len)
{
	const uint8_t *cbuf = buf;

	for (; len; cbuf++, len--)
		if (*cbuf != c)
			return false;
	return true;
}

/*
 * Execute an ioctl command stored in buffer and check that the result does
 * not overflow memory.
 */
#define ioctl_check_buf(fd, cmd)                                         \
	({                                                               \
		size_t _cmd_len = *(__u32 *)buffer;                      \
									 \
		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
					  BUFFER_SIZE - _cmd_len));      \
	})

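/*
 * Walk the VFIO info capability chain, validating each capability header
 * and payload against the mock IOMMU's aperture.
 */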
static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
				      struct vfio_iommu_type1_info *info_cmd)
{
	const struct vfio_info_cap_header *cap;

	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
	cap = buffer + info_cmd->cap_offset;
	while (true) {
		size_t cap_size;

		if (cap->next)
			cap_size = (buffer + cap->next) - (void *)cap;
		else
			cap_size = (buffer + info_cmd->argsz) - (void *)cap;

		switch (cap->id) {
		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
			struct vfio_iommu_type1_info_cap_iova_range *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(1, data->nr_iovas);
			EXPECT_EQ(MOCK_APERTURE_START,
				  data->iova_ranges[0].start);
			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
			break;
		}
		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
			struct vfio_iommu_type1_info_dma_avail *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(sizeof(*data), cap_size);
			break;
		}
		default:
			ASSERT_EQ(false, true);
			break;
		}
		if (!cap->next)
			break;

		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
		ASSERT_GE(buffer + cap->next, (void *)cap);
		cap = buffer + cap->next;
	}
}

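/*
 * Probe VFIO_IOMMU_GET_INFO at every argsz from the pre-cap ABI size up to
 * the full capability chain, checking that no output is corrupted.
 */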
TEST_F(vfio_compat_mock_domain, get_info)
{
	struct vfio_iommu_type1_info *info_cmd = buffer;
	unsigned int i;
	size_t caplen;

	/* Pre-cap ABI */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);

	/* Read the cap chain size */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = sizeof(*info_cmd),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);
	ASSERT_EQ(0, info_cmd->cap_offset);
	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);

	/* Read the caps; the kernel should never create a corrupted cap chain */
	caplen = info_cmd->argsz;
	for (i = sizeof(*info_cmd); i < caplen; i++) {
		*info_cmd = (struct vfio_iommu_type1_info){
			.argsz = i,
		};
		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
			  info_cmd->flags);
		if (!info_cmd->cap_offset)
			continue;
		check_vfio_info_cap_chain(_metadata, info_cmd);
	}
}

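/* Randomly permute an array of IOVAs so that unmaps arrive out of order */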
static void shuffle_array(unsigned long *array, size_t nelms)
{
	unsigned int i;

	/* Shuffle */
	for (i = 0; i != nelms; i++) {
		unsigned long tmp = array[i];
		unsigned int other = rand() % (nelms - i);

		array[i] = array[other];
		array[other] = tmp;
	}
}

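/*
 * VFIO map/unmap basics: whole-buffer map/unmap, argument checking for
 * VFIO_DMA_UNMAP_FLAG_ALL, and out-of-order per-page unmapping.
 */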
TEST_F(vfio_compat_mock_domain, map)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buffer,
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
	unsigned int i;

	/* Simple map/unmap */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* UNMAP_FLAG_ALL requires 0 iova/size */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.iova = 0;
	unmap_cmd.size = 0;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* Small pages */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		map_cmd.iova = pages_iova[i] =
			MOCK_APERTURE_START + i * PAGE_SIZE;
		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
		map_cmd.size = PAGE_SIZE;
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	}
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	unmap_cmd.flags = 0;
	unmap_cmd.size = PAGE_SIZE;
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	}
}

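/*
 * Map a THP-backed buffer and unmap it in small chunks in random order;
 * only type1 mode may cut up the larger mapping.
 */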
TEST_F(vfio_compat_mock_domain, huge_map)
{
	size_t buf_size = HUGEPAGE_SIZE * 2;
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.size = buf_size,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
	};
	unsigned long pages_iova[16];
	unsigned int i;
	void *buf;

	/* Test huge pages and splitting */
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	map_cmd.vaddr = (uintptr_t)buf;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	/* type1 mode can cut up larger mappings; type1v2 always fails */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
		if (variant->version == VFIO_TYPE1_IOMMU) {
			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		} else {
			EXPECT_ERRNO(ENOENT,
				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		}
	}
}

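/*
 * vIOMMU tests: the mock_viommu variant builds a nesting parent HWPT, a
 * vIOMMU on top of it, and a nested HWPT; no_viommu runs without them.
 */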
FIXTURE(iommufd_viommu)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;
	uint32_t hwpt_id;
	uint32_t nested_hwpt_id;
	uint32_t device_id;
	uint32_t viommu_id;
};

FIXTURE_VARIANT(iommufd_viommu)
{
	unsigned int viommu;
};

FIXTURE_SETUP(iommufd_viommu)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_ioctl_set_default_memory_limit();

	if (variant->viommu) {
		struct iommu_hwpt_selftest data = {
			.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
		};

		test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
				     &self->device_id);

		/* Allocate a nesting parent hwpt */
		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &self->hwpt_id);

		/* Allocate a vIOMMU, taking a refcount on the parent hwpt */
		test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST,
				      &self->viommu_id);

		/* Allocate a regular nested hwpt */
		test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
					   &self->nested_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
	}
}

FIXTURE_TEARDOWN(iommufd_viommu)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
{
	.viommu = 0,
};

FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
{
	.viommu = 1,
};

TEST_F(iommufd_viommu, viommu_auto_destroy)
{
}

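/* Invalid parent HWPTs and unsupported vIOMMU types must be rejected */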
TEST_F(iommufd_viommu, viommu_negative_tests)
{
	uint32_t device_id = self->device_id;
	uint32_t ioas_id = self->ioas_id;
	uint32_t hwpt_id;

	if (self->device_id) {
		/* Negative test -- invalid hwpt (hwpt_id=0) */
		test_err_viommu_alloc(ENOENT, device_id, 0,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);

		/* Negative test -- not a nesting parent hwpt */
		test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
		test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
		test_ioctl_destroy(hwpt_id);

		/* Negative test -- unsupported viommu type */
		test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
				      0xdead, NULL);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->hwpt_id));
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->viommu_id));
	} else {
		test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
	}
}

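/*
 * Allocate an IOPF-capable nested HWPT on the vIOMMU, trigger a fault
 * through the mock device, and tear everything down in order.
 */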
TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t iopf_hwpt_id;
	uint32_t fault_id;
	uint32_t fault_fd;

	if (self->device_id) {
		test_ioctl_fault_alloc(&fault_id, &fault_fd);
		test_err_hwpt_alloc_iopf(
			ENOENT, dev_id, viommu_id, UINT32_MAX,
			IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_err_hwpt_alloc_iopf(
			EOPNOTSUPP, dev_id, viommu_id, fault_id,
			IOMMU_HWPT_FAULT_ID_VALID | (1 << 31), &iopf_hwpt_id,
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_cmd_hwpt_alloc_iopf(
			dev_id, viommu_id, fault_id, IOMMU_HWPT_FAULT_ID_VALID,
			&iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
			sizeof(data));

		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
		test_cmd_trigger_iopf(dev_id, fault_fd);

		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(iopf_hwpt_id);
		close(fault_fd);
		test_ioctl_destroy(fault_id);
	}
}

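/* Virtual device IDs must be unique per vIOMMU and reusable after destroy */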
TEST_F(iommufd_viommu, vdevice_alloc)
{
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;

	if (dev_id) {
		/* Set vdev_id to 0x99, unset it, and set to 0x88 */
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
		test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
				       &vdev_id);
		test_ioctl_destroy(vdev_id);
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
		test_ioctl_destroy(vdev_id);
	} else {
		test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
	}
}

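/*
 * Drive IOMMU_VIOMMU_INVALIDATE through its error paths, then invalidate
 * the mock device's cache entries individually and via FLAG_ALL.
 */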
TEST_F(iommufd_viommu, vdevice_cache)
{
	struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;
	uint32_t num_inv;

	if (dev_id) {
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);

		test_cmd_dev_check_cache_all(dev_id,
					     IOMMU_TEST_DEV_CACHE_DEFAULT);

		/* Check data_type by passing a zero-length array */
		num_inv = 0;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_type */
		num_inv = 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: structure size sanity */
		num_inv = 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs) + 1, &num_inv);
		assert(!num_inv);

		num_inv = 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   1, &num_inv);
		assert(!num_inv);

		/* Negative test: an invalid flag is passed */
		num_inv = 1;
		inv_reqs[0].flags = 0xffffffff;
		inv_reqs[0].vdev_id = 0x99;
		test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_uptr when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid entry_len when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   0, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid cache_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid vdev_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x9;
		inv_reqs[0].cache_id = 0;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/*
		 * Invalidate the 1st cache entry but fail the 2nd request
		 * due to an invalid flags configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 0;
		inv_reqs[1].flags = 0xffffffff;
		inv_reqs[1].vdev_id = 0x99;
		inv_reqs[1].cache_id = 1;
		test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache(dev_id, 0, 0);
		test_cmd_dev_check_cache(dev_id, 1,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 2,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 3,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);

		/*
		 * Invalidate the 1st cache entry but fail the 2nd request
		 * due to an invalid cache_id configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 0;
		inv_reqs[1].flags = 0;
		inv_reqs[1].vdev_id = 0x99;
		inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache(dev_id, 0, 0);
		test_cmd_dev_check_cache(dev_id, 1,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 2,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 3,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);

		/* Invalidate the 2nd cache entry and verify */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 1;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache(dev_id, 0, 0);
		test_cmd_dev_check_cache(dev_id, 1, 0);
		test_cmd_dev_check_cache(dev_id, 2,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 3,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);

		/* Invalidate the 3rd and 4th cache entries and verify */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 2;
		inv_reqs[1].flags = 0;
		inv_reqs[1].vdev_id = 0x99;
		inv_reqs[1].cache_id = 3;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 2);
		test_cmd_dev_check_cache_all(dev_id, 0);

		/* Invalidate all cache entries for vdev_id 0x99 and verify */
		num_inv = 1;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache_all(dev_id, 0);
		test_ioctl_destroy(vdev_id);
	}
}

TEST_HARNESS_MAIN