xref: /linux/tools/testing/selftests/iommu/iommufd_utils.h (revision 1c07425e902cd3137961c3d45b4271bf8a9b8eb9)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3 #ifndef __SELFTEST_IOMMUFD_UTILS
4 #define __SELFTEST_IOMMUFD_UTILS
5 
6 #include <unistd.h>
7 #include <stddef.h>
8 #include <sys/fcntl.h>
9 #include <sys/ioctl.h>
10 #include <stdint.h>
11 #include <assert.h>
12 
13 #include "../kselftest_harness.h"
14 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
15 
/*
 * Hack to make assertions more readable: call sites name the test op they
 * issue, e.g. _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), while the macro
 * still expands to the single IOMMU_TEST_CMD ioctl number (the op is carried
 * in the command struct's .op field, not in the ioctl number).
 */
#define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD
18 
/*
 * Shared test buffer used by check_refs()/teardown_iommufd().
 * NOTE(review): both are assigned by the test fixtures, not in this header —
 * presumably an mmap'd anonymous region and its byte length; confirm in the
 * test files that include this header.
 */
static void *buffer;
static unsigned long BUFFER_SIZE;
21 
/*
 * Have the kernel check the refcount on pages. I don't know why a freshly
 * mmap'd anon non-compound page starts out with a ref of 3
 *
 * Issues IOMMU_TEST_OP_MD_CHECK_REFS for [_ptr, _ptr + _length) expecting
 * every page to have exactly _refs references.  Uses self->fd, so it is only
 * usable inside a harness TEST_F body; each argument is evaluated once, in
 * the struct initializer.
 */
#define check_refs(_ptr, _length, _refs)                                      \
	({                                                                    \
		struct iommu_test_cmd test_cmd = {                            \
			.size = sizeof(test_cmd),                             \
			.op = IOMMU_TEST_OP_MD_CHECK_REFS,                    \
			.check_refs = { .length = _length,                    \
					.uptr = (uintptr_t)(_ptr),            \
					.refs = _refs },                      \
		};                                                            \
		ASSERT_EQ(0,                                                  \
			  ioctl(self->fd,                                     \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \
				&test_cmd));                                  \
	})
40 
41 static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id,
42 				 __u32 *hwpt_id)
43 {
44 	struct iommu_test_cmd cmd = {
45 		.size = sizeof(cmd),
46 		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
47 		.id = ioas_id,
48 		.mock_domain = {},
49 	};
50 	int ret;
51 
52 	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
53 	if (ret)
54 		return ret;
55 	if (device_id)
56 		*device_id = cmd.mock_domain.out_device_id;
57 	assert(cmd.id != 0);
58 	if (hwpt_id)
59 		*hwpt_id = cmd.mock_domain.out_hwpt_id;
60 	return 0;
61 }
/* ASSERT mock domain creation succeeds; out IDs returned via the pointers */
#define test_cmd_mock_domain(ioas_id, device_id, hwpt_id)                \
	ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, device_id, \
					   hwpt_id))
/* EXPECT mock domain creation to fail with errno == _errno */
#define test_err_mock_domain(_errno, ioas_id, device_id, hwpt_id)     \
	EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \
						   device_id, hwpt_id))
68 
69 static int _test_cmd_create_access(int fd, unsigned int ioas_id,
70 				   __u32 *access_id, unsigned int flags)
71 {
72 	struct iommu_test_cmd cmd = {
73 		.size = sizeof(cmd),
74 		.op = IOMMU_TEST_OP_CREATE_ACCESS,
75 		.id = ioas_id,
76 		.create_access = { .flags = flags },
77 	};
78 	int ret;
79 
80 	ret = ioctl(fd, IOMMU_TEST_CMD, &cmd);
81 	if (ret)
82 		return ret;
83 	*access_id = cmd.create_access.out_access_fd;
84 	return 0;
85 }
/* ASSERT access creation succeeds; the new access fd lands in *access_id */
#define test_cmd_create_access(ioas_id, access_id, flags)                  \
	ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \
					     flags))
89 
/*
 * Tear down an access object.  An access is represented purely by its file
 * descriptor (see _test_cmd_create_access()), so destroying it is just a
 * close().  Returns close()'s result: 0 on success, -1 with errno on error.
 */
static int _test_cmd_destroy_access(unsigned int access_id)
{
	int rc = close(access_id);

	return rc;
}
/* ASSERT the access fd closes cleanly */
#define test_cmd_destroy_access(access_id) \
	ASSERT_EQ(0, _test_cmd_destroy_access(access_id))
96 
97 static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id,
98 					  unsigned int access_pages_id)
99 {
100 	struct iommu_test_cmd cmd = {
101 		.size = sizeof(cmd),
102 		.op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES,
103 		.id = access_id,
104 		.destroy_access_pages = { .access_pages_id = access_pages_id },
105 	};
106 	return ioctl(fd, IOMMU_TEST_CMD, &cmd);
107 }
/* ASSERT the pinned range is released */
#define test_cmd_destroy_access_pages(access_id, access_pages_id)        \
	ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \
						    access_pages_id))
/* EXPECT the release to fail with errno == _errno */
#define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \
	EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages(              \
				     self->fd, access_id, access_pages_id))
114 
115 static int _test_ioctl_destroy(int fd, unsigned int id)
116 {
117 	struct iommu_destroy cmd = {
118 		.size = sizeof(cmd),
119 		.id = id,
120 	};
121 	return ioctl(fd, IOMMU_DESTROY, &cmd);
122 }
/* ASSERT that object @id is destroyed successfully */
#define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id))
124 
125 static int _test_ioctl_ioas_alloc(int fd, __u32 *id)
126 {
127 	struct iommu_ioas_alloc cmd = {
128 		.size = sizeof(cmd),
129 	};
130 	int ret;
131 
132 	ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd);
133 	if (ret)
134 		return ret;
135 	*id = cmd.out_ioas_id;
136 	return 0;
137 }
/* Allocate an IOAS and ASSERT it succeeded with a non-zero object ID */
#define test_ioctl_ioas_alloc(id)                                   \
	({                                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \
		ASSERT_NE(0, *(id));                                \
	})
143 
144 static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer,
145 				size_t length, __u64 *iova, unsigned int flags)
146 {
147 	struct iommu_ioas_map cmd = {
148 		.size = sizeof(cmd),
149 		.flags = flags,
150 		.ioas_id = ioas_id,
151 		.user_va = (uintptr_t)buffer,
152 		.length = length,
153 	};
154 	int ret;
155 
156 	if (flags & IOMMU_IOAS_MAP_FIXED_IOVA)
157 		cmd.iova = *iova;
158 
159 	ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd);
160 	*iova = cmd.iova;
161 	return ret;
162 }
/* ASSERT a RW map into self->ioas_id; kernel-chosen IOVA returned in *iova_p */
#define test_ioctl_ioas_map(buffer, length, iova_p)                        \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* EXPECT the RW map into self->ioas_id to fail with errno == _errno */
#define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p)            \
	EXPECT_ERRNO(_errno,                                               \
		     _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \
					  length, iova_p,                  \
					  IOMMU_IOAS_MAP_WRITEABLE |       \
						  IOMMU_IOAS_MAP_READABLE))

/* Same as test_ioctl_ioas_map() but against an explicit IOAS */
#define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p)              \
	ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \
					  iova_p,                            \
					  IOMMU_IOAS_MAP_WRITEABLE |         \
						  IOMMU_IOAS_MAP_READABLE))
181 
/*
 * ASSERT a RW map at the exact IOVA @iova (IOMMU_IOAS_MAP_FIXED_IOVA).
 * @iova is copied into a local so the caller's expression is evaluated once
 * and is not modified by the helper's write-back through the pointer.
 */
#define test_ioctl_ioas_map_fixed(buffer, length, iova)                       \
	({                                                                    \
		__u64 __iova = iova;                                          \
		ASSERT_EQ(0, _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})

/* EXPECT the fixed-IOVA RW map to fail with errno == _errno */
#define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova)           \
	({                                                                    \
		__u64 __iova = iova;                                          \
		EXPECT_ERRNO(_errno,                                          \
			     _test_ioctl_ioas_map(                            \
				     self->fd, self->ioas_id, buffer, length, \
				     &__iova,                                 \
				     IOMMU_IOAS_MAP_FIXED_IOVA |              \
					     IOMMU_IOAS_MAP_WRITEABLE |       \
					     IOMMU_IOAS_MAP_READABLE));       \
	})
204 
205 static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova,
206 				  size_t length, uint64_t *out_len)
207 {
208 	struct iommu_ioas_unmap cmd = {
209 		.size = sizeof(cmd),
210 		.ioas_id = ioas_id,
211 		.iova = iova,
212 		.length = length,
213 	};
214 	int ret;
215 
216 	ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd);
217 	if (out_len)
218 		*out_len = cmd.length;
219 	return ret;
220 }
/* ASSERT an unmap from self->ioas_id succeeds */
#define test_ioctl_ioas_unmap(iova, length)                                \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \
					    length, NULL))

/* Same but against an explicit IOAS */
#define test_ioctl_ioas_unmap_id(ioas_id, iova, length)                      \
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \
					    NULL))

/* EXPECT the unmap from self->ioas_id to fail with errno == _errno */
#define test_err_ioctl_ioas_unmap(_errno, iova, length)                      \
	EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \
						    iova, length, NULL))
232 
233 static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit)
234 {
235 	struct iommu_test_cmd memlimit_cmd = {
236 		.size = sizeof(memlimit_cmd),
237 		.op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
238 		.memory_limit = { .limit = limit },
239 	};
240 
241 	return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT),
242 		     &memlimit_cmd);
243 }
244 
/* ASSERT the temporary memory limit is applied */
#define test_ioctl_set_temp_memory_limit(limit) \
	ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit))

/* Restore the limit tests assume by default (64KiB) */
#define test_ioctl_set_default_memory_limit() \
	test_ioctl_set_temp_memory_limit(65536)
250 
/*
 * Fixture teardown: close the iommufd and verify no page references leaked.
 *
 * Closing @fd destroys every object it owned, which must drop all pins on
 * the global test buffer.  A fresh /dev/iommu fd is then opened solely to
 * issue MD_CHECK_REFS over [buffer, buffer + BUFFER_SIZE); .refs is left
 * zero-initialized, i.e. the check expects the baseline refcount.  @fd == -1
 * means the fixture never opened a device, so there is nothing to do.
 */
static void teardown_iommufd(int fd, struct __test_metadata *_metadata)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_REFS,
		.check_refs = { .length = BUFFER_SIZE,
				.uptr = (uintptr_t)buffer },
	};

	if (fd == -1)
		return;

	EXPECT_EQ(0, close(fd));

	fd = open("/dev/iommu", O_RDWR);
	EXPECT_NE(-1, fd);
	EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS),
			   &test_cmd));
	EXPECT_EQ(0, close(fd));
}
271 
/*
 * Check that @cmd failed (returned -1) and left @expected_errno in errno.
 * Defined after the test_err_* macros that reference it; that is fine
 * because macro bodies are only expanded at their invocation sites in the
 * test files, by which point this definition is visible.
 */
#define EXPECT_ERRNO(expected_errno, cmd)         \
	({                                        \
		ASSERT_EQ(-1, cmd);               \
		EXPECT_EQ(expected_errno, errno); \
	})
277 
278 #endif
279