// SPDX-License-Identifier: GPL-2.0

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <drm/drm.h>
#include "../kselftest.h"

#define DEVPATH "/dev/dma_heap"

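/* Check that an opened DRM device node is backed by the vgem driver. */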
static int check_vgem(int fd)
{
	drm_version_t version = { 0 };
	char name[5];
	int ret;

	version.name_len = 4;
	version.name = name;

	ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
	if (ret || version.name_len != 4)
		return 0;

	name[4] = '\0';

	return !strcmp(name, "vgem");
}

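/*
 * Scan /dev/dri/card0..card15 for a vgem device and return an open fd to it,
 * or -1 if none is found.
 */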
static int open_vgem(void)
{
	int i, fd;
	const char *drmstr = "/dev/dri/card";

	fd = -1;
	for (i = 0; i < 16; i++) {
		char name[80];

		snprintf(name, 80, "%s%u", drmstr, i);

		fd = open(name, O_RDWR);
		if (fd < 0)
			continue;

		if (!check_vgem(fd)) {
			close(fd);
			fd = -1;
			continue;
		} else {
			break;
		}
	}
	return fd;
}

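/* Import a dma-buf fd into vgem via PRIME and return the resulting GEM handle. */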
static int import_vgem_fd(int vgem_fd, int dma_buf_fd, uint32_t *handle)
{
	struct drm_prime_handle import_handle = {
		.fd = dma_buf_fd,
		.flags = 0,
		.handle = 0,
	};
	int ret;

	ret = ioctl(vgem_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import_handle);
	if (ret == 0)
		*handle = import_handle.handle;
	return ret;
}

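/* Release a GEM handle previously obtained from import_vgem_fd(). */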
static void close_handle(int vgem_fd, uint32_t handle)
{
	struct drm_gem_close close = {
		.handle = handle,
	};

	ioctl(vgem_fd, DRM_IOCTL_GEM_CLOSE, &close);
}

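/* Open /dev/dma_heap/<name>, exiting the test with a failure if it cannot be opened. */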
static int dmabuf_heap_open(char *name)
{
	int ret, fd;
	char buf[256];

	ret = snprintf(buf, 256, "%s/%s", DEVPATH, name);
	if (ret < 0)
		ksft_exit_fail_msg("snprintf failed! %d\n", ret);

	fd = open(buf, O_RDWR);
	if (fd < 0)
		ksft_exit_fail_msg("open %s failed: %s\n", buf, strerror(errno));

	return fd;
}

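/*
 * Allocate a buffer from the heap via DMA_HEAP_IOCTL_ALLOC, returning the new
 * dma-buf fd through *dmabuf_fd. dmabuf_heap_alloc() is a convenience wrapper
 * that requests O_RDWR | O_CLOEXEC fd flags.
 */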
static int dmabuf_heap_alloc_fdflags(int fd, size_t len, unsigned int fd_flags,
				     unsigned int heap_flags, int *dmabuf_fd)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd = 0,
		.fd_flags = fd_flags,
		.heap_flags = heap_flags,
	};
	int ret;

	if (!dmabuf_fd)
		return -EINVAL;

	ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);
	if (ret < 0)
		return ret;
	*dmabuf_fd = (int)data.fd;
	return ret;
}

static int dmabuf_heap_alloc(int fd, size_t len, unsigned int flags,
			     int *dmabuf_fd)
{
	return dmabuf_heap_alloc_fdflags(fd, len, O_RDWR | O_CLOEXEC, flags,
					 dmabuf_fd);
}

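/* Bracket CPU access to a dma-buf with a DMA_BUF_IOCTL_SYNC begin/end call. */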
static int dmabuf_sync(int fd, int start_stop)
{
	struct dma_buf_sync sync = {
		.flags = start_stop | DMA_BUF_SYNC_RW,
	};

	return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
}

#define ONE_MEG (1024 * 1024)

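/*
 * Allocate a 1 MiB buffer, map it and write a pattern, then import it into
 * vgem and exercise the CPU-access sync ioctls on the shared buffer.
 */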
static void test_alloc_and_import(char *heap_name)
{
	int heap_fd = -1, dmabuf_fd = -1, importer_fd = -1;
	uint32_t handle = 0;
	void *p = NULL;
	int ret;

	heap_fd = dmabuf_heap_open(heap_name);

	ksft_print_msg("Testing allocation and importing:\n");
	ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0, &dmabuf_fd);
	if (ret) {
		ksft_test_result_fail("FAIL (Allocation Failed!) %d\n", ret);
		close(heap_fd);
		return;
	}

	/* mmap and write a simple pattern */
	p = mmap(NULL, ONE_MEG, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
	if (p == MAP_FAILED) {
		ksft_test_result_fail("FAIL (mmap() failed): %s\n", strerror(errno));
		goto close_and_return;
	}

	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
	memset(p, 1, ONE_MEG / 2);
	memset((char *)p + ONE_MEG / 2, 0, ONE_MEG / 2);
	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);

	importer_fd = open_vgem();
	if (importer_fd < 0) {
		ksft_test_result_skip("Could not open vgem %d\n", importer_fd);
	} else {
		ret = import_vgem_fd(importer_fd, dmabuf_fd, &handle);
		ksft_test_result(ret >= 0, "Import buffer %d\n", ret);
	}

	ret = dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START);
	if (ret < 0) {
		ksft_print_msg("FAIL (DMA_BUF_SYNC_START failed!) %d\n", ret);
		goto out;
	}

	memset(p, 0xff, ONE_MEG);
	ret = dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END);
	if (ret < 0) {
		ksft_print_msg("FAIL (DMA_BUF_SYNC_END failed!) %d\n", ret);
		goto out;
	}

	close_handle(importer_fd, handle);
	ksft_test_result_pass("%s dmabuf sync succeeded\n", __func__);
	munmap(p, ONE_MEG);
	close(importer_fd);
	goto close_and_return;

out:
	ksft_test_result_fail("%s dmabuf sync failed\n", __func__);
	munmap(p, ONE_MEG);
	close(importer_fd);

close_and_return:
	close(dmabuf_fd);
	close(heap_fd);
}

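/*
 * Allocate and dirty a batch of buffers, free them, then allocate a fresh
 * batch and verify every byte comes back zeroed.
 */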
static void test_alloc_zeroed(char *heap_name, size_t size)
{
	int heap_fd = -1, dmabuf_fd[32];
	int i, j, k, ret;
	void *p = NULL;
	char *c;

	ksft_print_msg("Testing allocated %zuk buffers are zeroed:\n", size / 1024);
	heap_fd = dmabuf_heap_open(heap_name);

	/* Allocate and fill a bunch of buffers */
	for (i = 0; i < 32; i++) {
		ret = dmabuf_heap_alloc(heap_fd, size, 0, &dmabuf_fd[i]);
		if (ret) {
			ksft_test_result_fail("FAIL (Allocation (%i) failed) %d\n", i, ret);
			goto close_and_return;
		}

		/* mmap and fill with simple pattern */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd[i], 0);
		if (p == MAP_FAILED) {
			ksft_test_result_fail("FAIL (mmap() failed!): %s\n", strerror(errno));
			goto close_and_return;
		}

		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_START);
		memset(p, 0xff, size);
		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
		munmap(p, size);
	}
	/* close them all */
	for (i = 0; i < 32; i++)
		close(dmabuf_fd[i]);
	ksft_test_result_pass("Allocate and fill a bunch of buffers\n");

	/* Allocate and validate all buffers are zeroed */
	for (i = 0; i < 32; i++) {
		ret = dmabuf_heap_alloc(heap_fd, size, 0, &dmabuf_fd[i]);
		if (ret < 0) {
			ksft_test_result_fail("FAIL (Allocation (%i) failed) %d\n", i, ret);
			goto close_and_return;
		}

		/* mmap and validate everything is zero */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd[i], 0);
		if (p == MAP_FAILED) {
			ksft_test_result_fail("FAIL (mmap() failed!): %s\n", strerror(errno));
			goto close_and_return;
		}

		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_START);
		c = (char *)p;
		for (j = 0; j < size; j++) {
			if (c[j] != 0) {
				ksft_print_msg("FAIL (Allocated buffer not zeroed @ %i)\n", j);
				dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
				munmap(p, size);
				goto out;
			}
		}
		dmabuf_sync(dmabuf_fd[i], DMA_BUF_SYNC_END);
		munmap(p, size);
	}

out:
	ksft_test_result(i == 32, "Allocate and validate all buffers are zeroed\n");

close_and_return:
	/* close them all */
	for (k = 0; k < i; k++)
		close(dmabuf_fd[k]);

	close(heap_fd);
	return;
}

/* Test the ioctl version compatibility w/ a smaller structure than expected */
static int dmabuf_heap_alloc_older(int fd, size_t len, unsigned int flags,
				   int *dmabuf_fd)
{
	int ret;
	unsigned int older_alloc_ioctl;
	struct dma_heap_allocation_data_smaller {
		__u64 len;
		__u32 fd;
		__u32 fd_flags;
	} data = {
		.len = len,
		.fd = 0,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};

	older_alloc_ioctl = _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,
				  struct dma_heap_allocation_data_smaller);
	if (!dmabuf_fd)
		return -EINVAL;

	ret = ioctl(fd, older_alloc_ioctl, &data);
	if (ret < 0)
		return ret;
	*dmabuf_fd = (int)data.fd;
	return ret;
}

/* Test the ioctl version compatibility w/ a larger structure than expected */
static int dmabuf_heap_alloc_newer(int fd, size_t len, unsigned int flags,
				   int *dmabuf_fd)
{
	int ret;
	unsigned int newer_alloc_ioctl;
	struct dma_heap_allocation_data_bigger {
		__u64 len;
		__u32 fd;
		__u32 fd_flags;
		__u64 heap_flags;
		__u64 garbage1;
		__u64 garbage2;
		__u64 garbage3;
	} data = {
		.len = len,
		.fd = 0,
		.fd_flags = O_RDWR | O_CLOEXEC,
		.heap_flags = flags,
		.garbage1 = 0xffffffff,
		.garbage2 = 0x88888888,
		.garbage3 = 0x11111111,
	};

	newer_alloc_ioctl = _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,
				  struct dma_heap_allocation_data_bigger);
	if (!dmabuf_fd)
		return -EINVAL;

	ret = ioctl(fd, newer_alloc_ioctl, &data);
	if (ret < 0)
		return ret;

	*dmabuf_fd = (int)data.fd;
	return ret;
}

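/*
 * Exercise both compatibility paths: an allocation request made with a
 * smaller (older) argument structure and one made with a larger (newer)
 * structure. Both are expected to succeed.
 */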
static void test_alloc_compat(char *heap_name)
{
	int ret, heap_fd = -1, dmabuf_fd = -1;

	heap_fd = dmabuf_heap_open(heap_name);

	ksft_print_msg("Testing (theoretical) older alloc compat:\n");
	ret = dmabuf_heap_alloc_older(heap_fd, ONE_MEG, 0, &dmabuf_fd);
	if (dmabuf_fd >= 0)
		close(dmabuf_fd);
	ksft_test_result(!ret, "dmabuf_heap_alloc_older\n");

	ksft_print_msg("Testing (theoretical) newer alloc compat:\n");
	ret = dmabuf_heap_alloc_newer(heap_fd, ONE_MEG, 0, &dmabuf_fd);
	if (dmabuf_fd >= 0)
		close(dmabuf_fd);
	ksft_test_result(!ret, "dmabuf_heap_alloc_newer\n");

	close(heap_fd);
}

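/*
 * Confirm that the allocation ioctl rejects an invalid device fd, invalid
 * heap flags and invalid fd flags.
 */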
static void test_alloc_errors(char *heap_name)
{
	int heap_fd = -1, dmabuf_fd = -1;
	int ret;

	heap_fd = dmabuf_heap_open(heap_name);

	ksft_print_msg("Testing expected error cases:\n");
	ret = dmabuf_heap_alloc(0, ONE_MEG, 0x111111, &dmabuf_fd);
	ksft_test_result(ret, "Error expected on invalid fd %d\n", ret);

	ret = dmabuf_heap_alloc(heap_fd, ONE_MEG, 0x111111, &dmabuf_fd);
	ksft_test_result(ret, "Error expected on invalid heap flags %d\n", ret);

	ret = dmabuf_heap_alloc_fdflags(heap_fd, ONE_MEG,
					~(O_RDWR | O_CLOEXEC), 0, &dmabuf_fd);
	ksft_test_result(ret, "Error expected on invalid fd flags %d\n", ret);

	if (dmabuf_fd >= 0)
		close(dmabuf_fd);
	close(heap_fd);
}

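/* Count the heap device nodes under /dev/dma_heap so the test plan can be sized. */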
static int number_of_heaps(void)
{
	DIR *d = opendir(DEVPATH);
	struct dirent *dir;
	int heaps = 0;

	while ((dir = readdir(d))) {
		if (!strncmp(dir->d_name, ".", 2))
			continue;
		if (!strncmp(dir->d_name, "..", 3))
			continue;
		heaps++;
	}
	closedir(d);

	return heaps;
}

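/*
 * Walk every heap under /dev/dma_heap and run the full suite of allocation,
 * import, zeroing, compatibility and error-path tests against it.
 */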
int main(void)
{
	struct dirent *dir;
	DIR *d;

	ksft_print_header();

	d = opendir(DEVPATH);
	if (!d) {
		ksft_print_msg("No %s directory?\n", DEVPATH);
		return KSFT_SKIP;
	}

	ksft_set_plan(11 * number_of_heaps());

	while ((dir = readdir(d))) {
		if (!strncmp(dir->d_name, ".", 2))
			continue;
		if (!strncmp(dir->d_name, "..", 3))
			continue;

		ksft_print_msg("Testing heap: %s\n", dir->d_name);
		ksft_print_msg("=======================================\n");
		test_alloc_and_import(dir->d_name);
		test_alloc_zeroed(dir->d_name, 4 * 1024);
		test_alloc_zeroed(dir->d_name, ONE_MEG);
		test_alloc_compat(dir->d_name);
		test_alloc_errors(dir->d_name);
	}
	closedir(d);

	ksft_finished();
}