xref: /linux/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c (revision 151ebcf0797b1a3ba53c8843dc21748c80e098c7)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/vmalloc.h>
#include "mock_dmabuf.h"

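/*
 * Build a scatterlist over the mock's backing pages and DMA-map it for
 * the importing device; the table is torn down again in
 * mock_unmap_dma_buf().
 */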
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	err = dma_map_sgtable(attachment->dev, st, dir, 0);
	if (err)
		goto err_st;

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}

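/* Undo mock_map_dma_buf(): DMA-unmap the table, then free it. */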
static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
			       struct sg_table *st,
			       enum dma_data_direction dir)
{
	dma_unmap_sgtable(attachment->dev, st, dir, 0);
	sg_free_table(st);
	kfree(st);
}

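/*
 * Called when the last reference to the dma-buf is dropped: release the
 * backing pages and the mock object itself.
 */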
static void mock_dmabuf_release(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	int i;

	for (i = 0; i < mock->npages; i++)
		put_page(mock->pages[i]);

	kfree(mock);
}

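/*
 * Map the backing pages into one contiguous kernel virtual range with
 * vm_map_ram() and hand the address back through the iosys_map.
 */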
static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	void *vaddr;

	vaddr = vm_map_ram(mock->pages, mock->npages, 0);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

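/* Tear down the vm_map_ram() mapping created by mock_dmabuf_vmap(). */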
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(map->vaddr, mock->npages);
}

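/* Userspace mmap of the mock buffer is not supported. */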
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -ENODEV;
}

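/* Minimal dma_buf_ops for the mock exporter used by the i915 selftests. */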
static const struct dma_buf_ops mock_dmabuf_ops = {
	.map_dma_buf = mock_map_dma_buf,
	.unmap_dma_buf = mock_unmap_dma_buf,
	.release = mock_dmabuf_release,
	.mmap = mock_dmabuf_mmap,
	.vmap = mock_dmabuf_vmap,
	.vunmap = mock_dmabuf_vunmap,
};

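/*
 * Allocate a mock exporter backed by npages freshly allocated pages and
 * export it as a dma-buf.
 */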
static struct dma_buf *mock_dmabuf(int npages)
{
	struct mock_dmabuf *mock;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int i;

	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
		       GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);

	mock->npages = npages;
	for (i = 0; i < npages; i++) {
		mock->pages[i] = alloc_page(GFP_KERNEL);
		if (!mock->pages[i])
			goto err;
	}

	exp_info.ops = &mock_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = mock;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		goto err;

	return dmabuf;

err:
	while (i--)
		put_page(mock->pages[i]);
	kfree(mock);
	return ERR_PTR(-ENOMEM);
}
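/*
 * A sketch of how a selftest might exercise this exporter (illustrative
 * only, not part of this file; assumes a suitable struct device *dev to
 * attach with):
 *
 *	struct dma_buf *dmabuf = mock_dmabuf(2);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *st;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	st = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... exercise the mapping ...
 *	dma_buf_unmap_attachment(attach, st, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */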