// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Copyright (c) 2025 Collabora Ltd.
 *                    AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
 */

#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include "mtk_drm_drv.h"
#include "mtk_gem.h"

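/* Forward declaration: referenced by mtk_gem_object_funcs before its definition. */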
static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

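/*
 * Free a GEM object: imported buffers are released through
 * drm_prime_gem_destroy(), while locally allocated buffers return their
 * write-combined DMA memory to the DMA device.
 */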
static void mtk_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (dma_obj->sgt)
		drm_prime_gem_destroy(obj, dma_obj->sgt);
	else
		dma_free_wc(priv->dma_dev, dma_obj->base.size,
			    dma_obj->vaddr, dma_obj->dma_addr);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(dma_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
static struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr,
			      dma_obj->dma_addr, obj->size);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

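/*
 * GEM object callbacks: GEM DMA helper defaults, with custom free, sg_table
 * export and mmap implementations.
 */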
static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
	.free = mtk_gem_free_object,
	.print_info = drm_gem_dma_object_print_info,
	.get_sg_table = mtk_gem_prime_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = mtk_gem_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};

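/*
 * Allocate and initialize a GEM DMA object of @size bytes, rounded up to a
 * page multiple. When @private is true the object is initialized with
 * drm_gem_private_object_init(), as done for imported (PRIME) buffers that
 * need no backing shmem file.
 */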
static struct drm_gem_dma_object *mtk_gem_init(struct drm_device *dev,
					       unsigned long size, bool private)
{
	struct drm_gem_dma_object *dma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
	if (!dma_obj)
		return ERR_PTR(-ENOMEM);

	dma_obj->base.funcs = &mtk_gem_object_funcs;

	if (private) {
		ret = 0;
		drm_gem_private_object_init(dev, &dma_obj->base, size);
	} else {
		ret = drm_gem_object_init(dev, &dma_obj->base, size);
	}
	if (ret) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(dma_obj);
		return ERR_PTR(ret);
	}

	return dma_obj;
}

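/*
 * Allocate a GEM object backed by write-combined DMA memory from the DMA
 * device, used for locally created (non-imported) buffers such as dumb
 * framebuffers.
 */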
static struct drm_gem_dma_object *mtk_gem_create(struct drm_device *dev, size_t size)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct drm_gem_dma_object *dma_obj;
	struct drm_gem_object *obj;
	int ret;

	dma_obj = mtk_gem_init(dev, size, false);
	if (IS_ERR(dma_obj))
		return ERR_CAST(dma_obj);

	obj = &dma_obj->base;

	dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size,
				      &dma_obj->dma_addr,
				      GFP_KERNEL | __GFP_NOWARN);
	if (!dma_obj->vaddr) {
		DRM_ERROR("failed to allocate %zx byte dma buffer\n", obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	DRM_DEBUG_DRIVER("vaddr = %p dma_addr = %pad size = %zu\n",
			 dma_obj->vaddr, &dma_obj->dma_addr, size);

	return dma_obj;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(dma_obj);
	return ERR_PTR(ret);
}

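/*
 * Implementation of the dumb_create callback: allocate a buffer suitable for
 * scanout and return a handle to it, computing the pitch and overall size
 * from the requested width, height and bits per pixel.
 */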
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	struct drm_gem_dma_object *dma_obj;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Multiplying two variables of different types (e.g.
	 * args->size = args->pitch * args->height) can overflow
	 * unintentionally, as flagged by Coverity, so widen first and
	 * multiply in two steps.
	 */
	args->size = args->pitch;
	args->size *= args->height;

	dma_obj = mtk_gem_create(dev, args->size);
	if (IS_ERR(dma_obj))
		return PTR_ERR(dma_obj);

	/*
	 * Allocate an idr table id where the obj is registered; the returned
	 * handle is the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &dma_obj->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocation - the handle holds it now. */
	drm_gem_object_put(&dma_obj->base);

	return 0;

err_handle_create:
	mtk_gem_free_object(&dma_obj->base);
	return ret;
}

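/*
 * Map the whole buffer into userspace through dma_mmap_wc(), since the
 * backing memory was allocated by the DMA API rather than as discrete pages.
 */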
static int mtk_gem_object_mmap(struct drm_gem_object *obj,
			       struct vm_area_struct *vma)
{
	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	int ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);

	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr,
			  dma_obj->dma_addr, obj->size);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

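/*
 * Implementation of the PRIME import callback: wrap an imported sg_table in
 * a GEM object, requiring the entries to be contiguous in DMA address space
 * since the object keeps only a single DMA address.
 */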
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	struct drm_gem_dma_object *dma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("sg_table is not contiguous\n");
		return ERR_PTR(-EINVAL);
	}

	dma_obj = mtk_gem_init(dev, attach->dmabuf->size, true);
	if (IS_ERR(dma_obj))
		return ERR_CAST(dma_obj);

	dma_obj->dma_addr = sg_dma_address(sgt->sgl);
	dma_obj->sgt = sgt;

	return &dma_obj->base;
}