// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#include <linux/dma-buf.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "lsdc_drv.h"
#include "lsdc_gem.h"
#include "lsdc_ttm.h"

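/*
 * Prime pin callback: pin the BO into the GTT domain for dma-buf sharing
 * and account for it by bumping sharing_count. The dma-buf core holds the
 * reservation lock when this is called.
 */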
static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
{
	struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
	int ret;

	dma_resv_assert_held(obj->resv);

	ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
	if (likely(ret == 0))
		lbo->sharing_count++;

	return ret;
}

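/*
 * Prime unpin callback: drop the pin taken by lsdc_gem_prime_pin() and
 * decrement the sharing count, guarding against underflow.
 */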
static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);

	dma_resv_assert_held(obj->resv);

	lsdc_bo_unpin(lbo);
	if (lbo->sharing_count)
		lbo->sharing_count--;
}

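/*
 * Build a scatter/gather table from the pages backing this BO; sharing is
 * only possible once the TTM object actually has backing memory, hence
 * the tt check.
 */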
static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	struct ttm_tt *tt = tbo->ttm;

	if (!tt) {
		drm_err(obj->dev, "sharing a buffer without backing memory\n");
		return ERR_PTR(-ENOMEM);
	}

	return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
}

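/*
 * Free callback: drop the TTM reference; the BO and its embedded GEM
 * object are destroyed once the TTM refcount reaches zero.
 */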
static void lsdc_gem_object_free(struct drm_gem_object *obj)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);

	if (tbo)
		ttm_bo_put(tbo);
}

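/*
 * Map the BO into kernel address space. Mappings are refcounted through
 * vmap_count: the first user pins the BO in its current placement
 * (domain 0) so it cannot be moved while mapped; later users simply
 * reuse the cached map.
 */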
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);
	int ret;

	if (lbo->vmap_count > 0) {
		++lbo->vmap_count;
		goto out;
	}

	ret = lsdc_bo_pin(lbo, 0, NULL);
	if (unlikely(ret)) {
		drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
		return ret;
	}

	ret = ttm_bo_vmap(tbo, &lbo->map);
	if (ret) {
		drm_err(obj->dev, "ttm bo vmap failed\n");
		lsdc_bo_unpin(lbo);
		return ret;
	}

	lbo->vmap_count = 1;

out:
	*map = lbo->map;

	return 0;
}

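/*
 * Drop one vmap reference; the kernel mapping and the corresponding pin
 * are only released when the last user is gone.
 */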
static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	struct lsdc_bo *lbo = to_lsdc_bo(tbo);

	if (unlikely(!lbo->vmap_count)) {
		drm_warn(obj->dev, "%p is not mapped\n", lbo);
		return;
	}

	--lbo->vmap_count;
	if (lbo->vmap_count == 0) {
		ttm_bo_vunmap(tbo, &lbo->map);

		lsdc_bo_unpin(lbo);
	}
}

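/*
 * Map the BO into a userspace VMA through TTM's mmap helper.
 */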
static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	int ret;

	ret = ttm_bo_mmap_obj(vma, tbo);
	if (unlikely(ret)) {
		drm_warn(obj->dev, "mmap %p failed\n", tbo);
		return ret;
	}

	/*
	 * TTM has its own object refcounting, so drop the GEM reference
	 * to avoid double accounting.
	 */
	drm_gem_object_put(obj);

	return 0;
}

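/* GEM object callbacks shared by all BOs created by this driver */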
static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
	.free = lsdc_gem_object_free,
	.export = drm_gem_prime_export,
	.pin = lsdc_gem_prime_pin,
	.unpin = lsdc_gem_prime_unpin,
	.get_sg_table = lsdc_gem_prime_get_sg_table,
	.vmap = lsdc_gem_object_vmap,
	.vunmap = lsdc_gem_object_vunmap,
	.mmap = lsdc_gem_object_mmap,
};

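/*
 * Create a GEM object wrapping a new lsdc_bo. Freshly allocated VRAM is
 * cleared before first use, and every object is put on a per-device list
 * so that debugfs can enumerate it later.
 *
 * Example (mirrors the call in lsdc_dumb_create() below):
 *	gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_VRAM, size,
 *				      false, NULL, NULL);
 */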
struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
					      u32 domain,
					      size_t size,
					      bool kernel,
					      struct sg_table *sg,
					      struct dma_resv *resv)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct drm_gem_object *gobj;
	struct lsdc_bo *lbo;
	int ret;

	lbo = lsdc_bo_create(ddev, domain, size, kernel, sg, resv);
	if (IS_ERR(lbo)) {
		ret = PTR_ERR(lbo);
		return ERR_PTR(ret);
	}

	if (!sg) {
		/* Newly allocated VRAM is filled with random data */
		lsdc_bo_clear(lbo);
	}

	gobj = &lbo->tbo.base;
	gobj->funcs = &lsdc_gem_object_funcs;

	/* Track the BOs we created */
	mutex_lock(&ldev->gem.mutex);
	list_add_tail(&lbo->list, &ldev->gem.objects);
	mutex_unlock(&ldev->gem.mutex);

	return gobj;
}

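/*
 * Import callback: wrap the pages of a foreign dma-buf in a GTT-domain
 * GEM object, reusing the exporter's reservation object. The initial
 * sharing_count of 1 records that the backing memory is shared from the
 * start.
 */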
struct drm_gem_object *
lsdc_prime_import_sg_table(struct drm_device *ddev,
			   struct dma_buf_attachment *attach,
			   struct sg_table *sg)
{
	struct dma_resv *resv = attach->dmabuf->resv;
	u64 size = attach->dmabuf->size;
	struct drm_gem_object *gobj;
	struct lsdc_bo *lbo;

	dma_resv_lock(resv, NULL);
	gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
				      sg, resv);
	dma_resv_unlock(resv);

	if (IS_ERR(gobj)) {
		drm_err(ddev, "Failed to import sg table\n");
		return gobj;
	}

	lbo = gem_to_lsdc_bo(gobj);
	lbo->sharing_count = 1;

	return gobj;
}

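/*
 * Dumb buffer creation: allocate a VRAM BO suitable for scanout. Only
 * 16 bpp and 32 bpp are accepted, the pitch is aligned to the hardware
 * requirement, and a single BO may take at most half of the usable VRAM.
 */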
int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
		     struct drm_mode_create_dumb *args)
{
	struct lsdc_device *ldev = to_lsdc(ddev);
	const struct lsdc_desc *descp = ldev->descp;
	u32 domain = LSDC_GEM_DOMAIN_VRAM;
	struct drm_gem_object *gobj;
	size_t size;
	u32 pitch;
	u32 handle;
	int ret;

	if (!args->width || !args->height)
		return -EINVAL;

	if (args->bpp != 32 && args->bpp != 16)
		return -EINVAL;

	pitch = args->width * args->bpp / 8;
	pitch = ALIGN(pitch, descp->pitch_align);
	size = pitch * args->height;
	size = ALIGN(size, PAGE_SIZE);

	/* The maximum size allowed for a single BO is half of the VRAM available */
	if (size > ldev->vram_size / 2) {
		drm_err(ddev, "Requested size (%zuMiB) exceeds half of VRAM\n", size >> 20);
		return -ENOMEM;
	}

	gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
	if (IS_ERR(gobj)) {
		drm_err(ddev, "Failed to create gem object\n");
		return PTR_ERR(gobj);
	}

	ret = drm_gem_handle_create(file, gobj, &handle);

	/* Drop the reference from allocation; the handle holds it now */
	drm_gem_object_put(gobj);
	if (ret)
		return ret;

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;
}

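/*
 * Look up the fake mmap offset of a dumb buffer for use with mmap(2).
 */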
int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
			 u32 handle, uint64_t *offset)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	*offset = drm_vma_node_offset_addr(&gobj->vma_node);

	drm_gem_object_put(gobj);

	return 0;
}

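/* Initialize the per-device list used to track the BOs we created */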
void lsdc_gem_init(struct drm_device *ddev)
{
	struct lsdc_device *ldev = to_lsdc(ddev);

	mutex_init(&ldev->gem.mutex);
	INIT_LIST_HEAD(&ldev->gem.objects);
}

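/*
 * debugfs: list every BO created by this device, with its size, memory
 * type and GPU offset, followed by the totals of pinned VRAM and GTT.
 */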
int lsdc_show_buffer_object(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *ddev = node->minor->dev;
	struct lsdc_device *ldev = to_lsdc(ddev);
	struct lsdc_bo *lbo;
	unsigned int i;

	mutex_lock(&ldev->gem.mutex);

	i = 0;

	list_for_each_entry(lbo, &ldev->gem.objects, list) {
		struct ttm_buffer_object *tbo = &lbo->tbo;
		struct ttm_resource *resource = tbo->resource;

		seq_printf(m, "bo[%04u][%p]: size: %8zuKiB %s offset: %8llx\n",
			   i, lbo, lsdc_bo_size(lbo) >> 10,
			   lsdc_mem_type_to_str(resource->mem_type),
			   lsdc_bo_gpu_offset(lbo));
		i++;
	}

	mutex_unlock(&ldev->gem.mutex);

	seq_printf(m, "Pinned BO size: VRAM: %zuKiB, GTT: %zuKiB\n",
		   ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10);

	return 0;
}