xref: /linux/drivers/gpu/drm/loongson/lsdc_gem.c (revision face6a3615a649456eb4549f6d474221d877d604)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/dma-buf.h>
7 
8 #include <drm/drm_debugfs.h>
9 #include <drm/drm_dumb_buffers.h>
10 #include <drm/drm_file.h>
11 #include <drm/drm_gem.h>
12 #include <drm/drm_prime.h>
13 
14 #include "lsdc_drv.h"
15 #include "lsdc_gem.h"
16 #include "lsdc_ttm.h"
17 
18 static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
19 {
20 	struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
21 	int ret;
22 
23 	dma_resv_assert_held(obj->resv);
24 
25 	ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
26 	if (likely(ret == 0))
27 		lbo->sharing_count++;
28 
29 	return ret;
30 }
31 
32 static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
33 {
34 	struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
35 
36 	dma_resv_assert_held(obj->resv);
37 
38 	lsdc_bo_unpin(lbo);
39 	if (lbo->sharing_count)
40 		lbo->sharing_count--;
41 }
42 
43 static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
44 {
45 	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
46 	struct ttm_tt *tt = tbo->ttm;
47 
48 	if (!tt) {
49 		drm_err(obj->dev, "sharing a buffer without backing memory\n");
50 		return ERR_PTR(-ENOMEM);
51 	}
52 
53 	return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
54 }
55 
/* Final GEM release: hand the underlying TTM BO back to TTM. */
static void lsdc_gem_object_free(struct drm_gem_object *obj)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);

	if (!tbo)
		return;

	ttm_bo_fini(tbo);
}
63 
64 static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
65 {
66 	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
67 	struct lsdc_bo *lbo = to_lsdc_bo(tbo);
68 	int ret;
69 
70 	if (lbo->vmap_count > 0) {
71 		++lbo->vmap_count;
72 		goto out;
73 	}
74 
75 	ret = lsdc_bo_pin(lbo, 0, NULL);
76 	if (unlikely(ret)) {
77 		drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
78 		return ret;
79 	}
80 
81 	ret = ttm_bo_vmap(tbo, &lbo->map);
82 	if (ret) {
83 		drm_err(obj->dev, "ttm bo vmap failed\n");
84 		lsdc_bo_unpin(lbo);
85 		return ret;
86 	}
87 
88 	lbo->vmap_count = 1;
89 
90 out:
91 	*map = lbo->map;
92 
93 	return 0;
94 }
95 
96 static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
97 {
98 	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
99 	struct lsdc_bo *lbo = to_lsdc_bo(tbo);
100 
101 	if (unlikely(!lbo->vmap_count)) {
102 		drm_warn(obj->dev, "%p is not mapped\n", lbo);
103 		return;
104 	}
105 
106 	--lbo->vmap_count;
107 	if (lbo->vmap_count == 0) {
108 		ttm_bo_vunmap(tbo, &lbo->map);
109 
110 		lsdc_bo_unpin(lbo);
111 	}
112 }
113 
/*
 * Map the BO into a userspace VMA by delegating to TTM.
 *
 * On success the GEM reference is dropped on purpose: NOTE(review) —
 * ttm_bo_mmap_obj() appears to make the vma hold its own TTM BO
 * reference, so keeping the GEM reference as well would double-count;
 * confirm against the TTM mmap documentation before changing this.
 */
static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct ttm_buffer_object *tbo = to_ttm_bo(obj);
	int ret;

	ret = ttm_bo_mmap_obj(vma, tbo);
	if (unlikely(ret)) {
		drm_warn(obj->dev, "mmap %p failed\n", tbo);
		return ret;
	}

	/* The vma now keeps the BO alive; the GEM ref is no longer needed */
	drm_gem_object_put(obj);

	return 0;
}
129 
/* GEM object vtable installed on every BO created by this driver */
static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
	.free = lsdc_gem_object_free,
	.export = drm_gem_prime_export,		/* generic dma-buf export */
	.pin = lsdc_gem_prime_pin,
	.unpin = lsdc_gem_prime_unpin,
	.get_sg_table = lsdc_gem_prime_get_sg_table,
	.vmap = lsdc_gem_object_vmap,
	.vunmap = lsdc_gem_object_vunmap,
	.mmap = lsdc_gem_object_mmap,
};
140 
141 struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
142 					      u32 domain,
143 					      size_t size,
144 					      bool kerenl,
145 					      struct sg_table *sg,
146 					      struct dma_resv *resv)
147 {
148 	struct lsdc_device *ldev = to_lsdc(ddev);
149 	struct drm_gem_object *gobj;
150 	struct lsdc_bo *lbo;
151 	int ret;
152 
153 	lbo = lsdc_bo_create(ddev, domain, size, kerenl, sg, resv);
154 	if (IS_ERR(lbo)) {
155 		ret = PTR_ERR(lbo);
156 		return ERR_PTR(ret);
157 	}
158 
159 	if (!sg) {
160 		/* VRAM is filled with random data */
161 		lsdc_bo_clear(lbo);
162 	}
163 
164 	gobj = &lbo->tbo.base;
165 	gobj->funcs = &lsdc_gem_object_funcs;
166 
167 	/* tracking the BOs we created */
168 	mutex_lock(&ldev->gem.mutex);
169 	list_add_tail(&lbo->list, &ldev->gem.objects);
170 	mutex_unlock(&ldev->gem.mutex);
171 
172 	return gobj;
173 }
174 
175 struct drm_gem_object *
176 lsdc_prime_import_sg_table(struct drm_device *ddev,
177 			   struct dma_buf_attachment *attach,
178 			   struct sg_table *sg)
179 {
180 	struct dma_resv *resv = attach->dmabuf->resv;
181 	u64 size = attach->dmabuf->size;
182 	struct drm_gem_object *gobj;
183 	struct lsdc_bo *lbo;
184 
185 	dma_resv_lock(resv, NULL);
186 	gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
187 				      sg, resv);
188 	dma_resv_unlock(resv);
189 
190 	if (IS_ERR(gobj)) {
191 		drm_err(ddev, "Failed to import sg table\n");
192 		return gobj;
193 	}
194 
195 	lbo = gem_to_lsdc_bo(gobj);
196 	lbo->sharing_count = 1;
197 
198 	return gobj;
199 }
200 
201 int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
202 		     struct drm_mode_create_dumb *args)
203 {
204 	struct lsdc_device *ldev = to_lsdc(ddev);
205 	const struct lsdc_desc *descp = ldev->descp;
206 	u32 domain = LSDC_GEM_DOMAIN_VRAM;
207 	struct drm_gem_object *gobj;
208 	int ret;
209 
210 	ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0);
211 	if (ret)
212 		return ret;
213 
214 	/* Maximum single bo size allowed is the half vram size available */
215 	if (args->size > ldev->vram_size / 2) {
216 		drm_err(ddev, "Requesting(%zuMiB) failed\n", (size_t)(args->size >> PAGE_SHIFT));
217 		return -ENOMEM;
218 	}
219 
220 	gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL);
221 	if (IS_ERR(gobj)) {
222 		drm_err(ddev, "Failed to create gem object\n");
223 		return PTR_ERR(gobj);
224 	}
225 
226 	ret = drm_gem_handle_create(file, gobj, &args->handle);
227 
228 	/* drop reference from allocate, handle holds it now */
229 	drm_gem_object_put(gobj);
230 	if (ret)
231 		return ret;
232 
233 	return 0;
234 }
235 
236 int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
237 			 u32 handle, uint64_t *offset)
238 {
239 	struct drm_gem_object *gobj;
240 
241 	gobj = drm_gem_object_lookup(filp, handle);
242 	if (!gobj)
243 		return -ENOENT;
244 
245 	*offset = drm_vma_node_offset_addr(&gobj->vma_node);
246 
247 	drm_gem_object_put(gobj);
248 
249 	return 0;
250 }
251 
/* Initialize the per-device BO tracking list and its lock. */
void lsdc_gem_init(struct drm_device *ddev)
{
	struct lsdc_device *ldev = to_lsdc(ddev);

	mutex_init(&ldev->gem.mutex);
	INIT_LIST_HEAD(&ldev->gem.objects);
}
259 
260 int lsdc_show_buffer_object(struct seq_file *m, void *arg)
261 {
262 	struct drm_info_node *node = (struct drm_info_node *)m->private;
263 	struct drm_device *ddev = node->minor->dev;
264 	struct lsdc_device *ldev = to_lsdc(ddev);
265 	struct lsdc_bo *lbo;
266 	unsigned int i;
267 
268 	mutex_lock(&ldev->gem.mutex);
269 
270 	i = 0;
271 
272 	list_for_each_entry(lbo, &ldev->gem.objects, list) {
273 		struct ttm_buffer_object *tbo = &lbo->tbo;
274 		struct ttm_resource *resource = tbo->resource;
275 
276 		seq_printf(m, "bo[%04u][%p]: size: %8zuKiB %s offset: %8llx\n",
277 			   i, lbo, lsdc_bo_size(lbo) >> 10,
278 			   lsdc_mem_type_to_str(resource->mem_type),
279 			   lsdc_bo_gpu_offset(lbo));
280 		i++;
281 	}
282 
283 	mutex_unlock(&ldev->gem.mutex);
284 
285 	seq_printf(m, "Pinned BO size: VRAM: %zuKiB, GTT: %zu KiB\n",
286 		   ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10);
287 
288 	return 0;
289 }
290