xref: /linux/drivers/gpu/drm/panthor/panthor_gem.c (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <drm/panthor_drm.h>

#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"

#ifdef CONFIG_DEBUG_FS
static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo)
{
	INIT_LIST_HEAD(&bo->debugfs.node);
}

static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo)
{
	struct panthor_device *ptdev = container_of(bo->base.base.dev,
						    struct panthor_device, base);

	bo->debugfs.creator.tgid = current->group_leader->pid;
	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);

	mutex_lock(&ptdev->gems.lock);
	list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
	mutex_unlock(&ptdev->gems.lock);
}

static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
{
	struct panthor_device *ptdev = container_of(bo->base.base.dev,
						    struct panthor_device, base);

	if (list_empty(&bo->debugfs.node))
		return;

	mutex_lock(&ptdev->gems.lock);
	list_del_init(&bo->debugfs.node);
	mutex_unlock(&ptdev->gems.lock);
}

static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
{
	bo->debugfs.flags = usage_flags;
	panthor_gem_debugfs_bo_add(bo);
}
#else
static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {}
#endif

static void panthor_gem_free_object(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;

	panthor_gem_debugfs_bo_rm(bo);

	/*
	 * The label might have been allocated with kstrdup_const(),
	 * so it must be freed with kfree_const().
	 */
	kfree_const(bo->label.str);

	mutex_destroy(&bo->label.lock);

	drm_gem_free_mmap_offset(&bo->base.base);
	mutex_destroy(&bo->gpuva_list_lock);
	drm_gem_shmem_free(&bo->base);
	drm_gem_object_put(vm_root_gem);
}

/**
 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
 * is skipped.
 */
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
{
	struct panthor_vm *vm;
	int ret;

	if (IS_ERR_OR_NULL(bo))
		return;

	vm = bo->vm;
	panthor_kernel_bo_vunmap(bo);

	if (drm_WARN_ON(bo->obj->dev,
			to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
		goto out_free_bo;

	ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
	if (ret)
		goto out_free_bo;

	panthor_vm_free_va(vm, &bo->va_node);
	drm_gem_object_put(bo->obj);

out_free_bo:
	panthor_vm_put(vm);
	kfree(bo);
}

/**
 * panthor_kernel_bo_create() - Create and map a GEM object to a VM
 * @ptdev: Device.
 * @vm: VM to map the GEM to. Must not be NULL (the function warns and
 * returns -EINVAL otherwise).
 * @size: Size of the buffer object.
 * @bo_flags: Combination of drm_panthor_bo_flags flags.
 * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
 * that are related to map operations).
 * @gpu_va: GPU address assigned when mapping to the VM.
 * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
 * automatically allocated.
 * @name: Descriptive label of the BO's contents.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va, const char *name)
{
	struct drm_gem_shmem_object *obj;
	struct panthor_kernel_bo *kbo;
	struct panthor_gem_object *bo;
	u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
	int ret;

	if (drm_WARN_ON(&ptdev->base, !vm))
		return ERR_PTR(-EINVAL);

	kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
	if (!kbo)
		return ERR_PTR(-ENOMEM);

	obj = drm_gem_shmem_create(&ptdev->base, size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free_bo;
	}

	bo = to_panthor_bo(&obj->base);
	kbo->obj = &obj->base;
	bo->flags = bo_flags;

	if (vm == panthor_fw_vm(ptdev))
		debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;

	panthor_gem_kernel_bo_set_label(kbo, name);
	panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);

	/* The system and GPU MMU page size might differ, which becomes a
	 * problem for FW sections that need to be mapped at an explicit
	 * address, since our PAGE_SIZE alignment might cover a VA range
	 * that's expected to be used for another section.
	 * Make sure we never map more than we need.
	 */
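	/* e.g. (illustrative numbers, not from the original comment): with
	 * 64K CPU pages and 4K GPU pages, aligning the mapped size to
	 * PAGE_SIZE could claim VA space reserved for the next FW section,
	 * while aligning to panthor_vm_page_size() cannot.
	 */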
	size = ALIGN(size, panthor_vm_page_size(vm));
	ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
	if (ret)
		goto err_put_obj;

	ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
	if (ret)
		goto err_free_va;

	kbo->vm = panthor_vm_get(vm);
	bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
	drm_gem_object_get(bo->exclusive_vm_root_gem);
	bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	return kbo;

err_free_va:
	panthor_vm_free_va(vm, &kbo->va_node);

err_put_obj:
	drm_gem_object_put(&obj->base);

err_free_bo:
	kfree(kbo);
	return ERR_PTR(ret);
}
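
/*
 * Example usage (an illustrative sketch, not taken from this file): a caller
 * such as the FW code might create a small kernel BO at an automatically
 * assigned VA like so:
 *
 *	kbo = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), SZ_4K,
 *				       DRM_PANTHOR_BO_NO_MMAP,
 *				       DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *				       PANTHOR_VM_KERNEL_AUTO_VA,
 *				       "example BO");
 *	if (IS_ERR(kbo))
 *		return PTR_ERR(kbo);
 *
 * and release it later with panthor_kernel_bo_destroy(kbo).
 */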

static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	/* We can't export GEMs that have an exclusive VM. */
	if (to_panthor_bo(obj)->exclusive_vm_root_gem)
		return ERR_PTR(-EINVAL);

	return drm_gem_prime_export(obj, flags);
}

static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	enum drm_gem_object_status res = 0;

	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	return res;
}

static const struct drm_gem_object_funcs panthor_gem_funcs = {
	.free = panthor_gem_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panthor_gem_status,
	.export = panthor_gem_prime_export,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panthor_gem_create_object - Implementation of driver->gem_create_object.
 * @ddev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.base.funcs = &panthor_gem_funcs;
	obj->base.map_wc = !ptdev->coherent;
	mutex_init(&obj->gpuva_list_lock);
	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
	mutex_init(&obj->label.lock);

	panthor_gem_debugfs_bo_init(obj);

	return &obj->base.base;
}

/**
 * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
 * @file: DRM file.
 * @ddev: DRM device.
 * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
 * @size: Size of the GEM object to allocate. Updated on success with the
 * actual (page-aligned) size of the object.
 * @flags: Combination of drm_panthor_bo_flags flags.
 * @handle: Pointer holding the handle pointing to the new GEM object.
 *
 * Return: Zero on success, a negative error code otherwise.
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panthor_gem_object *bo;

	shmem = drm_gem_shmem_create(ddev, *size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	bo = to_panthor_bo(&shmem->base);
	bo->flags = flags;

	if (exclusive_vm) {
		bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
		drm_gem_object_get(bo->exclusive_vm_root_gem);
		bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	}

	panthor_gem_debugfs_set_usage_flags(bo, 0);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds that id, which is what userspace sees.
	 */
	ret = drm_gem_handle_create(file, &shmem->base, handle);
	if (!ret)
		*size = bo->base.base.size;

	/* Drop the reference taken at allocation time; the handle holds its own now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}
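
/*
 * Illustrative caller sketch (hypothetical argument struct and field names,
 * not part of this file): a BO_CREATE ioctl handler would typically forward
 * its arguments like so:
 *
 *	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
 *					     args->flags, &args->handle);
 *
 * On success, args->size holds the page-aligned size actually allocated and
 * args->handle can be copied back to userspace.
 */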

void
panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	const char *old_label;

	scoped_guard(mutex, &bo->label.lock) {
		old_label = bo->label.str;
		bo->label.str = label;
	}

	kfree_const(old_label);
}
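
/*
 * Ownership note (an illustrative sketch, not from the original file):
 * panthor_gem_bo_set_label() takes ownership of @label, which is eventually
 * released with kfree_const(). Callers must therefore pass a string obtained
 * from kstrdup() or kstrdup_const(), e.g.:
 *
 *	str = kstrdup_const("my label", GFP_KERNEL);
 *	if (str)
 *		panthor_gem_bo_set_label(obj, str);
 */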

void
panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
{
	const char *str;

	/* We should never attempt labelling a UM-exposed GEM object */
	if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
		return;

	if (!label)
		return;

	str = kstrdup_const(label, GFP_KERNEL);
	if (!str) {
		/* Failing to allocate memory for a label isn't a fatal condition */
		drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
		return;
	}

	panthor_gem_bo_set_label(bo->obj, str);
}

#ifdef CONFIG_DEBUG_FS
struct gem_size_totals {
	size_t size;
	size_t resident;
	size_t reclaimable;
};

static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
{
	int len;
	int i;

	static const char * const gem_state_flags_names[] = {
		[PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
		[PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
	};

	static const char * const gem_usage_flags_names[] = {
		[PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
		[PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
	};

	seq_puts(m, "GEM state flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
		if (!gem_state_flags_names[i])
			continue;
		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
			   (u32)BIT(i), (i < len - 1) ? ", " : "\n");
	}

	seq_puts(m, "GEM usage flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
		if (!gem_usage_flags_names[i])
			continue;
		seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
			   (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
	}
}
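
/*
 * Assuming the state/usage bits start at bit 0 (as the tables above
 * suggest), the header printed by this helper would look roughly like:
 *
 *	GEM state flags: imported (0x1), exported (0x2)
 *	GEM usage flags: kernel (0x1), fw-mapped (0x2)
 */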

static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
					 struct seq_file *m,
					 struct gem_size_totals *totals)
{
	unsigned int refcount = kref_read(&bo->base.base.refcount);
	char creator_info[32] = {};
	size_t resident_size;
	u32 gem_usage_flags = bo->debugfs.flags;
	u32 gem_state_flags = 0;

	/* Skip BOs being destroyed. */
	if (!refcount)
		return;

	resident_size = bo->base.pages ? bo->base.base.size : 0;

	snprintf(creator_info, sizeof(creator_info),
		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
		   creator_info,
		   bo->base.base.name,
		   refcount,
		   bo->base.base.size,
		   resident_size,
		   drm_vma_node_start(&bo->base.base.vma_node));

	if (bo->base.base.import_attach)
		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
	if (bo->base.base.dma_buf)
		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;

	seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);

	scoped_guard(mutex, &bo->label.lock) {
		seq_printf(m, "%s\n", bo->label.str ? : "");
	}

	totals->size += bo->base.base.size;
	totals->resident += resident_size;
	if (bo->base.madv > 0)
		totals->reclaimable += resident_size;
}

void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
				   struct seq_file *m)
{
	struct gem_size_totals totals = {0};
	struct panthor_gem_object *bo;

	panthor_gem_debugfs_print_flag_names(m);

	seq_puts(m, "created-by                      global-name     refcount        size            resident-size   file-offset       state      usage       label\n");
	seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");

	scoped_guard(mutex, &ptdev->gems.lock) {
		list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
			panthor_gem_debugfs_bo_print(bo, m, &totals);
		}
	}

	seq_puts(m, "==============================================================================================================================================\n");
	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
		   totals.size, totals.resident, totals.reclaimable);
}
#endif
448