// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <drm/panthor_drm.h>

#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"

#ifdef CONFIG_DEBUG_FS
static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo)
{
	INIT_LIST_HEAD(&bo->debugfs.node);
}

static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo)
{
	struct panthor_device *ptdev = container_of(bo->base.base.dev,
						    struct panthor_device, base);

	bo->debugfs.creator.tgid = current->group_leader->pid;
	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);

	mutex_lock(&ptdev->gems.lock);
	list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
	mutex_unlock(&ptdev->gems.lock);
}

static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
{
	struct panthor_device *ptdev = container_of(bo->base.base.dev,
						    struct panthor_device, base);

	if (list_empty(&bo->debugfs.node))
		return;

	mutex_lock(&ptdev->gems.lock);
	list_del_init(&bo->debugfs.node);
	mutex_unlock(&ptdev->gems.lock);
}

static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
{
	bo->debugfs.flags = usage_flags;
	panthor_gem_debugfs_bo_add(bo);
}
#else
static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {}
#endif

static void panthor_gem_free_object(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;

	panthor_gem_debugfs_bo_rm(bo);

	/*
	 * The label might have been allocated with kstrdup_const(), so we
	 * need to take that into account when freeing the memory.
	 */
	kfree_const(bo->label.str);

	mutex_destroy(&bo->label.lock);

	drm_gem_free_mmap_offset(&bo->base.base);
	drm_gem_shmem_free(&bo->base);
	drm_gem_object_put(vm_root_gem);
}

/**
 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
 * is skipped.
 */
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
{
	struct panthor_vm *vm;
	int ret;

	if (IS_ERR_OR_NULL(bo))
		return;

	vm = bo->vm;
	panthor_kernel_bo_vunmap(bo);

	if (drm_WARN_ON(bo->obj->dev,
			to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
		goto out_free_bo;

	ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
	if (ret)
		goto out_free_bo;

	panthor_vm_free_va(vm, &bo->va_node);
	drm_gem_object_put(bo->obj);

out_free_bo:
	panthor_vm_put(vm);
	kfree(bo);
}

/**
 * panthor_kernel_bo_create() - Create and map a GEM object to a VM
 * @ptdev: Device.
 * @vm: VM to map the GEM to. Must not be NULL.
 * @size: Size of the buffer object.
 * @bo_flags: Combination of drm_panthor_bo_flags flags.
 * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
 * that are related to map operations).
 * @gpu_va: GPU address assigned when mapping to the VM.
 * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
 * automatically allocated.
 * @name: Descriptive label of the BO's contents.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va, const char *name)
{
	struct drm_gem_shmem_object *obj;
	struct panthor_kernel_bo *kbo;
	struct panthor_gem_object *bo;
	u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
	int ret;

	if (drm_WARN_ON(&ptdev->base, !vm))
		return ERR_PTR(-EINVAL);

	kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
	if (!kbo)
		return ERR_PTR(-ENOMEM);

	obj = drm_gem_shmem_create(&ptdev->base, size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free_bo;
	}

	bo = to_panthor_bo(&obj->base);
	kbo->obj = &obj->base;
	bo->flags = bo_flags;

	if (vm == panthor_fw_vm(ptdev))
		debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;

	panthor_gem_kernel_bo_set_label(kbo, name);
	panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);

	/* The system and GPU MMU page sizes might differ, which becomes a
	 * problem for FW sections that need to be mapped at an explicit
	 * address, since our PAGE_SIZE alignment might cover a VA range
	 * that's expected to be used for another section.
	 * Make sure we never map more than we need.
	 */
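	/* Worked example (hypothetical sizes): with 64K CPU pages and 4K GPU
	 * pages, a 4K FW section rounded up to PAGE_SIZE would claim a full
	 * 64K of VA and overlap the fixed address expected by the next
	 * section; aligning to the GPU MMU page size maps only what's needed.
	 */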
	size = ALIGN(size, panthor_vm_page_size(vm));
	ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
	if (ret)
		goto err_put_obj;

	ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
	if (ret)
		goto err_free_va;

	kbo->vm = panthor_vm_get(vm);
	bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
	drm_gem_object_get(bo->exclusive_vm_root_gem);
	bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	return kbo;

err_free_va:
	panthor_vm_free_va(vm, &kbo->va_node);

err_put_obj:
	drm_gem_object_put(&obj->base);

err_free_bo:
	kfree(kbo);
	return ERR_PTR(ret);
}
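
/*
 * Example usage (an illustrative sketch, not part of the driver logic):
 * a typical create/destroy cycle for a kernel BO mapped in the FW VM.
 * The size, flags and label below are assumptions picked for the example.
 *
 *	struct panthor_kernel_bo *kbo;
 *
 *	kbo = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), SZ_4K,
 *				       DRM_PANTHOR_BO_NO_MMAP,
 *				       DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 *				       PANTHOR_VM_KERNEL_AUTO_VA, "example");
 *	if (IS_ERR(kbo))
 *		return PTR_ERR(kbo);
 *
 *	...
 *
 *	panthor_kernel_bo_destroy(kbo);
 */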

static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	/* We can't export GEMs that have an exclusive VM. */
	if (to_panthor_bo(obj)->exclusive_vm_root_gem)
		return ERR_PTR(-EINVAL);

	return drm_gem_prime_export(obj, flags);
}

static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	enum drm_gem_object_status res = 0;

	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	return res;
}

static const struct drm_gem_object_funcs panthor_gem_funcs = {
	.free = panthor_gem_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panthor_gem_status,
	.export = panthor_gem_prime_export,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panthor_gem_create_object - Implementation of driver->gem_create_object.
 * @ddev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.base.funcs = &panthor_gem_funcs;
	obj->base.map_wc = !ptdev->coherent;
	mutex_init(&obj->label.lock);

	panthor_gem_debugfs_bo_init(obj);

	return &obj->base.base;
}

/**
 * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
 * @file: DRM file.
 * @ddev: DRM device.
 * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
 * @size: Size of the GEM object to allocate. Updated with the actual
 * (page-aligned) size on success.
 * @flags: Combination of drm_panthor_bo_flags flags.
 * @handle: Pointer holding the handle pointing to the new GEM object.
 *
 * Return: Zero on success, a negative error code otherwise.
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panthor_gem_object *bo;

	shmem = drm_gem_shmem_create(ddev, *size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	bo = to_panthor_bo(&shmem->base);
	bo->flags = flags;

	if (exclusive_vm) {
		bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
		drm_gem_object_get(bo->exclusive_vm_root_gem);
		bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	}

	panthor_gem_debugfs_set_usage_flags(bo, 0);

	/* If this is a write-combine mapping, we query the sgt to force a CPU
	 * cache flush (dma_map_sgtable() is called when the sgt is created).
	 * This ensures the zeroing is visible to any uncached mapping created
	 * by vmap/mmap.
	 * FIXME: Ideally this should be done when pages are allocated, not at
	 * BO creation time.
	 */
	if (shmem->map_wc) {
		struct sg_table *sgt;

		sgt = drm_gem_shmem_get_pages_sgt(shmem);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto out_put_gem;
		}
	}

	/*
	 * Allocate a handle for the object and register it in the handle
	 * table; the handle is the ID userspace uses to refer to the GEM
	 * object.
	 */
	ret = drm_gem_handle_create(file, &shmem->base, handle);
	if (!ret)
		*size = bo->base.base.size;

out_put_gem:
	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}
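
/*
 * Example usage (an illustrative sketch): creating a BO on behalf of
 * userspace, roughly what a BO_CREATE ioctl handler would do. The "args"
 * field names are assumptions for the example, not the exact uAPI.
 *
 *	u64 size = args->size;
 *	u32 handle;
 *	int ret;
 *
 *	ret = panthor_gem_create_with_handle(file, ddev, exclusive_vm,
 *					     &size, args->flags, &handle);
 *	if (ret)
 *		return ret;
 *
 *	(size now holds the actual, page-aligned BO size)
 *	args->size = size;
 *	args->handle = handle;
 */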

void
panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	const char *old_label;

	scoped_guard(mutex, &bo->label.lock) {
		old_label = bo->label.str;
		bo->label.str = label;
	}

	kfree_const(old_label);
}

void
panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
{
	const char *str;

	/* We should never attempt labelling a UM-exposed GEM object */
	if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
		return;

	if (!label)
		return;

	str = kstrdup_const(label, GFP_KERNEL);
	if (!str) {
		/* Failing to allocate memory for a label isn't a fatal condition */
		drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
		return;
	}

	panthor_gem_bo_set_label(bo->obj, str);
}
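
/*
 * Example usage (an illustrative sketch): relabelling a kernel BO when its
 * purpose changes. kstrdup_const() copies the string, so a literal or a
 * temporary buffer both work here.
 *
 *	panthor_gem_kernel_bo_set_label(kbo, "ringbuf");
 *
 * Note the ownership difference: panthor_gem_bo_set_label() takes ownership
 * of the string it is given, while this kernel-BO variant duplicates it
 * first.
 */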

#ifdef CONFIG_DEBUG_FS
struct gem_size_totals {
	size_t size;
	size_t resident;
	size_t reclaimable;
};

static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
{
	int len;
	int i;

	static const char * const gem_state_flags_names[] = {
		[PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
		[PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
	};

	static const char * const gem_usage_flags_names[] = {
		[PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
		[PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
	};

	seq_puts(m, "GEM state flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
		if (!gem_state_flags_names[i])
			continue;
		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
			   (u32)BIT(i), (i < len - 1) ? ", " : "\n");
	}

	seq_puts(m, "GEM usage flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
		if (!gem_usage_flags_names[i])
			continue;
		seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
			   (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
	}
}

static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
					 struct seq_file *m,
					 struct gem_size_totals *totals)
{
	unsigned int refcount = kref_read(&bo->base.base.refcount);
	char creator_info[32] = {};
	size_t resident_size;
	u32 gem_usage_flags = bo->debugfs.flags;
	u32 gem_state_flags = 0;

	/* Skip BOs being destroyed. */
	if (!refcount)
		return;

	resident_size = bo->base.pages ? bo->base.base.size : 0;

	snprintf(creator_info, sizeof(creator_info),
		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
		   creator_info,
		   bo->base.base.name,
		   refcount,
		   bo->base.base.size,
		   resident_size,
		   drm_vma_node_start(&bo->base.base.vma_node));

	if (bo->base.base.import_attach)
		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
	if (bo->base.base.dma_buf)
		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;

	seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);

	scoped_guard(mutex, &bo->label.lock) {
		seq_printf(m, "%s\n", bo->label.str ? : "");
	}

	totals->size += bo->base.base.size;
	totals->resident += resident_size;
	if (bo->base.madv > 0)
		totals->reclaimable += resident_size;
}

void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
				   struct seq_file *m)
{
	struct gem_size_totals totals = {0};
	struct panthor_gem_object *bo;

	panthor_gem_debugfs_print_flag_names(m);

449 seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n");
450 seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");

	scoped_guard(mutex, &ptdev->gems.lock) {
		list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
			panthor_gem_debugfs_bo_print(bo, m, &totals);
		}
	}

	seq_puts(m, "==============================================================================================================================================\n");
	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
		   totals.size, totals.resident, totals.reclaimable);
}
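
/*
 * Illustrative sample of the resulting dump (hypothetical values; the
 * column layout follows the format strings above, truncated here with
 * "..." for brevity):
 *
 *	created-by                      global-name     refcount ...
 *	glmark2-es2/1234                1               2        ...
 */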
#endif