// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2025 Amazon.com, Inc. or its affiliates */

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include <drm/drm_print.h>
#include "panfrost_device.h"
#include "panfrost_drv.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

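/**
 * panfrost_gem_init - Set up device-wide GEM state.
 * @pfdev: Panfrost device.
 *
 * If transparent hugepage support is enabled and not disabled via the module
 * parameter, ask the GEM layer for a huge-page backed shmem mount. Failure is
 * logged but not fatal; the driver simply falls back to regular pages.
 */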
void panfrost_gem_init(struct panfrost_device *pfdev)
{
	int err;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    !panfrost_transparent_hugepage)
		return;

	err = drm_gem_huge_mnt_create(&pfdev->base, "within_size");
	if (drm_gem_get_huge_mnt(&pfdev->base))
		drm_info(&pfdev->base, "Using Transparent Hugepage\n");
	else if (err)
		drm_warn(&pfdev->base, "Can't use Transparent Hugepage (%d)\n",
			 err);
}

#ifdef CONFIG_DEBUG_FS
static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
					struct panfrost_gem_object *bo)
{
	bo->debugfs.creator.tgid = current->tgid;
	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);

	mutex_lock(&pfdev->debugfs.gems_lock);
	list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
	mutex_unlock(&pfdev->debugfs.gems_lock);
}

static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
{
	struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev);

	if (list_empty(&bo->debugfs.node))
		return;

	mutex_lock(&pfdev->debugfs.gems_lock);
	list_del_init(&bo->debugfs.node);
	mutex_unlock(&pfdev->debugfs.gems_lock);
}
#else
static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
					struct panfrost_gem_object *bo)
{}
static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
#endif

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	kfree_const(bo->label.str);
	panfrost_gem_debugfs_bo_rm(bo);
	mutex_destroy(&bo->label.lock);

	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

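/**
 * panfrost_gem_mapping_get - Look up a BO's mapping in a given MMU context.
 * @bo: BO to look up.
 * @priv: File private data whose MMU context the mapping belongs to.
 *
 * Return: The mapping with an extra reference held, or NULL if the BO is not
 * mapped in this context. The caller must release the reference with
 * panfrost_gem_mapping_put().
 */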
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

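/**
 * panfrost_gem_mapping_put - Release a reference on a mapping.
 * @mapping: Mapping to release, may be NULL.
 *
 * When the last reference goes away, the GPU mapping is torn down and the
 * references on the BO and the MMU context are dropped.
 */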
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

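/**
 * panfrost_gem_teardown_mappings_locked - Unmap a BO from all address spaces.
 * @bo: BO whose GPU mappings should be torn down.
 *
 * The caller must hold @bo->mappings.lock. The mapping objects themselves
 * stay alive until their last reference is dropped.
 */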
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

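/**
 * panfrost_gem_open - GEM object open callback.
 * @obj: GEM object being opened.
 * @file_priv: DRM file the object is opened on.
 *
 * Allocate a mapping of the BO in this file's MMU context, reserve a GPU VA
 * range for it and, unless the BO is a heap object (whose pages are populated
 * on demand), map it right away.
 *
 * Return: 0 on success, negative error code on failure.
 */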
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc_obj(*mapping);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is only 24 bits wide. We assume executable buffers will be
	 * less than 16MB, so aligning executable buffers to their size will
	 * avoid crossing a 16MB boundary.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

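/**
 * panfrost_gem_close - GEM object close callback.
 * @obj: GEM object being closed.
 * @file_priv: DRM file the object is closed on.
 *
 * Drop the reference this file holds on its mapping of the BO, which tears
 * the GPU mapping down once nothing else uses it.
 */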
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin_locked(&bo->base);
}

static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	enum drm_gem_object_status res = 0;

	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (bo->base.madv == PANFROST_MADV_DONTNEED)
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}

static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap) {
		return bo->heap_rss_size;
	} else if (bo->base.pages) {
		WARN_ON(bo->heap_rss_size);
		return bo->base.base.size;
	}

	return 0;
}

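/*
 * Wrappers around the generic PRIME map/unmap helpers that remember the
 * sg_table in attach->priv, so the begin/end_cpu_access hooks below can
 * also sync the importers' mappings.
 */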
static struct sg_table *
panfrost_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			       enum dma_data_direction dir)
{
	struct sg_table *sgt = drm_gem_map_dma_buf(attach, dir);

	if (!IS_ERR(sgt))
		attach->priv = sgt;

	return sgt;
}

static void
panfrost_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	attach->priv = NULL;
	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

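/*
 * begin/end_cpu_access hooks for the exported dma-buf: sync the exporter's
 * own sg_table and kernel vmap as well as every attached importer's sg_table
 * cached in attach->priv above.
 */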
static int
panfrost_gem_prime_begin_cpu_access(struct dma_buf *dma_buf,
				    enum dma_data_direction dir)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct dma_buf_attachment *attach;

	dma_resv_lock(obj->resv, NULL);
	if (shmem->sgt)
		dma_sync_sgtable_for_cpu(dev->dev, shmem->sgt, dir);

	if (shmem->vaddr)
		invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);

	list_for_each_entry(attach, &dma_buf->attachments, node) {
		struct sg_table *sgt = attach->priv;

		if (sgt)
			dma_sync_sgtable_for_cpu(attach->dev, sgt, dir);
	}
	dma_resv_unlock(obj->resv);

	return 0;
}

static int
panfrost_gem_prime_end_cpu_access(struct dma_buf *dma_buf,
				  enum dma_data_direction dir)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct dma_buf_attachment *attach;

	dma_resv_lock(obj->resv, NULL);
	list_for_each_entry(attach, &dma_buf->attachments, node) {
		struct sg_table *sgt = attach->priv;

		if (sgt)
			dma_sync_sgtable_for_device(attach->dev, sgt, dir);
	}

	if (shmem->vaddr)
		flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);

	if (shmem->sgt)
		dma_sync_sgtable_for_device(dev->dev, shmem->sgt, dir);

	dma_resv_unlock(obj->resv);
	return 0;
}

static const struct dma_buf_ops panfrost_dma_buf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = panfrost_gem_prime_map_dma_buf,
	.unmap_dma_buf = panfrost_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
	.begin_cpu_access = panfrost_gem_prime_begin_cpu_access,
	.end_cpu_access = panfrost_gem_prime_end_cpu_access,
};

static struct dma_buf *
panfrost_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
		.ops = &panfrost_dma_buf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}

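/**
 * panfrost_gem_prime_import - GEM PRIME import callback.
 * @dev: DRM device.
 * @dma_buf: dma-buf to import.
 *
 * Handle self-imports of buffers we exported ourselves by taking a GEM
 * reference instead of going through the generic import path.
 *
 * Return: The GEM object backing @dma_buf, or an ERR_PTR() on failure.
 */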
struct drm_gem_object *
panfrost_gem_prime_import(struct drm_device *dev,
			  struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (dma_buf->ops == &panfrost_dma_buf_ops && obj->dev == dev) {
		/* Importing dmabuf exported from our own gem increases
		 * refcount on gem itself instead of f_count of dmabuf.
		 */
		drm_gem_object_get(obj);
		return obj;
	}

	return drm_gem_prime_import(dev, dma_buf);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.export = panfrost_gem_prime_export,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panfrost_gem_status,
	.rss = panfrost_gem_rss,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct panfrost_gem_object *obj;

	obj = kzalloc_obj(*obj);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;
	mutex_init(&obj->label.lock);

	panfrost_gem_debugfs_bo_add(pfdev, obj);

	return &obj->base.base;
}

static bool
should_map_wc(struct panfrost_gem_object *bo)
{
	struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev);

	/* We can't do uncached mappings if the device is coherent,
	 * because the zeroing done by the shmem layer at page allocation
	 * time happens on a cached mapping which isn't CPU-flushed (at least
	 * not on Arm64, where the flush is deferred to PTE setup time, and
	 * only done conditionally based on the mapping permissions). We can't
	 * rely on dma_map_sgtable()/dma_sync_sgtable_for_xxx() to flush those
	 * either, because they are no-ops when dev_is_dma_coherent() returns
	 * true.
	 */
	if (pfdev->coherent)
		return false;

	/* Cached mappings were explicitly requested, so no write-combine. */
	if (bo->wb_mmap)
		return false;

	/* The default is write-combine. */
	return true;
}

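/**
 * panfrost_gem_create - Allocate a Panfrost GEM object.
 * @dev: DRM device.
 * @size: Requested size in bytes; rounded up to 2MB for heap objects.
 * @flags: Combination of PANFROST_BO_* creation flags.
 *
 * Return: The new object, or an ERR_PTR() on failure.
 */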
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Heap buffers are not supposed to be CPU-visible, so don't allow
	 * WB_MMAP on them.
	 */
	if ((flags & PANFROST_BO_HEAP) && (flags & PANFROST_BO_WB_MMAP))
		return ERR_PTR(-EINVAL);

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);
	bo->wb_mmap = !!(flags & PANFROST_BO_WB_MMAP);
	bo->base.map_wc = should_map_wc(bo);

	return bo;
}

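/**
 * panfrost_gem_prime_import_sg_table - Wrap an imported sg_table in a GEM object.
 * @dev: DRM device.
 * @attach: dma-buf attachment the pages come from.
 * @sgt: sg_table describing the imported pages.
 *
 * Return: The new GEM object, or an ERR_PTR() on failure.
 */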
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	/*
	 * We assign this generic label because this function cannot be
	 * reached through any of the Panfrost UM driver-specific code
	 * paths, unless a label is later set by explicitly calling the
	 * SET_LABEL_BO ioctl. It is therefore preferable to have a
	 * blanket BO tag that tells us the object was imported from
	 * another driver than nothing at all.
	 */
	panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");

	return obj;
}

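/**
 * panfrost_gem_set_label - Attach a label to a BO.
 * @obj: GEM object to label.
 * @label: New label string, or NULL to clear it. Ownership is transferred to
 * the BO.
 *
 * The previous label, if any, is freed.
 */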
void
panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	const char *old_label;

	scoped_guard(mutex, &bo->label.lock) {
		old_label = bo->label.str;
		bo->label.str = label;
	}

	kfree_const(old_label);
}

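/**
 * panfrost_gem_sync - Perform CPU cache maintenance on a BO range.
 * @obj: GEM object to sync.
 * @type: PANFROST_BO_SYNC_* operation to perform.
 * @offset: Start offset of the range inside the BO.
 * @size: Size of the range in bytes.
 *
 * Return: 0 on success or if no maintenance is needed, a negative error code
 * otherwise.
 */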
int
panfrost_gem_sync(struct drm_gem_object *obj, u32 type, u32 offset, u32 size)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct drm_gem_shmem_object *shmem = &bo->base;
	const struct drm_device *dev = shmem->base.dev;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned int count;

	/* Make sure the range is in bounds. */
	if (offset + size < offset || offset + size > shmem->base.size)
		return -EINVAL;

	/* Disallow CPU-cache maintenance on imported buffers. */
	if (drm_gem_is_imported(&shmem->base))
		return -EINVAL;

	switch (type) {
	case PANFROST_BO_SYNC_CPU_CACHE_FLUSH:
	case PANFROST_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE:
		break;

	default:
		return -EINVAL;
	}

	/* Don't bother if it's WC-mapped */
	if (shmem->map_wc)
		return 0;

	/* Nothing to do if the size is zero. */
	if (size == 0)
		return 0;

	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		if (size == 0)
			break;

		dma_addr_t paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		if (len <= offset) {
			offset -= len;
			continue;
		}

		paddr += offset;
		len -= offset;
		len = min_t(size_t, len, size);
		size -= len;
		offset = 0;

		/* It's unclear whether dma_sync_xxx() is the right API to do CPU
		 * cache maintenance, given an IOMMU can register its own
		 * implementation doing more than just CPU cache flushes/invalidation,
		 * and what we really care about here is CPU caches only. Still, it's
		 * the best we have that is both arch-agnostic and does at least the
		 * CPU cache maintenance on a <page,offset,size> tuple.
		 *
		 * Also, I wish we could do a single
		 *
		 *	dma_sync_single_for_device(BIDIR)
		 *
		 * and get a flush+invalidate, but that's not how it's implemented
		 * in practice (at least on arm64), so we have to make it
		 *
		 *	dma_sync_single_for_device(TO_DEVICE)
		 *	dma_sync_single_for_cpu(FROM_DEVICE)
		 *
		 * for the flush+invalidate case.
		 */
		dma_sync_single_for_device(dev->dev, paddr, len, DMA_TO_DEVICE);
		if (type == PANFROST_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE)
			dma_sync_single_for_cpu(dev->dev, paddr, len, DMA_FROM_DEVICE);
	}

	return 0;
}

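/**
 * panfrost_gem_internal_set_label - Label a kernel-internal BO.
 * @obj: GEM object to label; must not be exposed to userspace.
 * @label: Label string, duplicated before being attached to the BO.
 *
 * Allocation failures are logged and otherwise ignored, since a missing
 * label is not fatal.
 */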
void
panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	const char *str;

	/* We should never attempt labelling a UM-exposed GEM object */
	if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
		return;

	if (!label)
		return;

	str = kstrdup_const(label, GFP_KERNEL);
	if (!str) {
		/* Failing to allocate memory for a label isn't a fatal condition */
		drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label\n");
		return;
	}

	panfrost_gem_set_label(obj, str);
}

#ifdef CONFIG_DEBUG_FS
struct gem_size_totals {
	size_t size;
	size_t resident;
	size_t reclaimable;
};

struct flag_def {
	u32 flag;
	const char *name;
};

static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
{
	int len;
	int i;

	static const struct flag_def gem_state_flags_names[] = {
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
	};

	seq_puts(m, "GEM state flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
			   gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
	}
}

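/* Print one line per BO and accumulate the size totals for the summary. */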
static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
					  struct seq_file *m,
					  struct gem_size_totals *totals)
{
	unsigned int refcount = kref_read(&bo->base.base.refcount);
	char creator_info[32] = {};
	size_t resident_size;
	u32 gem_state_flags = 0;

	/* Skip BOs being destroyed. */
	if (!refcount)
		return;

	resident_size = panfrost_gem_rss(&bo->base.base);

	snprintf(creator_info, sizeof(creator_info),
		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
		   creator_info,
		   bo->base.base.name,
		   refcount,
		   bo->base.base.size,
		   resident_size,
		   drm_vma_node_start(&bo->base.base.vma_node));

	if (bo->base.base.import_attach)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
	if (bo->base.base.dma_buf)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;

	if (bo->base.madv < 0)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
	else if (bo->base.madv > 0)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;

	seq_printf(m, "0x%-10x", gem_state_flags);

	scoped_guard(mutex, &bo->label.lock) {
		seq_printf(m, "%s\n", bo->label.str ?: "");
	}

	totals->size += bo->base.base.size;
	totals->resident += resident_size;
	if (bo->base.madv > 0)
		totals->reclaimable += resident_size;
}

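/**
 * panfrost_gem_debugfs_print_bos - Dump the list of BOs to a debugfs file.
 * @pfdev: Panfrost device.
 * @m: seq_file to print to.
 *
 * Walks the device-wide GEM list under debugfs.gems_lock and prints one
 * entry per BO, followed by size/resident/reclaimable totals.
 */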
void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
				    struct seq_file *m)
{
	struct gem_size_totals totals = {0};
	struct panfrost_gem_object *bo;

	panfrost_gem_debugfs_print_flag_names(m);

	seq_puts(m, "created-by                      global-name     refcount        size            resident-size   file-offset       state       label\n");
	seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");

	scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
		list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
			panfrost_gem_debugfs_bo_print(bo, m, &totals);
		}
	}

	seq_puts(m, "===================================================================================================================================\n");
	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
		   totals.size, totals.resident, totals.reclaimable);
}
#endif