// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include <drm/drm_print.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

#ifdef CONFIG_DEBUG_FS
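/*
 * Record which process created the BO and add it to the device-wide
 * list consumed by panfrost_gem_debugfs_print_bos().
 */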
static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
					struct panfrost_gem_object *bo)
{
	bo->debugfs.creator.tgid = current->group_leader->pid;
	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);

	mutex_lock(&pfdev->debugfs.gems_lock);
	list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
	mutex_unlock(&pfdev->debugfs.gems_lock);
}

static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
{
	struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev);

	if (list_empty(&bo->debugfs.node))
		return;

	mutex_lock(&pfdev->debugfs.gems_lock);
	list_del_init(&bo->debugfs.node);
	mutex_unlock(&pfdev->debugfs.gems_lock);
}
#else
static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
					struct panfrost_gem_object *bo)
{}
static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
#endif

/* Called by the DRM core on the last userspace/kernel unreference of
 * the BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	kfree_const(bo->label.str);
	panfrost_gem_debugfs_bo_rm(bo);
	mutex_destroy(&bo->label.lock);

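	/*
	 * Heap BOs are backed by one sg_table per 2MB chunk, populated on
	 * demand by the heap fault handling in panfrost_mmu.c, so only the
	 * entries that were actually mapped need to be unmapped and freed.
	 */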
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

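/*
 * Look up the mapping of @bo in @priv's MMU context and take a reference
 * on it. Returns NULL if the BO isn't mapped in that context. The caller
 * must release the reference with panfrost_gem_mapping_put().
 */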
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

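/*
 * Undo the GPU side of a mapping: unmap it from the MMU if it is
 * currently mapped, then give the VA range back to the allocator.
 */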
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

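/*
 * Called by the DRM core each time a handle to the BO is created for
 * @file_priv: reserve a VA range in that file's MMU context and, for
 * non-heap BOs, map the pages right away (heap BOs are mapped on fault).
 */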
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24 bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
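	/*
	 * Non-executable buffers of 2MB or more are aligned to 2MB, which
	 * matches the heap chunk size and presumably lets the page-table
	 * code use large block mappings where possible.
	 */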
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

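/*
 * Called by the DRM core when a handle to the BO is dropped for
 * @file_priv: remove the matching mapping from the list and drop its
 * reference, tearing down the GPU mapping once no one else holds it.
 */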
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

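/*
 * Heap BOs grow on demand and have no stable backing pages, so refuse
 * to pin them (pinning is needed e.g. for dma-buf export).
 */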
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin_locked(&bo->base);
}

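/*
 * Report residency/purgeability of the BO, e.g. for the fdinfo memory
 * stats printed by the DRM core.
 */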
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	enum drm_gem_object_status res = 0;

	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (bo->base.madv == PANFROST_MADV_DONTNEED)
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}

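/*
 * Resident set size: heap BOs count only what the fault handler has
 * mapped so far, regular BOs count their full size once pages exist.
 */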
static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap) {
		return bo->heap_rss_size;
	} else if (bo->base.pages) {
		WARN_ON(bo->heap_rss_size);
		return bo->base.base.size;
	}

	return 0;
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panfrost_gem_status,
	.rss = panfrost_gem_rss,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = to_panfrost_device(dev);
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;
	mutex_init(&obj->label.lock);

	panfrost_gem_debugfs_bo_add(pfdev, obj);

	return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	/*
	 * Imported objects don't go through any of the Panfrost UM
	 * driver-specific allocation paths, so they would stay unlabelled
	 * unless userspace explicitly calls the SET_LABEL_BO ioctl. A
	 * blanket tag telling us the object was imported from another
	 * driver is preferable to no label at all.
	 */
	panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");

	return obj;
}

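/*
 * Replace the BO label. Takes ownership of @label, which must come from
 * kstrdup_const() (or be NULL), since it is released with kfree_const().
 */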
void
panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	const char *old_label;

	scoped_guard(mutex, &bo->label.lock) {
		old_label = bo->label.str;
		bo->label.str = label;
	}

	kfree_const(old_label);
}

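/*
 * Label a kernel-internal BO, i.e. one that was never exposed to
 * userspace through a handle. Unlike panfrost_gem_set_label(), this
 * duplicates @label, so callers may pass string literals.
 */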
void
panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	const char *str;

	/* We should never attempt labelling a UM-exposed GEM object */
	if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
		return;

	if (!label)
		return;

	str = kstrdup_const(label, GFP_KERNEL);
	if (!str) {
		/* Failing to allocate memory for a label isn't a fatal condition */
		drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label");
		return;
	}

	panfrost_gem_set_label(obj, str);
}

#ifdef CONFIG_DEBUG_FS
struct gem_size_totals {
	size_t size;
	size_t resident;
	size_t reclaimable;
};

struct flag_def {
	u32 flag;
	const char *name;
};

static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
{
	int len;
	int i;

	static const struct flag_def gem_state_flags_names[] = {
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
	};

	seq_puts(m, "GEM state flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
			   gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
	}
}

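/*
 * Print one row of the debugfs GEM table and add the BO's numbers to
 * the running totals. Called with pfdev->debugfs.gems_lock held.
 */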
static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
					  struct seq_file *m,
					  struct gem_size_totals *totals)
{
	unsigned int refcount = kref_read(&bo->base.base.refcount);
	char creator_info[32] = {};
	size_t resident_size;
	u32 gem_state_flags = 0;

	/* Skip BOs being destroyed. */
	if (!refcount)
		return;

	resident_size = panfrost_gem_rss(&bo->base.base);

	snprintf(creator_info, sizeof(creator_info),
		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
		   creator_info,
		   bo->base.base.name,
		   refcount,
		   bo->base.base.size,
		   resident_size,
		   drm_vma_node_start(&bo->base.base.vma_node));

	if (bo->base.base.import_attach)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
	if (bo->base.base.dma_buf)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;

	if (bo->base.madv < 0)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
	else if (bo->base.madv > 0)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;

	seq_printf(m, "0x%-10x", gem_state_flags);

	scoped_guard(mutex, &bo->label.lock) {
		seq_printf(m, "%s\n", bo->label.str ? : "");
	}

	totals->size += bo->base.base.size;
	totals->resident += resident_size;
	if (bo->base.madv > 0)
		totals->reclaimable += resident_size;
}

void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
				    struct seq_file *m)
{
	struct gem_size_totals totals = {0};
	struct panfrost_gem_object *bo;

	panfrost_gem_debugfs_print_flag_names(m);

	seq_puts(m, "created-by                      global-name     refcount        size            resident-size   file-offset       state       label\n");
	seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");

	scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
		list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
			panfrost_gem_debugfs_bo_print(bo, m, &totals);
		}
	}

	seq_puts(m, "===================================================================================================================================\n");
	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
		   totals.size, totals.resident, totals.reclaimable);
}
#endif