// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

#ifdef CONFIG_DEBUG_FS
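/*
 * Every BO is kept on a device-wide debugfs list, tagged with the tgid and
 * comm of the creating task, so the gems debugfs file can attribute memory
 * usage per process.
 */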
static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
					struct panfrost_gem_object *bo)
{
	bo->debugfs.creator.tgid = current->group_leader->pid;
	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);

	mutex_lock(&pfdev->debugfs.gems_lock);
	list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
	mutex_unlock(&pfdev->debugfs.gems_lock);
}

static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
{
	struct panfrost_device *pfdev = bo->base.base.dev->dev_private;

	if (list_empty(&bo->debugfs.node))
		return;

	mutex_lock(&pfdev->debugfs.gems_lock);
	list_del_init(&bo->debugfs.node);
	mutex_unlock(&pfdev->debugfs.gems_lock);
}
#else
static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
					struct panfrost_gem_object *bo)
{}
static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
#endif

/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	kfree_const(bo->label.str);
	panfrost_gem_debugfs_bo_rm(bo);
	mutex_destroy(&bo->label.lock);

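	/*
	 * Heap BOs carry an array of sg_tables, one per 2MB chunk, populated
	 * on demand by the growable-heap fault handling in panfrost_mmu.c;
	 * unmap and free only the chunks that actually got backed.
	 */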
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

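/*
 * Look up the mapping of @bo in @priv's address space and return it with an
 * extra reference taken, or NULL if the file never mapped this BO. The caller
 * drops the reference with panfrost_gem_mapping_put().
 */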
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

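/* Tear down all mappings; per the _locked suffix, the caller is expected to
 * hold bo->mappings.lock.
 */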
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
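	/*
	 * Worked example (our own illustration, assuming 4K pages): an 8MB
	 * executable BO gets align = size >> PAGE_SHIFT = 2048 pages, so its
	 * GPU VA is 8MB-aligned and, being under 16MB, cannot straddle a
	 * 16MB boundary. Non-executable BOs of 2MB or more only ask for 2MB
	 * alignment instead.
	 */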
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

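	/* Success falls through with ret == 0, skipping the mapping put below. */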
err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

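/*
 * Heap BOs grow on GPU page faults, so their backing pages cannot be pinned
 * up front; reject pinning (e.g. for dma-buf export) with -EINVAL.
 */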
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin_locked(&bo->base);
}

static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	enum drm_gem_object_status res = 0;

	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (bo->base.madv == PANFROST_MADV_DONTNEED)
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}

static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap) {
		return bo->heap_rss_size;
	} else if (bo->base.pages) {
		WARN_ON(bo->heap_rss_size);
		return bo->base.base.size;
	}

	return 0;
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panfrost_gem_status,
	.rss = panfrost_gem_rss,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;
	mutex_init(&obj->label.lock);

	panfrost_gem_debugfs_bo_add(pfdev, obj);

	return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	/*
	 * We assign this generic label because this function cannot be
	 * reached through any of the Panfrost UM driver-specific code
	 * paths, so the object would otherwise remain unlabelled unless
	 * userspace explicitly calls the SET_LABEL_BO ioctl. A blanket
	 * tag telling us the object was imported from another driver is
	 * preferable to nothing at all.
	 */
	panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");

	return obj;
}

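/*
 * Replace the BO label, taking ownership of @label. The string must be safe
 * to pass to kfree_const(), e.g. obtained from kstrdup_const(), since the old
 * label is released that way.
 */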
void
panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	const char *old_label;

	scoped_guard(mutex, &bo->label.lock) {
		old_label = bo->label.str;
		bo->label.str = label;
	}

	kfree_const(old_label);
}

void
panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	const char *str;

	/* We should never attempt labelling a UM-exposed GEM object */
	if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
		return;

	if (!label)
		return;

	str = kstrdup_const(label, GFP_KERNEL);
	if (!str) {
		/* Failing to allocate memory for a label isn't a fatal condition */
		drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label");
		return;
	}

	panfrost_gem_set_label(obj, str);
}

#ifdef CONFIG_DEBUG_FS
struct gem_size_totals {
	size_t size;
	size_t resident;
	size_t reclaimable;
};

struct flag_def {
	u32 flag;
	const char *name;
};

static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
{
	int len;
	int i;

	static const struct flag_def gem_state_flags_names[] = {
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
		{PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
	};

	seq_puts(m, "GEM state flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
			   gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
	}
}

static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
					  struct seq_file *m,
					  struct gem_size_totals *totals)
{
	unsigned int refcount = kref_read(&bo->base.base.refcount);
	char creator_info[32] = {};
	size_t resident_size;
	u32 gem_state_flags = 0;

	/* Skip BOs being destroyed. */
	if (!refcount)
		return;

	resident_size = panfrost_gem_rss(&bo->base.base);

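	/*
	 * The field widths below must stay in sync with the column header
	 * printed by panfrost_gem_debugfs_print_bos().
	 */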
	snprintf(creator_info, sizeof(creator_info),
		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
		   creator_info,
		   bo->base.base.name,
		   refcount,
		   bo->base.base.size,
		   resident_size,
		   drm_vma_node_start(&bo->base.base.vma_node));

	if (bo->base.base.import_attach)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
	if (bo->base.base.dma_buf)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;

	if (bo->base.madv < 0)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
	else if (bo->base.madv > 0)
		gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;

	seq_printf(m, "0x%-10x", gem_state_flags);

	scoped_guard(mutex, &bo->label.lock) {
		seq_printf(m, "%s\n", bo->label.str ? : "");
	}

	totals->size += bo->base.base.size;
	totals->resident += resident_size;
	if (bo->base.madv > 0)
		totals->reclaimable += resident_size;
}

void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
				    struct seq_file *m)
{
	struct gem_size_totals totals = {0};
	struct panfrost_gem_object *bo;

	panfrost_gem_debugfs_print_flag_names(m);

	seq_puts(m, "created-by                      global-name     refcount        size            resident-size   file-offset       state       label\n");
	seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");

	scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
		list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
			panfrost_gem_debugfs_bo_print(bo, m, &totals);
		}
	}

	seq_puts(m, "===================================================================================================================================\n");
	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
		   totals.size, totals.resident, totals.reclaimable);
}
#endif