// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM DMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the DMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */
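
/*
 * Example (illustrative sketch, not part of the driver): a kernel-side
 * allocation typically goes through vc4_bo_create() below, which tries
 * the BO cache first and only falls back to a fresh DMA allocation on
 * a cache miss:
 *
 *	struct vc4_bo *bo;
 *
 *	bo = vc4_bo_create(dev, 4096, false, VC4_BO_TYPE_KERNEL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	// ... use bo->base.vaddr / bo->base.dma_addr ...
 *	drm_gem_object_put(&bo->base.base);	// may return it to the cache
 */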

#include <linux/dma-buf.h>

#include <drm/drm_fourcc.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const struct drm_gem_object_funcs vc4_gem_object_funcs;

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}
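
/*
 * Example (illustrative sketch): vc4_get_user_label() takes ownership
 * of a kmalloc'ed string, so callers pass in their own copy, as
 * vc4_label_bo_ioctl() below does with strndup_user():
 *
 *	char *name = kstrdup("my scanout buffer", GFP_KERNEL);
 *
 *	if (name) {
 *		mutex_lock(&vc4->bo_lock);
 *		label = vc4_get_user_label(vc4, name);	// consumes name
 *		if (label != -1)
 *			vc4_bo_set_label(gem_obj, label);
 *		mutex_unlock(&vc4->bo_lock);
 *	}
 */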

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}
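
/*
 * Worked example: with 4KiB pages, a 12KiB (3-page) BO has
 * bo_page_index(12288) == 2, so cached BOs of that size live on
 * bo_cache.size_list[2] (see vc4_get_cache_list_for_size() below).
 */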

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	mutex_destroy(&bo->madv_lock);
	drm_gem_dma_free(&bo->base);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}
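
/*
 * Worked example of the growth policy above (values assumed for
 * illustration): with size_list_size == 8, a request for a 33-page BO
 * (page_index == 32) reallocates the array to
 * max(8 * 2, 32 + 1) == 33 entries, rebasing the eight existing list
 * heads and initializing the 25 new ones.
 */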

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}
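
/*
 * Minimal sketch of the list_del_init() subtlety described above:
 * list_del() poisons ->next/->prev, so a second removal would fault,
 * while list_del_init() leaves the element as a valid singleton list
 * on which a later list_del() is harmless:
 *
 *	list_del_init(&bo->size_head);	// element now points at itself
 *	// ... lock juggling window; user marks the BO unpurgeable ...
 *	list_del(&bo->size_head);	// safe no-op, no poison access
 */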

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}
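
/*
 * Example (illustrative sketch, userspace side, error handling
 * elided): this purge path is driven by the madvise ioctl handled in
 * vc4_gem.c; the structure and ioctl names below are from
 * uapi/drm/vc4_drm.h, and reupload_contents() stands in for
 * application code:
 *
 *	struct drm_vc4_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = VC4_MADV_DONTNEED,	// eligible for purging
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
 *	// ... later, before re-using the BO ...
 *	arg.madv = VC4_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		reupload_contents();	// the kernel purged the backing pages
 */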

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the DMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);

	mutex_init(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->base.base.funcs = &vc4_gem_object_funcs;

	return &bo->base.base;
}
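
/*
 * Sketch (assumed wiring, set up in vc4_drv.c): the DRM core calls
 * vc4_create_object() through the driver's gem_create_object hook for
 * every GEM object, so dumb buffers and prime imports all get vc4_bo
 * bookkeeping:
 *
 *	static const struct drm_driver vc4_drm_driver = {
 *		...
 *		.gem_create_object = vc4_create_object,
 *		...
 *	};
 */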

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_dma_object *dma_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	dma_obj = drm_gem_dma_create(dev, size);
	if (IS_ERR(dma_obj)) {
		/*
		 * If we've run out of DMA memory, kill the cache of
		 * DMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		dma_obj = drm_gem_dma_create(dev, size);
	}

	if (IS_ERR(dma_obj)) {
		/*
		 * Still not enough DMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if DMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		dma_obj = drm_gem_dma_create(dev, size);
	}

	if (IS_ERR(dma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->base.dev);
		drm_err(dev, "Failed to allocate from GEM DMA helper:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&dma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&dma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	ret = vc4_dumb_fixup_args(args);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
static void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but DMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}
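
/*
 * Example (illustrative sketch): kernel paths that hand a purgeable BO
 * to the hardware bracket the use with these helpers so the BO can't
 * be purged mid-use:
 *
 *	ret = vc4_bo_inc_usecnt(bo);	// fails for DONTNEED/purged BOs
 *	if (ret)
 *		return ret;
 *	// ... the hardware can safely access the BO here ...
 *	vc4_bo_dec_usecnt(bo);		// may re-queue it for purging
 */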

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		drm_err(obj->dev, "Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	return drm_gem_dma_mmap(&bo->base, vma);
}

static const struct vm_operations_struct vc4_vm_ops = {
	.fault = vc4_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
	.free = vc4_free_object,
	.export = vc4_prime_export,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = vc4_gem_object_mmap,
	.vm_ops = &vc4_vm_ops,
};

static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put(gem_obj);
	return 0;
}
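
/*
 * Example (illustrative sketch, userspace side, error handling
 * elided): the offset returned by this ioctl is only a token to pass
 * to mmap() on the DRM fd, as is usual for GEM drivers:
 *
 *	struct drm_vc4_mmap_bo map = { .handle = handle };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */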

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			     (void __user *)(uintptr_t)args->data,
			     args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory from allocating from the BO
	 * cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users could do things like mmap the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put(gem_obj);

	return 0;
}
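
/*
 * Example (illustrative sketch, userspace side): marking a BO as
 * T-format tiled so that framebuffers created from it default to that
 * modifier:
 *
 *	struct drm_vc4_set_tiling tiling = {
 *		.handle = handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &tiling);
 */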

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put(gem_obj);

	return 0;
}

int vc4_bo_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	if (!vc4->v3d)
		return -ENODEV;

	drm_debugfs_add_file(drm, "bo_stats", vc4_bo_stats_debugfs, NULL);

	return 0;
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;
	int i;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	/* Create the initial set of BO labels that the kernel will
	 * use.  This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	ret = drmm_mutex_init(dev, &vc4->bo_lock);
	if (ret) {
		kfree(vc4->bo_labels);
		return ret;
	}

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			drm_err(dev, "Destroying BO cache with %d %s "
				"BOs still allocated\n",
				vc4->bo_labels[i].num_allocated,
				vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		drm_err(dev, "Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(gem_obj);

	return ret;
}
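
/*
 * Example (illustrative sketch, userspace side, error handling
 * elided): labeling a BO so it shows up under its own name in the
 * bo_stats debugfs output:
 *
 *	const char name[] = "my scanout buffer";
 *	struct drm_vc4_label_bo label = {
 *		.handle = handle,
 *		.len = strlen(name),
 *		.name = (uintptr_t)name,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
 */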