// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const struct drm_gem_object_funcs vc4_gem_object_funcs;

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

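/* Dump the per-label BO allocation counts and sizes, followed by the
 * stats for the userspace purgeable pool (protected by its own lock).
 */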
static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

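/* Move the BO's allocation stats from its current label over to
 * @label; -1 is used at destroy time, when only the old label needs
 * to be decremented.  Called with bo_lock held.
 */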
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

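/* BO sizes are page-aligned, so a single-page BO lands at index 0 of
 * the bo_cache size_list.
 */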
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

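/* Actually release the BO's backing storage, either on the final
 * free or when evicting an entry from the kernel BO cache.  Called
 * with bo_lock held.
 */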
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	drm_gem_cma_free(&bo->base);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

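/* Return the cache list head for BOs of the given size, growing the
 * size_list array as needed.  Returns NULL if the array couldn't be
 * grown, in which case the caller frees the BO instead of caching it.
 */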
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

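/* Free every BO in the kernel BO cache, either to retry a failed CMA
 * allocation or at driver teardown.
 */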
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

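/* Add a BO to the purgeable pool, making its backing storage a
 * candidate for release when CMA allocations start failing.
 */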
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

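/* Release a purgeable BO's backing storage: unmap it from userspace
 * so that later accesses fault, free the memory, and leave vaddr NULL
 * so the rest of the driver can tell the BO has been purged.  Called
 * with madv_lock held.
 */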
static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

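/* Try to recycle a BO of the right size from the kernel BO cache.
 * On a hit, the object's refcount is re-initialized (cached BOs sit
 * at refcount 0) and the BO is relabeled to @type.
 */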
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->base.base.funcs = &vc4_gem_object_funcs;

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content first.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->base.dev);
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

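/* Evict cache entries that haven't been reused for at least a
 * second, and re-arm the timer if newer entries remain.  Called with
 * bo_lock held.
 */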
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO.  Returns
 * it to the BO cache if possible, otherwise frees it.
 */
static void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

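/* Take a usecnt reference on the BO.  This fails for purgeable and
 * purged BOs; conversely, a BO with a non-zero usecnt can never be
 * purged, so holding a reference keeps its storage resident.
 */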
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

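/* Timer callback for the BO cache: defers the actual eviction to the
 * workqueue, since bo_lock is a mutex and can't be taken in atomic
 * context.
 */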
static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * a BO's memory after it has been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	return drm_gem_cma_mmap(&bo->base, vma);
}

static const struct vm_operations_struct vc4_vm_ops = {
	.fault = vc4_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
	.free = vc4_free_object,
	.export = vc4_prime_export,
	.get_sg_table = drm_gem_cma_object_get_sg_table,
	.vmap = drm_gem_cma_object_vmap,
	.mmap = vc4_gem_object_mmap,
	.vm_ops = &vc4_vm_ops,
};

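/* Make sure the binner BO has been allocated before the first V3D
 * allocation from this file.  The reference is kept for the lifetime
 * of the file and dropped when it is closed.
 */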
static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/*
	 * Don't allow unzeroed allocations here: a BO recycled from the
	 * BO cache may still hold another user's data, so vc4_bo_create()
	 * must zero it before handing it out.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put(gem_obj);
	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Zero out the rest of the BO: since we allow unzeroed
	 * allocations here, a recycled cache entry may contain stale
	 * data past args->size.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * racing with users doing things like mmapping the shader BO
	 * before it has been validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put(gem_obj);

	return 0;
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use.  This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(gem_obj);

	return ret;
}
1036