// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include <drm/drm_prime.h>
#include <drm/drm_file.h>

#include <trace/events/gpu_mem.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
	trace_gpu_mem_total(0, 0, total_mem);
}

static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
	struct msm_file_private *ctx = file->driver_priv;
	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);

	rcu_read_lock(); /* Locks file->pid! */
	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
	rcu_read_unlock();
}

static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
	update_ctx_mem(file, obj->size);
	return 0;
}

static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
	update_ctx_mem(file, -obj->size);
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (it is handled on the
 * cmdstream) and all we need to do is invalidate newly allocated pages
 * before mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

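/*
 * These two helpers are used strictly as a pair: get_pages() calls
 * sync_for_device() right after allocating backing pages for MSM_BO_WC
 * buffers, and put_pages() calls sync_for_cpu() just before the sgt is
 * torn down.  The "sync" is a map/unmap of the whole sgtable, which is
 * the only way to coax the needed invalidate out of the dma-mapping API.
 */
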
static void update_lru_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_obj->pages);

	if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
}

static void update_lru_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
	} else {
		update_lru_active(obj);
	}
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

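/*
 * An object always sits on exactly one of four LRUs, keyed off the
 * state checked above:
 *
 *   lru.unbacked - no backing pages allocated
 *   lru.pinned   - has pages and pin_count > 0
 *   lru.willneed - has pages, unpinned, madv == MSM_MADV_WILLNEED
 *   lru.dontneed - has pages, unpinned, madv == MSM_MADV_DONTNEED
 *
 * The shrinker consumes these lists (purging dontneed, evicting
 * willneed), so keeping them current on every pin/unpin/madvise
 * transition is what makes memory reclaim work.
 */
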
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		update_device_mem(dev->dev_private, obj->size);

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		update_device_mem(obj->dev->dev_private, -obj->size);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
					      unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (msm_obj->madv > madv) {
		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				     msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	return get_pages(obj);
}

/*
 * Update the pin count of the object, call under lru.lock
 */
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	msm_gem_assert_locked(obj);

	to_msm_bo(obj)->pin_count++;
	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
}

static void pin_obj_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	msm_gem_pin_obj_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_assert_locked(obj);

	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
	if (!IS_ERR(p))
		pin_obj_locked(obj);

	return p;
}

void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	msm_gem_unpin_locked(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

333 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
334 	 * a reference on obj. So, we dont need to hold one here.
335 	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

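/*
 * The offset returned here is the "fake" offset that userspace passes
 * to mmap().  An illustrative userspace sketch (not part of this file;
 * assumes a dumb buffer created with DRM_IOCTL_MODE_CREATE_DUMB):
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, req.offset);
 *
 * msm_gem_fault() above then fills the VMA one page at a time with
 * vmf_insert_pfn().
 */
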
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

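/*
 * The two callers in this file illustrate both cases: msm_gem_purge()
 * passes close=true since a purged object never comes back, while
 * msm_gem_evict() passes close=false so the object keeps its iova and
 * can later be mapped back in at the same GPU address.
 */
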
/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

/* Special unpin path for use in the fence-signaling path, avoiding the
 * need to hold the obj lock by depending only on things that are
 * protected by the LRU lock.  In particular we know that we already
 * have backing, and that the object's dma_resv has the fence for the
 * current submit/job, which will prevent us racing against page
 * eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_active(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret) {
		*iova = vma->iova;
		pin_obj_locked(obj);
	}

	return ret;
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

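/*
 * A sketch of the typical pin/unpin pairing seen in callers ("gpu" and
 * "bo" are placeholder names):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(bo, gpu->aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU ...
 *	msm_gem_unpin_iova(bo, gpu->aspace);
 */
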
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it.  Fails if the requested iova is
 * not available.  Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

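/*
 * A hedged usage sketch for msm_gem_set_iova() (names are placeholders):
 *
 *	// re-establish the object's iova at an address recorded earlier,
 *	// failing if that range is no longer available:
 *	ret = msm_gem_set_iova(bo, aspace, saved_iova);
 *
 *	// passing zero drops the vma again:
 *	msm_gem_set_iova(bo, aspace, 0);
 */
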
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (drm_gem_is_imported(obj))
		return ERR_PTR(-ENODEV);

	pages = msm_gem_get_pages_locked(obj, madv);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	pin_obj_locked(obj);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

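/*
 * Every successful msm_gem_get_vaddr() pins the object, so each call
 * must be balanced by msm_gem_put_vaddr().  An illustrative CPU-side
 * fill ("bo" is a placeholder name):
 *
 *	uint32_t *ptr = msm_gem_get_vaddr(bo);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	ptr[0] = 0xdeadbeef;
 *	msm_gem_put_vaddr(bo);
 */
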
/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/*
 * Update madvise status; returns true if the object has not been
 * purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	mutex_lock(&priv->lru.lock);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru_locked(obj);

	mutex_unlock(&priv->lru.lock);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

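/*
 * This is the backend of the MSM_GEM_MADVISE ioctl: userspace marks idle
 * buffers DONTNEED so the shrinker may purge them, and marks them
 * WILLNEED again before reuse, checking whether the contents survived.
 * A hedged userspace sketch (fields per include/uapi/drm/msm_drm.h):
 *
 *	struct drm_msm_gem_madvise req = {
 *		.handle = handle,
 *		.madv = MSM_MADV_WILLNEED,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
 *	if (!req.retained) {
 *		... contents were purged, buffer must be re-filled ...
 *	}
 */
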
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	if (op & MSM_PREP_BOOST) {
		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
				      ktime_get());
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

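/*
 * msm_gem_cpu_prep()/msm_gem_cpu_fini() back the MSM_GEM_CPU_PREP and
 * MSM_GEM_CPU_FINI ioctls.  A hedged sketch of the intended bracketing
 * for CPU access ("bo" is a placeholder; the timeout is absolute):
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 100);
 *	int ret = msm_gem_cpu_prep(bo, MSM_PREP_READ | MSM_PREP_WRITE,
 *				   &timeout);
 *
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(bo);
 */
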
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped");
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (drm_gem_is_imported(obj)) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj->metadata);
	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object and a userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

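/*
 * This is essentially what the MSM_GEM_NEW ioctl path boils down to.  A
 * hedged userspace-side sketch (fields per include/uapi/drm/msm_drm.h):
 *
 *	struct drm_msm_gem_new req = {
 *		.size = size,
 *		.flags = MSM_BO_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req);
 *	... req.handle now names the new buffer object ...
 */
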
static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	enum drm_gem_object_status status = 0;

	if (msm_obj->pages)
		status |= DRM_GEM_OBJECT_RESIDENT;

	if (msm_obj->madv == MSM_MADV_DONTNEED)
		status |= DRM_GEM_OBJECT_PURGEABLE;

	return status;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.open = msm_gem_open,
	.close = msm_gem_close,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.status = msm_gem_status,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

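/*
 * msm_gem_kernel_new()/msm_gem_kernel_put() are the one-stop helpers for
 * kernel-internal buffers (ringbuffers, firmware, etc).  A hedged sketch
 * of a typical caller ("gpu" and the size are placeholders):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
 *				       gpu->aspace, &bo, &iova);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... ptr for CPU access, iova for the GPU ...
 *	msm_gem_kernel_put(bo, gpu->aspace);
 */
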
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}