xref: /linux/drivers/gpu/drm/msm/msm_gem.c (revision 376b1446153ca67e7028e6b9555d9b17477f568b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 
16 #include "msm_drv.h"
17 #include "msm_fence.h"
18 #include "msm_gem.h"
19 #include "msm_gpu.h"
20 #include "msm_mmu.h"
21 
22 static void update_lru(struct drm_gem_object *obj);
23 
24 static dma_addr_t physaddr(struct drm_gem_object *obj)
25 {
26 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
27 	struct msm_drm_private *priv = obj->dev->dev_private;
28 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
29 			priv->vram.paddr;
30 }
31 
32 static bool use_pages(struct drm_gem_object *obj)
33 {
34 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
35 	return !msm_obj->vram_node;
36 }
37 
38 /*
39  * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
40  * API.  Really GPU cache is out of scope here (handled on cmdstream)
41  * and all we need to do is invalidate newly allocated pages before
42  * mapping to CPU as uncached/writecombine.
43  *
44  * On top of this, we have the added headache that, depending on
45  * display generation, the display's iommu may be wired up to either
46  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
47  * that here we either have dma-direct or iommu ops.
48  *
49  * Let this be a cautionary tale of abstraction gone wrong.
50  */
51 
52 static void sync_for_device(struct msm_gem_object *msm_obj)
53 {
54 	struct device *dev = msm_obj->base.dev->dev;
55 
56 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
57 }
58 
59 static void sync_for_cpu(struct msm_gem_object *msm_obj)
60 {
61 	struct device *dev = msm_obj->base.dev->dev;
62 
63 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
64 }
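
/*
 * Note: these two helpers only perform the CPU cache maintenance
 * described above (and only for MSM_BO_WC buffers, see get_pages() and
 * put_pages() below).  When an IOMMU is in use, the GPU-visible mapping
 * is set up separately via msm_gem_pin_vma_locked()/msm_gem_map_vma().
 */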
65 
66 /* allocate pages from VRAM carveout, used when no IOMMU: */
67 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
68 {
69 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70 	struct msm_drm_private *priv = obj->dev->dev_private;
71 	dma_addr_t paddr;
72 	struct page **p;
73 	int ret, i;
74 
75 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
76 	if (!p)
77 		return ERR_PTR(-ENOMEM);
78 
79 	spin_lock(&priv->vram.lock);
80 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
81 	spin_unlock(&priv->vram.lock);
82 	if (ret) {
83 		kvfree(p);
84 		return ERR_PTR(ret);
85 	}
86 
87 	paddr = physaddr(obj);
88 	for (i = 0; i < npages; i++) {
89 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
90 		paddr += PAGE_SIZE;
91 	}
92 
93 	return p;
94 }
95 
96 static struct page **get_pages(struct drm_gem_object *obj)
97 {
98 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99 
100 	msm_gem_assert_locked(obj);
101 
102 	if (!msm_obj->pages) {
103 		struct drm_device *dev = obj->dev;
104 		struct page **p;
105 		int npages = obj->size >> PAGE_SHIFT;
106 
107 		if (use_pages(obj))
108 			p = drm_gem_get_pages(obj);
109 		else
110 			p = get_pages_vram(obj, npages);
111 
112 		if (IS_ERR(p)) {
113 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
114 					PTR_ERR(p));
115 			return p;
116 		}
117 
118 		msm_obj->pages = p;
119 
120 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
121 		if (IS_ERR(msm_obj->sgt)) {
122 			void *ptr = ERR_CAST(msm_obj->sgt);
123 
124 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
125 			msm_obj->sgt = NULL;
126 			return ptr;
127 		}
128 
129 		/* For non-cached buffers, ensure the new pages are clean
130 		 * because display controller, GPU, etc. are not coherent:
131 		 */
132 		if (msm_obj->flags & MSM_BO_WC)
133 			sync_for_device(msm_obj);
134 
135 		update_lru(obj);
136 	}
137 
138 	return msm_obj->pages;
139 }
140 
141 static void put_pages_vram(struct drm_gem_object *obj)
142 {
143 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
144 	struct msm_drm_private *priv = obj->dev->dev_private;
145 
146 	spin_lock(&priv->vram.lock);
147 	drm_mm_remove_node(msm_obj->vram_node);
148 	spin_unlock(&priv->vram.lock);
149 
150 	kvfree(msm_obj->pages);
151 }
152 
153 static void put_pages(struct drm_gem_object *obj)
154 {
155 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156 
157 	if (msm_obj->pages) {
158 		if (msm_obj->sgt) {
159 			/* For non-cached buffers, hand the pages back
160 			 * to the CPU before freeing them, because display
161 			 * controller, GPU, etc. are not coherent:
162 			 */
163 			if (msm_obj->flags & MSM_BO_WC)
164 				sync_for_cpu(msm_obj);
165 
166 			sg_free_table(msm_obj->sgt);
167 			kfree(msm_obj->sgt);
168 			msm_obj->sgt = NULL;
169 		}
170 
171 		if (use_pages(obj))
172 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
173 		else
174 			put_pages_vram(obj);
175 
176 		msm_obj->pages = NULL;
177 		update_lru(obj);
178 	}
179 }
180 
181 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
182 {
183 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
184 	struct page **p;
185 
186 	msm_gem_assert_locked(obj);
187 
188 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
189 		return ERR_PTR(-EBUSY);
190 	}
191 
192 	p = get_pages(obj);
193 	if (!IS_ERR(p)) {
194 		to_msm_bo(obj)->pin_count++;
195 		update_lru(obj);
196 	}
197 
198 	return p;
199 }
200 
201 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
202 {
203 	struct page **p;
204 
205 	msm_gem_lock(obj);
206 	p = msm_gem_pin_pages_locked(obj);
207 	msm_gem_unlock(obj);
208 
209 	return p;
210 }
211 
212 void msm_gem_unpin_pages(struct drm_gem_object *obj)
213 {
214 	msm_gem_lock(obj);
215 	msm_gem_unpin_locked(obj);
216 	msm_gem_unlock(obj);
217 }
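
/*
 * A minimal usage sketch (hypothetical caller, for illustration): pin
 * the backing pages for the duration of a CPU-side access, then drop
 * the pin so the object can be reclaimed again:
 *
 *	struct page **pages = msm_gem_pin_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access the pages ...
 *	msm_gem_unpin_pages(obj);
 */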
218 
219 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
220 {
221 	if (msm_obj->flags & MSM_BO_WC)
222 		return pgprot_writecombine(prot);
223 	return prot;
224 }
225 
226 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
227 {
228 	struct vm_area_struct *vma = vmf->vma;
229 	struct drm_gem_object *obj = vma->vm_private_data;
230 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
231 	struct page **pages;
232 	unsigned long pfn;
233 	pgoff_t pgoff;
234 	int err;
235 	vm_fault_t ret;
236 
237 	/*
238 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
239 	 * a reference on obj. So, we don't need to hold one here.
240 	 */
241 	err = msm_gem_lock_interruptible(obj);
242 	if (err) {
243 		ret = VM_FAULT_NOPAGE;
244 		goto out;
245 	}
246 
247 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
248 		msm_gem_unlock(obj);
249 		return VM_FAULT_SIGBUS;
250 	}
251 
252 	/* make sure we have pages attached now */
253 	pages = get_pages(obj);
254 	if (IS_ERR(pages)) {
255 		ret = vmf_error(PTR_ERR(pages));
256 		goto out_unlock;
257 	}
258 
259 	/* We don't use vmf->pgoff since that has the fake offset: */
260 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
261 
262 	pfn = page_to_pfn(pages[pgoff]);
263 
264 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
265 			pfn, pfn << PAGE_SHIFT);
266 
267 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
268 
269 out_unlock:
270 	msm_gem_unlock(obj);
271 out:
272 	return ret;
273 }
274 
275 /* get mmap offset */
276 static uint64_t mmap_offset(struct drm_gem_object *obj)
277 {
278 	struct drm_device *dev = obj->dev;
279 	int ret;
280 
281 	msm_gem_assert_locked(obj);
282 
283 	/* Make it mmapable */
284 	ret = drm_gem_create_mmap_offset(obj);
285 
286 	if (ret) {
287 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
288 		return 0;
289 	}
290 
291 	return drm_vma_node_offset_addr(&obj->vma_node);
292 }
293 
294 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
295 {
296 	uint64_t offset;
297 
298 	msm_gem_lock(obj);
299 	offset = mmap_offset(obj);
300 	msm_gem_unlock(obj);
301 	return offset;
302 }
303 
304 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
305 		struct msm_gem_address_space *aspace)
306 {
307 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
308 	struct msm_gem_vma *vma;
309 
310 	msm_gem_assert_locked(obj);
311 
312 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
313 	if (!vma)
314 		return ERR_PTR(-ENOMEM);
315 
316 	vma->aspace = aspace;
317 
318 	list_add_tail(&vma->list, &msm_obj->vmas);
319 
320 	return vma;
321 }
322 
323 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
324 		struct msm_gem_address_space *aspace)
325 {
326 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
327 	struct msm_gem_vma *vma;
328 
329 	msm_gem_assert_locked(obj);
330 
331 	list_for_each_entry(vma, &msm_obj->vmas, list) {
332 		if (vma->aspace == aspace)
333 			return vma;
334 	}
335 
336 	return NULL;
337 }
338 
339 static void del_vma(struct msm_gem_vma *vma)
340 {
341 	if (!vma)
342 		return;
343 
344 	list_del(&vma->list);
345 	kfree(vma);
346 }
347 
348 /*
349  * If close is true, this also closes the VMA (releasing the allocated
350  * iova range) in addition to removing the iommu mapping.  In the eviction
351  * case (!close), we keep the iova allocated, but only remove the iommu
352  * mapping.
353  */
354 static void
355 put_iova_spaces(struct drm_gem_object *obj, bool close)
356 {
357 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
358 	struct msm_gem_vma *vma;
359 
360 	msm_gem_assert_locked(obj);
361 
362 	list_for_each_entry(vma, &msm_obj->vmas, list) {
363 		if (vma->aspace) {
364 			msm_gem_purge_vma(vma->aspace, vma);
365 			if (close)
366 				msm_gem_close_vma(vma->aspace, vma);
367 		}
368 	}
369 }
370 
371 /* Called with msm_obj locked */
372 static void
373 put_iova_vmas(struct drm_gem_object *obj)
374 {
375 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
376 	struct msm_gem_vma *vma, *tmp;
377 
378 	msm_gem_assert_locked(obj);
379 
380 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
381 		del_vma(vma);
382 	}
383 }
384 
385 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
386 		struct msm_gem_address_space *aspace,
387 		u64 range_start, u64 range_end)
388 {
389 	struct msm_gem_vma *vma;
390 
391 	msm_gem_assert_locked(obj);
392 
393 	vma = lookup_vma(obj, aspace);
394 
395 	if (!vma) {
396 		int ret;
397 
398 		vma = add_vma(obj, aspace);
399 		if (IS_ERR(vma))
400 			return vma;
401 
402 		ret = msm_gem_init_vma(aspace, vma, obj->size,
403 			range_start, range_end);
404 		if (ret) {
405 			del_vma(vma);
406 			return ERR_PTR(ret);
407 		}
408 	} else {
409 		GEM_WARN_ON(vma->iova < range_start);
410 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
411 	}
412 
413 	return vma;
414 }
415 
416 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
417 {
418 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
419 	struct page **pages;
420 	int ret, prot = IOMMU_READ;
421 
422 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
423 		prot |= IOMMU_WRITE;
424 
425 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
426 		prot |= IOMMU_PRIV;
427 
428 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
429 		prot |= IOMMU_CACHE;
430 
431 	msm_gem_assert_locked(obj);
432 
433 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
434 		return -EBUSY;
435 
436 	pages = msm_gem_pin_pages_locked(obj);
437 	if (IS_ERR(pages))
438 		return PTR_ERR(pages);
439 
440 	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
441 	if (ret)
442 		msm_gem_unpin_locked(obj);
443 
444 	return ret;
445 }
446 
447 void msm_gem_unpin_locked(struct drm_gem_object *obj)
448 {
449 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
450 
451 	msm_gem_assert_locked(obj);
452 
453 	msm_obj->pin_count--;
454 	GEM_WARN_ON(msm_obj->pin_count < 0);
455 
456 	update_lru(obj);
457 }
458 
459 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
460 					   struct msm_gem_address_space *aspace)
461 {
462 	return get_vma_locked(obj, aspace, 0, U64_MAX);
463 }
464 
465 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
466 		struct msm_gem_address_space *aspace, uint64_t *iova,
467 		u64 range_start, u64 range_end)
468 {
469 	struct msm_gem_vma *vma;
470 	int ret;
471 
472 	msm_gem_assert_locked(obj);
473 
474 	vma = get_vma_locked(obj, aspace, range_start, range_end);
475 	if (IS_ERR(vma))
476 		return PTR_ERR(vma);
477 
478 	ret = msm_gem_pin_vma_locked(obj, vma);
479 	if (!ret)
480 		*iova = vma->iova;
481 
482 	return ret;
483 }
484 
485 /*
486  * Get the iova and pin it.  Should have a matching put.
487  * Limits the iova to the specified range of GPU addresses.
488  */
489 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
490 		struct msm_gem_address_space *aspace, uint64_t *iova,
491 		u64 range_start, u64 range_end)
492 {
493 	int ret;
494 
495 	msm_gem_lock(obj);
496 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
497 	msm_gem_unlock(obj);
498 
499 	return ret;
500 }
501 
502 /* get iova and pin it. Should have a matching put */
503 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
504 		struct msm_gem_address_space *aspace, uint64_t *iova)
505 {
506 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
507 }
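
/*
 * A minimal usage sketch (hypothetical caller): hold the pin for as
 * long as the hardware needs the mapping, then drop it with the
 * matching msm_gem_unpin_iova():
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program 'iova' into the hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */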
508 
509 /*
510  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
511  * valid for the life of the object
512  */
513 int msm_gem_get_iova(struct drm_gem_object *obj,
514 		struct msm_gem_address_space *aspace, uint64_t *iova)
515 {
516 	struct msm_gem_vma *vma;
517 	int ret = 0;
518 
519 	msm_gem_lock(obj);
520 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
521 	if (IS_ERR(vma)) {
522 		ret = PTR_ERR(vma);
523 	} else {
524 		*iova = vma->iova;
525 	}
526 	msm_gem_unlock(obj);
527 
528 	return ret;
529 }
530 
531 static int clear_iova(struct drm_gem_object *obj,
532 		      struct msm_gem_address_space *aspace)
533 {
534 	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
535 
536 	if (!vma)
537 		return 0;
538 
539 	if (msm_gem_vma_inuse(vma))
540 		return -EBUSY;
541 
542 	msm_gem_purge_vma(vma->aspace, vma);
543 	msm_gem_close_vma(vma->aspace, vma);
544 	del_vma(vma);
545 
546 	return 0;
547 }
548 
549 /*
550  * Get the requested iova but don't pin it.  Fails if the requested iova is
551  * not available.  Doesn't need a put because iovas are currently valid for
552  * the life of the object.
553  *
554  * Setting an iova of zero will clear the vma.
555  */
556 int msm_gem_set_iova(struct drm_gem_object *obj,
557 		     struct msm_gem_address_space *aspace, uint64_t iova)
558 {
559 	int ret = 0;
560 
561 	msm_gem_lock(obj);
562 	if (!iova) {
563 		ret = clear_iova(obj, aspace);
564 	} else {
565 		struct msm_gem_vma *vma;
566 		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
567 		if (IS_ERR(vma)) {
568 			ret = PTR_ERR(vma);
569 		} else if (GEM_WARN_ON(vma->iova != iova)) {
570 			clear_iova(obj, aspace);
571 			ret = -EBUSY;
572 		}
573 	}
574 	msm_gem_unlock(obj);
575 
576 	return ret;
577 }
578 
579 /*
580  * Unpin an iova by updating the reference counts. The memory isn't actually
581  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
582  * to get rid of it
583  */
584 void msm_gem_unpin_iova(struct drm_gem_object *obj,
585 		struct msm_gem_address_space *aspace)
586 {
587 	struct msm_gem_vma *vma;
588 
589 	msm_gem_lock(obj);
590 	vma = lookup_vma(obj, aspace);
591 	if (!GEM_WARN_ON(!vma)) {
592 		msm_gem_unpin_vma(vma);
593 		msm_gem_unpin_locked(obj);
594 	}
595 	msm_gem_unlock(obj);
596 }
597 
598 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
599 		struct drm_mode_create_dumb *args)
600 {
601 	args->pitch = align_pitch(args->width, args->bpp);
602 	args->size  = PAGE_ALIGN(args->pitch * args->height);
603 	return msm_gem_new_handle(dev, file, args->size,
604 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
605 }
606 
607 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
608 		uint32_t handle, uint64_t *offset)
609 {
610 	struct drm_gem_object *obj;
611 	int ret = 0;
612 
613 	/* GEM does all our handle to object mapping */
614 	obj = drm_gem_object_lookup(file, handle);
615 	if (obj == NULL) {
616 		ret = -ENOENT;
617 		goto fail;
618 	}
619 
620 	*offset = msm_gem_mmap_offset(obj);
621 
622 	drm_gem_object_put(obj);
623 
624 fail:
625 	return ret;
626 }
627 
628 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
629 {
630 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
631 	int ret = 0;
632 
633 	msm_gem_assert_locked(obj);
634 
635 	if (obj->import_attach)
636 		return ERR_PTR(-ENODEV);
637 
638 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
639 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
640 			msm_obj->madv, madv);
641 		return ERR_PTR(-EBUSY);
642 	}
643 
644 	/* increment vmap_count *before* vmap() call, so shrinker can
645 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
646 	 * This guarantees that we won't try to msm_gem_vunmap() this
647 	 * same object from within the vmap() call (while we already
648 	 * hold msm_obj lock)
649 	 */
650 	msm_obj->vmap_count++;
651 
652 	if (!msm_obj->vaddr) {
653 		struct page **pages = get_pages(obj);
654 		if (IS_ERR(pages)) {
655 			ret = PTR_ERR(pages);
656 			goto fail;
657 		}
658 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
659 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
660 		if (msm_obj->vaddr == NULL) {
661 			ret = -ENOMEM;
662 			goto fail;
663 		}
664 
665 		update_lru(obj);
666 	}
667 
668 	return msm_obj->vaddr;
669 
670 fail:
671 	msm_obj->vmap_count--;
672 	return ERR_PTR(ret);
673 }
674 
675 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
676 {
677 	return get_vaddr(obj, MSM_MADV_WILLNEED);
678 }
679 
680 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
681 {
682 	void *ret;
683 
684 	msm_gem_lock(obj);
685 	ret = msm_gem_get_vaddr_locked(obj);
686 	msm_gem_unlock(obj);
687 
688 	return ret;
689 }
690 
691 /*
692  * Don't use this!  It is for the very special case of dumping
693  * submits from GPU hangs or faults, where the bo may already
694  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
695  * active list.
696  */
697 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
698 {
699 	return get_vaddr(obj, __MSM_MADV_PURGED);
700 }
701 
702 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
703 {
704 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
705 
706 	msm_gem_assert_locked(obj);
707 	GEM_WARN_ON(msm_obj->vmap_count < 1);
708 
709 	msm_obj->vmap_count--;
710 }
711 
712 void msm_gem_put_vaddr(struct drm_gem_object *obj)
713 {
714 	msm_gem_lock(obj);
715 	msm_gem_put_vaddr_locked(obj);
716 	msm_gem_unlock(obj);
717 }
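
/*
 * A minimal usage sketch (hypothetical caller): map the object into
 * kernel address space, touch its contents, then drop the vmap
 * reference with the matching put:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */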
718 
719 /* Update madvise status, returns true if not purged, else
720  * false.
721  */
722 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
723 {
724 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
725 
726 	msm_gem_lock(obj);
727 
728 	if (msm_obj->madv != __MSM_MADV_PURGED)
729 		msm_obj->madv = madv;
730 
731 	madv = msm_obj->madv;
732 
733 	/* If the obj is inactive, we might need to move it
734 	 * between inactive lists
735 	 */
736 	update_lru(obj);
737 
738 	msm_gem_unlock(obj);
739 
740 	return (madv != __MSM_MADV_PURGED);
741 }
742 
743 void msm_gem_purge(struct drm_gem_object *obj)
744 {
745 	struct drm_device *dev = obj->dev;
746 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
747 
748 	msm_gem_assert_locked(obj);
749 	GEM_WARN_ON(!is_purgeable(msm_obj));
750 
751 	/* Get rid of any iommu mapping(s): */
752 	put_iova_spaces(obj, true);
753 
754 	msm_gem_vunmap(obj);
755 
756 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
757 
758 	put_pages(obj);
759 
760 	put_iova_vmas(obj);
761 
762 	msm_obj->madv = __MSM_MADV_PURGED;
763 
764 	drm_gem_free_mmap_offset(obj);
765 
766 	/* Our goal here is to return as much of the memory as
767 	 * possible back to the system, as we are called from OOM.
768 	 * To do this we must instruct the shmfs to drop all of its
769 	 * backing pages, *now*.
770 	 */
771 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
772 
773 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
774 			0, (loff_t)-1);
775 }
776 
777 /*
778  * Unpin the backing pages and make them available to be swapped out.
779  */
780 void msm_gem_evict(struct drm_gem_object *obj)
781 {
782 	struct drm_device *dev = obj->dev;
783 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
784 
785 	msm_gem_assert_locked(obj);
786 	GEM_WARN_ON(is_unevictable(msm_obj));
787 
788 	/* Get rid of any iommu mapping(s): */
789 	put_iova_spaces(obj, false);
790 
791 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
792 
793 	put_pages(obj);
794 }
795 
796 void msm_gem_vunmap(struct drm_gem_object *obj)
797 {
798 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
799 
800 	msm_gem_assert_locked(obj);
801 
802 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
803 		return;
804 
805 	vunmap(msm_obj->vaddr);
806 	msm_obj->vaddr = NULL;
807 }
808 
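/*
 * Move the object onto the LRU list matching its current state:
 * unbacked (no pages), pinned or vmap'd, willneed, or dontneed.  This
 * lets the shrinker cheaply skip objects that cannot be reclaimed at
 * the moment.
 */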
809 static void update_lru(struct drm_gem_object *obj)
810 {
811 	struct msm_drm_private *priv = obj->dev->dev_private;
812 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
813 
814 	msm_gem_assert_locked(&msm_obj->base);
815 
816 	if (!msm_obj->pages) {
817 		GEM_WARN_ON(msm_obj->pin_count);
818 		GEM_WARN_ON(msm_obj->vmap_count);
819 
820 		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
821 	} else if (msm_obj->pin_count || msm_obj->vmap_count) {
822 		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
823 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
824 		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
825 	} else {
826 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
827 
828 		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
829 	}
830 }
831 
832 bool msm_gem_active(struct drm_gem_object *obj)
833 {
834 	msm_gem_assert_locked(obj);
835 
836 	if (to_msm_bo(obj)->pin_count)
837 		return true;
838 
839 	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
840 }
841 
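/*
 * Wait for any pending GPU access to the object before CPU access.
 * With MSM_PREP_NOSYNC the timeout is zero, so a still-busy object is
 * reported as -EBUSY rather than -ETIMEDOUT.
 */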
842 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
843 {
844 	bool write = !!(op & MSM_PREP_WRITE);
845 	unsigned long remain =
846 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
847 	long ret;
848 
849 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
850 				    true,  remain);
851 	if (ret == 0)
852 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
853 	else if (ret < 0)
854 		return ret;
855 
856 	/* TODO cache maintenance */
857 
858 	return 0;
859 }
860 
861 int msm_gem_cpu_fini(struct drm_gem_object *obj)
862 {
863 	/* TODO cache maintenance */
864 	return 0;
865 }
866 
867 #ifdef CONFIG_DEBUG_FS
868 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
869 		struct msm_gem_stats *stats)
870 {
871 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
872 	struct dma_resv *robj = obj->resv;
873 	struct msm_gem_vma *vma;
874 	uint64_t off = drm_vma_node_start(&obj->vma_node);
875 	const char *madv;
876 
877 	msm_gem_lock(obj);
878 
879 	stats->all.count++;
880 	stats->all.size += obj->size;
881 
882 	if (msm_gem_active(obj)) {
883 		stats->active.count++;
884 		stats->active.size += obj->size;
885 	}
886 
887 	if (msm_obj->pages) {
888 		stats->resident.count++;
889 		stats->resident.size += obj->size;
890 	}
891 
892 	switch (msm_obj->madv) {
893 	case __MSM_MADV_PURGED:
894 		stats->purged.count++;
895 		stats->purged.size += obj->size;
896 		madv = " purged";
897 		break;
898 	case MSM_MADV_DONTNEED:
899 		stats->purgeable.count++;
900 		stats->purgeable.size += obj->size;
901 		madv = " purgeable";
902 		break;
903 	case MSM_MADV_WILLNEED:
904 	default:
905 		madv = "";
906 		break;
907 	}
908 
909 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
910 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
911 			obj->name, kref_read(&obj->refcount),
912 			off, msm_obj->vaddr);
913 
914 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
915 
916 	if (!list_empty(&msm_obj->vmas)) {
917 
918 		seq_puts(m, "      vmas:");
919 
920 		list_for_each_entry(vma, &msm_obj->vmas, list) {
921 			const char *name, *comm;
922 			if (vma->aspace) {
923 				struct msm_gem_address_space *aspace = vma->aspace;
924 				struct task_struct *task =
925 					get_pid_task(aspace->pid, PIDTYPE_PID);
926 				if (task) {
927 					comm = kstrdup(task->comm, GFP_KERNEL);
928 					put_task_struct(task);
929 				} else {
930 					comm = NULL;
931 				}
932 				name = aspace->name;
933 			} else {
934 				name = comm = NULL;
935 			}
936 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
937 				name, comm ? ":" : "", comm ? comm : "",
938 				vma->aspace, vma->iova,
939 				vma->mapped ? "mapped" : "unmapped",
940 				msm_gem_vma_inuse(vma));
941 			kfree(comm);
942 		}
943 
944 		seq_puts(m, "\n");
945 	}
946 
947 	dma_resv_describe(robj, m);
948 	msm_gem_unlock(obj);
949 }
950 
951 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
952 {
953 	struct msm_gem_stats stats = {};
954 	struct msm_gem_object *msm_obj;
955 
956 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
957 	list_for_each_entry(msm_obj, list, node) {
958 		struct drm_gem_object *obj = &msm_obj->base;
959 		seq_puts(m, "   ");
960 		msm_gem_describe(obj, m, &stats);
961 	}
962 
963 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
964 			stats.all.count, stats.all.size);
965 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
966 			stats.active.count, stats.active.size);
967 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
968 			stats.resident.count, stats.resident.size);
969 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
970 			stats.purgeable.count, stats.purgeable.size);
971 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
972 			stats.purged.count, stats.purged.size);
973 }
974 #endif
975 
976 /* don't call directly!  Use drm_gem_object_put() */
977 static void msm_gem_free_object(struct drm_gem_object *obj)
978 {
979 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
980 	struct drm_device *dev = obj->dev;
981 	struct msm_drm_private *priv = dev->dev_private;
982 
983 	mutex_lock(&priv->obj_lock);
984 	list_del(&msm_obj->node);
985 	mutex_unlock(&priv->obj_lock);
986 
987 	put_iova_spaces(obj, true);
988 
989 	if (obj->import_attach) {
990 		GEM_WARN_ON(msm_obj->vaddr);
991 
992 		/* Don't drop the pages for imported dmabuf, as they are not
993 		 * ours, just free the array we allocated:
994 		 */
995 		kvfree(msm_obj->pages);
996 
997 		put_iova_vmas(obj);
998 
999 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1000 	} else {
1001 		msm_gem_vunmap(obj);
1002 		put_pages(obj);
1003 		put_iova_vmas(obj);
1004 	}
1005 
1006 	drm_gem_object_release(obj);
1007 
1008 	kfree(msm_obj);
1009 }
1010 
1011 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1012 {
1013 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1014 
1015 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1016 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1017 
1018 	return 0;
1019 }
1020 
1021 /* convenience method to construct a GEM buffer object and userspace handle */
1022 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1023 		uint32_t size, uint32_t flags, uint32_t *handle,
1024 		char *name)
1025 {
1026 	struct drm_gem_object *obj;
1027 	int ret;
1028 
1029 	obj = msm_gem_new(dev, size, flags);
1030 
1031 	if (IS_ERR(obj))
1032 		return PTR_ERR(obj);
1033 
1034 	if (name)
1035 		msm_gem_object_set_name(obj, "%s", name);
1036 
1037 	ret = drm_gem_handle_create(file, obj, handle);
1038 
1039 	/* drop reference from allocate - handle holds it now */
1040 	drm_gem_object_put(obj);
1041 
1042 	return ret;
1043 }
1044 
1045 static const struct vm_operations_struct vm_ops = {
1046 	.fault = msm_gem_fault,
1047 	.open = drm_gem_vm_open,
1048 	.close = drm_gem_vm_close,
1049 };
1050 
1051 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1052 	.free = msm_gem_free_object,
1053 	.pin = msm_gem_prime_pin,
1054 	.unpin = msm_gem_prime_unpin,
1055 	.get_sg_table = msm_gem_prime_get_sg_table,
1056 	.vmap = msm_gem_prime_vmap,
1057 	.vunmap = msm_gem_prime_vunmap,
1058 	.mmap = msm_gem_object_mmap,
1059 	.vm_ops = &vm_ops,
1060 };
1061 
1062 static int msm_gem_new_impl(struct drm_device *dev,
1063 		uint32_t size, uint32_t flags,
1064 		struct drm_gem_object **obj)
1065 {
1066 	struct msm_drm_private *priv = dev->dev_private;
1067 	struct msm_gem_object *msm_obj;
1068 
1069 	switch (flags & MSM_BO_CACHE_MASK) {
1070 	case MSM_BO_CACHED:
1071 	case MSM_BO_WC:
1072 		break;
1073 	case MSM_BO_CACHED_COHERENT:
1074 		if (priv->has_cached_coherent)
1075 			break;
1076 		fallthrough;
1077 	default:
1078 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1079 				(flags & MSM_BO_CACHE_MASK));
1080 		return -EINVAL;
1081 	}
1082 
1083 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1084 	if (!msm_obj)
1085 		return -ENOMEM;
1086 
1087 	msm_obj->flags = flags;
1088 	msm_obj->madv = MSM_MADV_WILLNEED;
1089 
1090 	INIT_LIST_HEAD(&msm_obj->node);
1091 	INIT_LIST_HEAD(&msm_obj->vmas);
1092 
1093 	*obj = &msm_obj->base;
1094 	(*obj)->funcs = &msm_gem_object_funcs;
1095 
1096 	return 0;
1097 }
1098 
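/*
 * Allocate a new GEM buffer object.  Normally the backing pages come
 * from shmem and are allocated lazily in get_pages(); when there is no
 * MMU (or for MSM_BO_STOLEN/MSM_BO_SCANOUT buffers while a VRAM
 * carveout is configured) the object is backed by the contiguous VRAM
 * carveout instead.
 */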
1099 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1100 {
1101 	struct msm_drm_private *priv = dev->dev_private;
1102 	struct msm_gem_object *msm_obj;
1103 	struct drm_gem_object *obj = NULL;
1104 	bool use_vram = false;
1105 	int ret;
1106 
1107 	size = PAGE_ALIGN(size);
1108 
1109 	if (!msm_use_mmu(dev))
1110 		use_vram = true;
1111 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1112 		use_vram = true;
1113 
1114 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1115 		return ERR_PTR(-EINVAL);
1116 
1117 	/* Disallow zero sized objects as they make the underlying
1118 	 * infrastructure grumpy
1119 	 */
1120 	if (size == 0)
1121 		return ERR_PTR(-EINVAL);
1122 
1123 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1124 	if (ret)
1125 		return ERR_PTR(ret);
1126 
1127 	msm_obj = to_msm_bo(obj);
1128 
1129 	if (use_vram) {
1130 		struct msm_gem_vma *vma;
1131 		struct page **pages;
1132 
1133 		drm_gem_private_object_init(dev, obj, size);
1134 
1135 		msm_gem_lock(obj);
1136 
1137 		vma = add_vma(obj, NULL);
1138 		msm_gem_unlock(obj);
1139 		if (IS_ERR(vma)) {
1140 			ret = PTR_ERR(vma);
1141 			goto fail;
1142 		}
1143 
1144 		to_msm_bo(obj)->vram_node = &vma->node;
1145 
1146 		msm_gem_lock(obj);
1147 		pages = get_pages(obj);
1148 		msm_gem_unlock(obj);
1149 		if (IS_ERR(pages)) {
1150 			ret = PTR_ERR(pages);
1151 			goto fail;
1152 		}
1153 
1154 		vma->iova = physaddr(obj);
1155 	} else {
1156 		ret = drm_gem_object_init(dev, obj, size);
1157 		if (ret)
1158 			goto fail;
1159 		/*
1160 		 * Our buffers are kept pinned, so allocating them from the
1161 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1162 		 * See comments above new_inode() for why this is required _and_
1163 		 * expected if you're going to pin these pages.
1164 		 */
1165 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1166 	}
1167 
1168 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1169 
1170 	mutex_lock(&priv->obj_lock);
1171 	list_add_tail(&msm_obj->node, &priv->objects);
1172 	mutex_unlock(&priv->obj_lock);
1173 
1174 	return obj;
1175 
1176 fail:
1177 	drm_gem_object_put(obj);
1178 	return ERR_PTR(ret);
1179 }
1180 
1181 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1182 		struct dma_buf *dmabuf, struct sg_table *sgt)
1183 {
1184 	struct msm_drm_private *priv = dev->dev_private;
1185 	struct msm_gem_object *msm_obj;
1186 	struct drm_gem_object *obj;
1187 	uint32_t size;
1188 	int ret, npages;
1189 
1190 	/* if we don't have IOMMU, don't bother pretending we can import: */
1191 	if (!msm_use_mmu(dev)) {
1192 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1193 		return ERR_PTR(-EINVAL);
1194 	}
1195 
1196 	size = PAGE_ALIGN(dmabuf->size);
1197 
1198 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1199 	if (ret)
1200 		return ERR_PTR(ret);
1201 
1202 	drm_gem_private_object_init(dev, obj, size);
1203 
1204 	npages = size / PAGE_SIZE;
1205 
1206 	msm_obj = to_msm_bo(obj);
1207 	msm_gem_lock(obj);
1208 	msm_obj->sgt = sgt;
1209 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1210 	if (!msm_obj->pages) {
1211 		msm_gem_unlock(obj);
1212 		ret = -ENOMEM;
1213 		goto fail;
1214 	}
1215 
1216 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1217 	if (ret) {
1218 		msm_gem_unlock(obj);
1219 		goto fail;
1220 	}
1221 
1222 	msm_gem_unlock(obj);
1223 
1224 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1225 
1226 	mutex_lock(&priv->obj_lock);
1227 	list_add_tail(&msm_obj->node, &priv->objects);
1228 	mutex_unlock(&priv->obj_lock);
1229 
1230 	return obj;
1231 
1232 fail:
1233 	drm_gem_object_put(obj);
1234 	return ERR_PTR(ret);
1235 }
1236 
1237 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1238 		uint32_t flags, struct msm_gem_address_space *aspace,
1239 		struct drm_gem_object **bo, uint64_t *iova)
1240 {
1241 	void *vaddr;
1242 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1243 	int ret;
1244 
1245 	if (IS_ERR(obj))
1246 		return ERR_CAST(obj);
1247 
1248 	if (iova) {
1249 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1250 		if (ret)
1251 			goto err;
1252 	}
1253 
1254 	vaddr = msm_gem_get_vaddr(obj);
1255 	if (IS_ERR(vaddr)) {
1256 		msm_gem_unpin_iova(obj, aspace);
1257 		ret = PTR_ERR(vaddr);
1258 		goto err;
1259 	}
1260 
1261 	if (bo)
1262 		*bo = obj;
1263 
1264 	return vaddr;
1265 err:
1266 	drm_gem_object_put(obj);
1267 
1268 	return ERR_PTR(ret);
1269 
1270 }
1271 
1272 void msm_gem_kernel_put(struct drm_gem_object *bo,
1273 		struct msm_gem_address_space *aspace)
1274 {
1275 	if (IS_ERR_OR_NULL(bo))
1276 		return;
1277 
1278 	msm_gem_put_vaddr(bo);
1279 	msm_gem_unpin_iova(bo, aspace);
1280 	drm_gem_object_put(bo);
1281 }
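
/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 * allocate a kernel-internal buffer that comes back vmap'd and pinned,
 * then release everything with the matching put:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use 'ptr' from the CPU and 'iova' from the GPU ...
 *	msm_gem_kernel_put(bo, aspace);
 */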
1282 
1283 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1284 {
1285 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1286 	va_list ap;
1287 
1288 	if (!fmt)
1289 		return;
1290 
1291 	va_start(ap, fmt);
1292 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1293 	va_end(ap);
1294 }
1295