// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (it is handled on the
 * cmdstream) and all we need to do is invalidate newly allocated pages
 * before mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss) or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
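
/*
 * A minimal sketch of how these two helpers pair up over the lifetime
 * of a WC/uncached buffer (flow only, not code from this file):
 *
 *	pages = get_pages(obj);    // allocate, then sync_for_device()
 *	...                        // GPU/display access via msm_obj->sgt
 *	put_pages(obj);            // sync_for_cpu(), then free the pages
 *
 * The dma_map/dma_unmap pair is used purely for its cache maintenance
 * side effects; the dma addresses it produces are not what the GPU
 * uses (the GPU goes through its own iommu, set up in msm_gem_map_vma()).
 */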

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
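
/*
 * Hedged usage sketch: a caller that needs the backing pages (e.g. to
 * build its own sg list) would bracket access like this.  Error
 * handling is elided, and note that msm_gem_put_pages() is currently a
 * no-op since pin counts are not yet tracked:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *
 *	if (!IS_ERR(pages)) {
 *		... use pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *		msm_gem_put_pages(obj);
 *	}
 */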

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}
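
/*
 * Illustrative userspace-side flow (not part of this file): the value
 * returned by msm_gem_mmap_offset() is a fake offset into the drm fd,
 * handed to userspace via e.g. DRM_IOCTL_MSM_GEM_INFO or the dumb-map
 * ioctl, and then passed straight to mmap(2):
 *
 *	uint64_t offset = ...;  // e.g. from MSM_INFO_GET_OFFSET
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 *
 * Faults on that mapping then land in msm_gem_fault() above.
 */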

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova_spaces(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
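
/*
 * Typical pairing, sketched under the assumption of a GPU-submit-like
 * caller: pin the iova for the duration of GPU access, then drop the
 * pin (the mapping itself stays around until the object is purged or
 * freed):
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, gpu->aspace, &iova);
 *	if (!ret) {
 *		... emit cmdstream referencing iova ...
 *		msm_gem_unpin_iova(obj, gpu->aspace);
 *	}
 */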

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
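
/*
 * Userspace counterpart, for illustration only: a dumb buffer created
 * through the generic ioctl lands in msm_gem_dumb_create() above with
 * WC + SCANOUT flags:
 *
 *	struct drm_mode_create_dumb req = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &req);
 *	// req.handle, req.pitch, req.size are now filled in
 */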

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
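
/*
 * Hedged sketch of the kernel vmap pairing: CPU access to a buffer's
 * contents brackets a get/put, with the actual vunmap deferred to the
 * shrinker, purge, or final free:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (!IS_ERR(ptr)) {
 *		memcpy(ptr, data, len);   // CPU writes via wc mapping
 *		msm_gem_put_vaddr(obj);   // only drops vmap_count
 *	}
 */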

/*
 * Update madvise status; returns true if not purged, else false
 * (or -errno).
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
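
/*
 * Sketch of the flow this enables (assuming the MSM_GEM_MADVISE ioctl
 * as the caller): userspace marks idle buffers purgeable, and the
 * shrinker may later reclaim them via msm_gem_purge() below:
 *
 *	// userspace: "I don't need this right now"
 *	madvise(handle, MSM_MADV_DONTNEED)
 *		-> msm_gem_madvise(obj, MSM_MADV_DONTNEED)
 *
 *	// userspace: "I need it again" -- a false return here means
 *	// the shrinker purged it meanwhile and the contents are gone
 *	madvise(handle, MSM_MADV_WILLNEED)
 *		-> msm_gem_madvise(obj, MSM_MADV_WILLNEED)
 */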

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova_spaces(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	mutex_lock(&priv->mm_lock);
	WARN_ON(msm_obj->active_count != 0);

	list_del_init(&msm_obj->mm_list);
	if (msm_obj->madv == MSM_MADV_WILLNEED)
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	else
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write, true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
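
/*
 * Sketch of the intended bracketing (driven from the MSM_GEM_CPU_PREP /
 * MSM_GEM_CPU_FINI ioctls): wait for GPU access to finish before the
 * CPU touches the buffer, then mark CPU access done:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE,
 *			&timeout);
 *	if (!ret) {
 *		... CPU reads/writes buffer contents ...
 *		msm_gem_cpu_fini(obj);
 *	}
 */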

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					/* get_pid_task() takes a task reference
					 * that must be dropped:
					 */
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		/* put_iova_vmas() expects the msm_obj lock to still be
		 * held, so drop the vmas before unlocking:
		 */
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* return directly here: obj is still NULL, so the fail path's
	 * drm_gem_object_put() would dereference a NULL pointer
	 */
	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See the comments above new_inode() for why this is required
		 * _and_ expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	/* Initially obj is idle, obj->madv == WILLNEED: */
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* return directly here: obj is still uninitialized, so the fail
	 * path's drm_gem_object_put() must not be reached
	 */
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}
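
/*
 * A minimal sketch of the kernel-internal allocation helpers above, as
 * used for things like ringbuffers and crashdump state (error handling
 * elided):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, gpu->aspace,
 *			&bo, &iova);
 *	if (!IS_ERR(ptr)) {
 *		... bo is pinned at iova and CPU-mapped at ptr ...
 *		msm_gem_kernel_put(bo, gpu->aspace, false);
 *	}
 */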

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
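
/*
 * Usage sketch: callers give buffers a short debug name that shows up
 * in the debugfs listing above, e.g.:
 *
 *	msm_gem_object_set_name(obj, "ring%d", ring->id);
 */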