xref: /linux/drivers/gpu/drm/msm/msm_gem.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 
13 #include <drm/drm_dumb_buffers.h>
14 #include <drm/drm_prime.h>
15 #include <drm/drm_file.h>
16 #include <drm/drm_fourcc.h>
17 
18 #include <trace/events/gpu_mem.h>
19 
20 #include "msm_drv.h"
21 #include "msm_gem.h"
22 #include "msm_gpu.h"
23 #include "msm_kms.h"
24 
update_device_mem(struct msm_drm_private * priv,ssize_t size)25 static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
26 {
27 	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
28 	trace_gpu_mem_total(0, 0, total_mem);
29 }
30 
update_ctx_mem(struct drm_file * file,ssize_t size)31 static void update_ctx_mem(struct drm_file *file, ssize_t size)
32 {
33 	struct msm_context *ctx = file->driver_priv;
34 	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
35 
36 	rcu_read_lock(); /* Locks file->pid! */
37 	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
38 	rcu_read_unlock();
39 
40 }
41 
msm_gem_open(struct drm_gem_object * obj,struct drm_file * file)42 static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
43 {
44 	msm_gem_vma_get(obj);
45 	update_ctx_mem(file, obj->size);
46 	return 0;
47 }
48 
49 static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
50 			    bool close, const char *reason);
51 
msm_gem_close(struct drm_gem_object * obj,struct drm_file * file)52 static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
53 {
54 	struct msm_context *ctx = file->driver_priv;
55 	struct drm_exec exec;
56 
57 	update_ctx_mem(file, -obj->size);
58 	msm_gem_vma_put(obj);
59 
60 	/*
61 	 * If VM isn't created yet, nothing to cleanup.  And in fact calling
62 	 * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
63 	 * down the mappings of shared buffers in other contexts.
64 	 */
65 	if (!ctx->vm)
66 		return;
67 
68 	/*
69 	 * VM_BIND does not depend on implicit teardown of VMAs on handle
70 	 * close, but instead on implicit teardown of the VM when the device
71 	 * is closed (see msm_gem_vm_close())
72 	 */
73 	if (msm_context_is_vmbind(ctx))
74 		return;
75 
76 	/*
77 	 * TODO we might need to kick this to a queue to avoid blocking
78 	 * in CLOSE ioctl
79 	 */
80 	dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
81 			      MAX_SCHEDULE_TIMEOUT);
82 
83 	msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
84 	put_iova_spaces(obj, ctx->vm, true, "close");
85 	drm_exec_fini(&exec);     /* drop locks */
86 }
87 
88 /*
89  * Get/put for kms->vm VMA
90  */
91 
msm_gem_vma_get(struct drm_gem_object * obj)92 void msm_gem_vma_get(struct drm_gem_object *obj)
93 {
94 	atomic_inc(&to_msm_bo(obj)->vma_ref);
95 }
96 
msm_gem_vma_put(struct drm_gem_object * obj)97 void msm_gem_vma_put(struct drm_gem_object *obj)
98 {
99 	struct msm_drm_private *priv = obj->dev->dev_private;
100 
101 	if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
102 		return;
103 
104 	if (!priv->kms)
105 		return;
106 
107 #ifdef CONFIG_DRM_MSM_KMS
108 	struct drm_exec exec;
109 
110 	msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
111 	put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
112 	drm_exec_fini(&exec);     /* drop locks */
113 #endif
114 }
115 
116 /*
117  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
118  * API.  Really GPU cache is out of scope here (handled on cmdstream)
119  * and all we need to do is invalidate newly allocated pages before
120  * mapping to CPU as uncached/writecombine.
121  *
122  * On top of this, we have the added headache, that depending on
123  * display generation, the display's iommu may be wired up to either
124  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
125  * that here we either have dma-direct or iommu ops.
126  *
127  * Let this be a cautionary tail of abstraction gone wrong.
128  */
129 
sync_for_device(struct msm_gem_object * msm_obj)130 static void sync_for_device(struct msm_gem_object *msm_obj)
131 {
132 	struct device *dev = msm_obj->base.dev->dev;
133 
134 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
135 }
136 
sync_for_cpu(struct msm_gem_object * msm_obj)137 static void sync_for_cpu(struct msm_gem_object *msm_obj)
138 {
139 	struct device *dev = msm_obj->base.dev->dev;
140 
141 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
142 }
143 
update_lru_active(struct drm_gem_object * obj)144 static void update_lru_active(struct drm_gem_object *obj)
145 {
146 	struct msm_drm_private *priv = obj->dev->dev_private;
147 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
148 
149 	GEM_WARN_ON(!msm_obj->pages);
150 
151 	if (msm_obj->pin_count) {
152 		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
153 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
154 		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
155 	} else {
156 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
157 
158 		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
159 	}
160 }
161 
update_lru_locked(struct drm_gem_object * obj)162 static void update_lru_locked(struct drm_gem_object *obj)
163 {
164 	struct msm_drm_private *priv = obj->dev->dev_private;
165 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
166 
167 	msm_gem_assert_locked(&msm_obj->base);
168 
169 	if (!msm_obj->pages) {
170 		GEM_WARN_ON(msm_obj->pin_count);
171 
172 		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
173 	} else {
174 		update_lru_active(obj);
175 	}
176 }
177 
update_lru(struct drm_gem_object * obj)178 static void update_lru(struct drm_gem_object *obj)
179 {
180 	struct msm_drm_private *priv = obj->dev->dev_private;
181 
182 	mutex_lock(&priv->lru.lock);
183 	update_lru_locked(obj);
184 	mutex_unlock(&priv->lru.lock);
185 }
186 
get_pages(struct drm_gem_object * obj)187 static struct page **get_pages(struct drm_gem_object *obj)
188 {
189 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
190 
191 	msm_gem_assert_locked(obj);
192 
193 	if (!msm_obj->pages) {
194 		struct drm_device *dev = obj->dev;
195 		struct page **p;
196 		size_t npages = obj->size >> PAGE_SHIFT;
197 
198 		p = drm_gem_get_pages(obj);
199 
200 		if (IS_ERR(p)) {
201 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
202 					PTR_ERR(p));
203 			return p;
204 		}
205 
206 		update_device_mem(dev->dev_private, obj->size);
207 
208 		msm_obj->pages = p;
209 
210 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
211 		if (IS_ERR(msm_obj->sgt)) {
212 			void *ptr = ERR_CAST(msm_obj->sgt);
213 
214 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
215 			msm_obj->sgt = NULL;
216 			return ptr;
217 		}
218 
219 		/* For non-cached buffers, ensure the new pages are clean
220 		 * because display controller, GPU, etc. are not coherent:
221 		 */
222 		if (msm_obj->flags & MSM_BO_WC)
223 			sync_for_device(msm_obj);
224 
225 		update_lru(obj);
226 	}
227 
228 	return msm_obj->pages;
229 }
230 
put_pages(struct drm_gem_object * obj)231 static void put_pages(struct drm_gem_object *obj)
232 {
233 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
234 
235 	/*
236 	 * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
237 	 * See explaination in msm_gem_assert_locked()
238 	 */
239 	if (kref_read(&obj->refcount))
240 		drm_gpuvm_bo_gem_evict(obj, true);
241 
242 	if (msm_obj->pages) {
243 		if (msm_obj->sgt) {
244 			/* For non-cached buffers, ensure the new
245 			 * pages are clean because display controller,
246 			 * GPU, etc. are not coherent:
247 			 */
248 			if (msm_obj->flags & MSM_BO_WC)
249 				sync_for_cpu(msm_obj);
250 
251 			sg_free_table(msm_obj->sgt);
252 			kfree(msm_obj->sgt);
253 			msm_obj->sgt = NULL;
254 		}
255 
256 		update_device_mem(obj->dev->dev_private, -obj->size);
257 
258 		drm_gem_put_pages(obj, msm_obj->pages, true, false);
259 
260 		msm_obj->pages = NULL;
261 		update_lru(obj);
262 	}
263 }
264 
msm_gem_get_pages_locked(struct drm_gem_object * obj,unsigned madv)265 struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
266 {
267 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
268 
269 	msm_gem_assert_locked(obj);
270 
271 	if (msm_obj->madv > madv) {
272 		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
273 				     msm_obj->madv, madv);
274 		return ERR_PTR(-EBUSY);
275 	}
276 
277 	return get_pages(obj);
278 }
279 
280 /*
281  * Update the pin count of the object, call under lru.lock
282  */
msm_gem_pin_obj_locked(struct drm_gem_object * obj)283 void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
284 {
285 	struct msm_drm_private *priv = obj->dev->dev_private;
286 
287 	msm_gem_assert_locked(obj);
288 
289 	to_msm_bo(obj)->pin_count++;
290 	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
291 }
292 
pin_obj_locked(struct drm_gem_object * obj)293 static void pin_obj_locked(struct drm_gem_object *obj)
294 {
295 	struct msm_drm_private *priv = obj->dev->dev_private;
296 
297 	mutex_lock(&priv->lru.lock);
298 	msm_gem_pin_obj_locked(obj);
299 	mutex_unlock(&priv->lru.lock);
300 }
301 
msm_gem_pin_pages_locked(struct drm_gem_object * obj)302 struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
303 {
304 	struct page **p;
305 
306 	msm_gem_assert_locked(obj);
307 
308 	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
309 	if (!IS_ERR(p))
310 		pin_obj_locked(obj);
311 
312 	return p;
313 }
314 
msm_gem_unpin_pages_locked(struct drm_gem_object * obj)315 void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
316 {
317 	msm_gem_assert_locked(obj);
318 
319 	msm_gem_unpin_locked(obj);
320 }
321 
msm_gem_pgprot(struct msm_gem_object * msm_obj,pgprot_t prot)322 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
323 {
324 	if (msm_obj->flags & MSM_BO_WC)
325 		return pgprot_writecombine(prot);
326 	return prot;
327 }
328 
msm_gem_fault(struct vm_fault * vmf)329 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
330 {
331 	struct vm_area_struct *vma = vmf->vma;
332 	struct drm_gem_object *obj = vma->vm_private_data;
333 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
334 	struct page **pages;
335 	unsigned long pfn;
336 	pgoff_t pgoff;
337 	int err;
338 	vm_fault_t ret;
339 
340 	/*
341 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
342 	 * a reference on obj. So, we dont need to hold one here.
343 	 */
344 	err = msm_gem_lock_interruptible(obj);
345 	if (err) {
346 		ret = VM_FAULT_NOPAGE;
347 		goto out;
348 	}
349 
350 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
351 		msm_gem_unlock(obj);
352 		return VM_FAULT_SIGBUS;
353 	}
354 
355 	/* make sure we have pages attached now */
356 	pages = get_pages(obj);
357 	if (IS_ERR(pages)) {
358 		ret = vmf_error(PTR_ERR(pages));
359 		goto out_unlock;
360 	}
361 
362 	/* We don't use vmf->pgoff since that has the fake offset: */
363 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
364 
365 	pfn = page_to_pfn(pages[pgoff]);
366 
367 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
368 			pfn, pfn << PAGE_SHIFT);
369 
370 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
371 
372 out_unlock:
373 	msm_gem_unlock(obj);
374 out:
375 	return ret;
376 }
377 
378 /** get mmap offset */
mmap_offset(struct drm_gem_object * obj)379 static uint64_t mmap_offset(struct drm_gem_object *obj)
380 {
381 	struct drm_device *dev = obj->dev;
382 	int ret;
383 
384 	msm_gem_assert_locked(obj);
385 
386 	/* Make it mmapable */
387 	ret = drm_gem_create_mmap_offset(obj);
388 
389 	if (ret) {
390 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
391 		return 0;
392 	}
393 
394 	return drm_vma_node_offset_addr(&obj->vma_node);
395 }
396 
msm_gem_mmap_offset(struct drm_gem_object * obj)397 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
398 {
399 	uint64_t offset;
400 
401 	msm_gem_lock(obj);
402 	offset = mmap_offset(obj);
403 	msm_gem_unlock(obj);
404 	return offset;
405 }
406 
lookup_vma(struct drm_gem_object * obj,struct drm_gpuvm * vm)407 static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
408 				    struct drm_gpuvm *vm)
409 {
410 	struct drm_gpuvm_bo *vm_bo;
411 
412 	msm_gem_assert_locked(obj);
413 
414 	drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
415 		struct drm_gpuva *vma;
416 
417 		drm_gpuvm_bo_for_each_va (vma, vm_bo) {
418 			if (vma->vm == vm) {
419 				/* lookup_vma() should only be used in paths
420 				 * with at most one vma per vm
421 				 */
422 				GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
423 
424 				return vma;
425 			}
426 		}
427 	}
428 
429 	return NULL;
430 }
431 
432 /*
433  * If close is true, this also closes the VMA (releasing the allocated
434  * iova range) in addition to removing the iommu mapping.  In the eviction
435  * case (!close), we keep the iova allocated, but only remove the iommu
436  * mapping.
437  */
438 static void
put_iova_spaces(struct drm_gem_object * obj,struct drm_gpuvm * vm,bool close,const char * reason)439 put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
440 		bool close, const char *reason)
441 {
442 	struct drm_gpuvm_bo *vm_bo, *tmp;
443 
444 	msm_gem_assert_locked(obj);
445 
446 	drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
447 		struct drm_gpuva *vma, *vmatmp;
448 
449 		if (vm && vm_bo->vm != vm)
450 			continue;
451 
452 		drm_gpuvm_bo_get(vm_bo);
453 
454 		drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
455 			msm_gem_vma_unmap(vma, reason);
456 			if (close)
457 				msm_gem_vma_close(vma);
458 		}
459 
460 		drm_gpuvm_bo_put(vm_bo);
461 	}
462 }
463 
get_vma_locked(struct drm_gem_object * obj,struct drm_gpuvm * vm,u64 range_start,u64 range_end)464 static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
465 					struct drm_gpuvm *vm, u64 range_start,
466 					u64 range_end)
467 {
468 	struct drm_gpuva *vma;
469 
470 	msm_gem_assert_locked(obj);
471 
472 	vma = lookup_vma(obj, vm);
473 
474 	if (!vma) {
475 		vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
476 	} else {
477 		GEM_WARN_ON(vma->va.addr < range_start);
478 		GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
479 	}
480 
481 	return vma;
482 }
483 
msm_gem_prot(struct drm_gem_object * obj)484 int msm_gem_prot(struct drm_gem_object *obj)
485 {
486 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
487 	int prot = IOMMU_READ;
488 
489 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
490 		prot |= IOMMU_WRITE;
491 
492 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
493 		prot |= IOMMU_PRIV;
494 
495 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
496 		prot |= IOMMU_CACHE;
497 
498 	return prot;
499 }
500 
msm_gem_pin_vma_locked(struct drm_gem_object * obj,struct drm_gpuva * vma)501 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
502 {
503 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
504 	struct page **pages;
505 	int prot = msm_gem_prot(obj);
506 
507 	msm_gem_assert_locked(obj);
508 
509 	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
510 	if (IS_ERR(pages))
511 		return PTR_ERR(pages);
512 
513 	return msm_gem_vma_map(vma, prot, msm_obj->sgt);
514 }
515 
msm_gem_unpin_locked(struct drm_gem_object * obj)516 void msm_gem_unpin_locked(struct drm_gem_object *obj)
517 {
518 	struct msm_drm_private *priv = obj->dev->dev_private;
519 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
520 
521 	msm_gem_assert_locked(obj);
522 
523 	mutex_lock(&priv->lru.lock);
524 	msm_obj->pin_count--;
525 	GEM_WARN_ON(msm_obj->pin_count < 0);
526 	update_lru_locked(obj);
527 	mutex_unlock(&priv->lru.lock);
528 }
529 
530 /* Special unpin path for use in fence-signaling path, avoiding the need
531  * to hold the obj lock by only depending on things that a protected by
532  * the LRU lock.  In particular we know that that we already have backing
533  * and and that the object's dma_resv has the fence for the current
534  * submit/job which will prevent us racing against page eviction.
535  */
msm_gem_unpin_active(struct drm_gem_object * obj)536 void msm_gem_unpin_active(struct drm_gem_object *obj)
537 {
538 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
539 
540 	msm_obj->pin_count--;
541 	GEM_WARN_ON(msm_obj->pin_count < 0);
542 	update_lru_active(obj);
543 }
544 
msm_gem_get_vma_locked(struct drm_gem_object * obj,struct drm_gpuvm * vm)545 struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
546 					 struct drm_gpuvm *vm)
547 {
548 	return get_vma_locked(obj, vm, 0, U64_MAX);
549 }
550 
get_and_pin_iova_range_locked(struct drm_gem_object * obj,struct drm_gpuvm * vm,uint64_t * iova,u64 range_start,u64 range_end)551 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
552 					 struct drm_gpuvm *vm, uint64_t *iova,
553 					 u64 range_start, u64 range_end)
554 {
555 	struct drm_gpuva *vma;
556 	int ret;
557 
558 	msm_gem_assert_locked(obj);
559 
560 	if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
561 		return -EINVAL;
562 
563 	vma = get_vma_locked(obj, vm, range_start, range_end);
564 	if (IS_ERR(vma))
565 		return PTR_ERR(vma);
566 
567 	ret = msm_gem_pin_vma_locked(obj, vma);
568 	if (!ret) {
569 		*iova = vma->va.addr;
570 		pin_obj_locked(obj);
571 	}
572 
573 	return ret;
574 }
575 
576 /*
577  * get iova and pin it. Should have a matching put
578  * limits iova to specified range (in pages)
579  */
msm_gem_get_and_pin_iova_range(struct drm_gem_object * obj,struct drm_gpuvm * vm,uint64_t * iova,u64 range_start,u64 range_end)580 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
581 				   struct drm_gpuvm *vm, uint64_t *iova,
582 				   u64 range_start, u64 range_end)
583 {
584 	struct drm_exec exec;
585 	int ret;
586 
587 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
588 	ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
589 	drm_exec_fini(&exec);     /* drop locks */
590 
591 	return ret;
592 }
593 
594 /* get iova and pin it. Should have a matching put */
msm_gem_get_and_pin_iova(struct drm_gem_object * obj,struct drm_gpuvm * vm,uint64_t * iova)595 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
596 			     uint64_t *iova)
597 {
598 	return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
599 }
600 
601 /*
602  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
603  * valid for the life of the object
604  */
msm_gem_get_iova(struct drm_gem_object * obj,struct drm_gpuvm * vm,uint64_t * iova)605 int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
606 		     uint64_t *iova)
607 {
608 	struct drm_gpuva *vma;
609 	struct drm_exec exec;
610 	int ret = 0;
611 
612 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
613 	vma = get_vma_locked(obj, vm, 0, U64_MAX);
614 	if (IS_ERR(vma)) {
615 		ret = PTR_ERR(vma);
616 	} else {
617 		*iova = vma->va.addr;
618 	}
619 	drm_exec_fini(&exec);     /* drop locks */
620 
621 	return ret;
622 }
623 
clear_iova(struct drm_gem_object * obj,struct drm_gpuvm * vm)624 static int clear_iova(struct drm_gem_object *obj,
625 		      struct drm_gpuvm *vm)
626 {
627 	struct drm_gpuva *vma = lookup_vma(obj, vm);
628 
629 	if (!vma)
630 		return 0;
631 
632 	msm_gem_vma_unmap(vma, NULL);
633 	msm_gem_vma_close(vma);
634 
635 	return 0;
636 }
637 
638 /*
639  * Get the requested iova but don't pin it.  Fails if the requested iova is
640  * not available.  Doesn't need a put because iovas are currently valid for
641  * the life of the object.
642  *
643  * Setting an iova of zero will clear the vma.
644  */
msm_gem_set_iova(struct drm_gem_object * obj,struct drm_gpuvm * vm,uint64_t iova)645 int msm_gem_set_iova(struct drm_gem_object *obj,
646 		     struct drm_gpuvm *vm, uint64_t iova)
647 {
648 	struct drm_exec exec;
649 	int ret = 0;
650 
651 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
652 	if (!iova) {
653 		ret = clear_iova(obj, vm);
654 	} else {
655 		struct drm_gpuva *vma;
656 		vma = get_vma_locked(obj, vm, iova, iova + obj->size);
657 		if (IS_ERR(vma)) {
658 			ret = PTR_ERR(vma);
659 		} else if (GEM_WARN_ON(vma->va.addr != iova)) {
660 			clear_iova(obj, vm);
661 			ret = -EBUSY;
662 		}
663 	}
664 	drm_exec_fini(&exec);     /* drop locks */
665 
666 	return ret;
667 }
668 
is_kms_vm(struct drm_gpuvm * vm)669 static bool is_kms_vm(struct drm_gpuvm *vm)
670 {
671 #ifdef CONFIG_DRM_MSM_KMS
672 	struct msm_drm_private *priv = vm->drm->dev_private;
673 
674 	return priv->kms && (priv->kms->vm == vm);
675 #else
676 	return false;
677 #endif
678 }
679 
680 /*
681  * Unpin a iova by updating the reference counts. The memory isn't actually
682  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
683  * to get rid of it
684  */
msm_gem_unpin_iova(struct drm_gem_object * obj,struct drm_gpuvm * vm)685 void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
686 {
687 	struct drm_gpuva *vma;
688 	struct drm_exec exec;
689 
690 	msm_gem_lock_vm_and_obj(&exec, obj, vm);
691 	vma = lookup_vma(obj, vm);
692 	if (vma) {
693 		msm_gem_unpin_locked(obj);
694 	}
695 	if (!is_kms_vm(vm))
696 		put_iova_spaces(obj, vm, true, "close");
697 	drm_exec_fini(&exec);     /* drop locks */
698 }
699 
msm_gem_dumb_create(struct drm_file * file,struct drm_device * dev,struct drm_mode_create_dumb * args)700 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
701 		struct drm_mode_create_dumb *args)
702 {
703 	u32 fourcc;
704 	u64 pitch_align;
705 	int ret;
706 
707 	/*
708 	 * Adreno needs pitch aligned to 32 pixels. Compute the number
709 	 * of bytes for a block of 32 pixels at the given color format.
710 	 * Use the result as pitch alignment.
711 	 */
712 	fourcc = drm_driver_color_mode_format(dev, args->bpp);
713 	if (fourcc != DRM_FORMAT_INVALID) {
714 		const struct drm_format_info *info;
715 
716 		info = drm_format_info(fourcc);
717 		if (!info)
718 			return -EINVAL;
719 		pitch_align = drm_format_info_min_pitch(info, 0, 32);
720 	} else {
721 		pitch_align = round_up(args->width, 32) * DIV_ROUND_UP(args->bpp, SZ_8);
722 	}
723 	if (!pitch_align || pitch_align > U32_MAX)
724 		return -EINVAL;
725 	ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
726 	if (ret)
727 		return ret;
728 
729 	return msm_gem_new_handle(dev, file, args->size,
730 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
731 }
732 
msm_gem_dumb_map_offset(struct drm_file * file,struct drm_device * dev,uint32_t handle,uint64_t * offset)733 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
734 		uint32_t handle, uint64_t *offset)
735 {
736 	struct drm_gem_object *obj;
737 	int ret = 0;
738 
739 	/* GEM does all our handle to object mapping */
740 	obj = drm_gem_object_lookup(file, handle);
741 	if (obj == NULL) {
742 		ret = -ENOENT;
743 		goto fail;
744 	}
745 
746 	*offset = msm_gem_mmap_offset(obj);
747 
748 	drm_gem_object_put(obj);
749 
750 fail:
751 	return ret;
752 }
753 
get_vaddr(struct drm_gem_object * obj,unsigned madv)754 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
755 {
756 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
757 	struct page **pages;
758 	int ret = 0;
759 
760 	msm_gem_assert_locked(obj);
761 
762 	if (drm_gem_is_imported(obj))
763 		return ERR_PTR(-ENODEV);
764 
765 	pages = msm_gem_get_pages_locked(obj, madv);
766 	if (IS_ERR(pages))
767 		return ERR_CAST(pages);
768 
769 	pin_obj_locked(obj);
770 
771 	/* increment vmap_count *before* vmap() call, so shrinker can
772 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
773 	 * This guarantees that we won't try to msm_gem_vunmap() this
774 	 * same object from within the vmap() call (while we already
775 	 * hold msm_obj lock)
776 	 */
777 	msm_obj->vmap_count++;
778 
779 	if (!msm_obj->vaddr) {
780 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
781 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
782 		if (msm_obj->vaddr == NULL) {
783 			ret = -ENOMEM;
784 			goto fail;
785 		}
786 	}
787 
788 	return msm_obj->vaddr;
789 
790 fail:
791 	msm_obj->vmap_count--;
792 	msm_gem_unpin_locked(obj);
793 	return ERR_PTR(ret);
794 }
795 
msm_gem_get_vaddr_locked(struct drm_gem_object * obj)796 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
797 {
798 	return get_vaddr(obj, MSM_MADV_WILLNEED);
799 }
800 
msm_gem_get_vaddr(struct drm_gem_object * obj)801 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
802 {
803 	void *ret;
804 
805 	msm_gem_lock(obj);
806 	ret = msm_gem_get_vaddr_locked(obj);
807 	msm_gem_unlock(obj);
808 
809 	return ret;
810 }
811 
812 /*
813  * Don't use this!  It is for the very special case of dumping
814  * submits from GPU hangs or faults, were the bo may already
815  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
816  * active list.
817  */
msm_gem_get_vaddr_active(struct drm_gem_object * obj)818 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
819 {
820 	return get_vaddr(obj, __MSM_MADV_PURGED);
821 }
822 
msm_gem_put_vaddr_locked(struct drm_gem_object * obj)823 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
824 {
825 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
826 
827 	msm_gem_assert_locked(obj);
828 	GEM_WARN_ON(msm_obj->vmap_count < 1);
829 
830 	msm_obj->vmap_count--;
831 	msm_gem_unpin_locked(obj);
832 }
833 
msm_gem_put_vaddr(struct drm_gem_object * obj)834 void msm_gem_put_vaddr(struct drm_gem_object *obj)
835 {
836 	msm_gem_lock(obj);
837 	msm_gem_put_vaddr_locked(obj);
838 	msm_gem_unlock(obj);
839 }
840 
841 /* Update madvise status, returns true if not purged, else
842  * false or -errno.
843  */
msm_gem_madvise(struct drm_gem_object * obj,unsigned madv)844 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
845 {
846 	struct msm_drm_private *priv = obj->dev->dev_private;
847 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
848 
849 	msm_gem_lock(obj);
850 
851 	mutex_lock(&priv->lru.lock);
852 
853 	if (msm_obj->madv != __MSM_MADV_PURGED)
854 		msm_obj->madv = madv;
855 
856 	madv = msm_obj->madv;
857 
858 	/* If the obj is inactive, we might need to move it
859 	 * between inactive lists
860 	 */
861 	update_lru_locked(obj);
862 
863 	mutex_unlock(&priv->lru.lock);
864 
865 	msm_gem_unlock(obj);
866 
867 	return (madv != __MSM_MADV_PURGED);
868 }
869 
msm_gem_purge(struct drm_gem_object * obj)870 void msm_gem_purge(struct drm_gem_object *obj)
871 {
872 	struct drm_device *dev = obj->dev;
873 	struct msm_drm_private *priv = obj->dev->dev_private;
874 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
875 
876 	msm_gem_assert_locked(obj);
877 	GEM_WARN_ON(!is_purgeable(msm_obj));
878 
879 	/* Get rid of any iommu mapping(s): */
880 	put_iova_spaces(obj, NULL, false, "purge");
881 
882 	msm_gem_vunmap(obj);
883 
884 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
885 
886 	put_pages(obj);
887 
888 	mutex_lock(&priv->lru.lock);
889 	/* A one-way transition: */
890 	msm_obj->madv = __MSM_MADV_PURGED;
891 	mutex_unlock(&priv->lru.lock);
892 
893 	drm_gem_free_mmap_offset(obj);
894 
895 	/* Our goal here is to return as much of the memory as
896 	 * is possible back to the system as we are called from OOM.
897 	 * To do this we must instruct the shmfs to drop all of its
898 	 * backing pages, *now*.
899 	 */
900 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
901 
902 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
903 			0, (loff_t)-1);
904 }
905 
906 /*
907  * Unpin the backing pages and make them available to be swapped out.
908  */
msm_gem_evict(struct drm_gem_object * obj)909 void msm_gem_evict(struct drm_gem_object *obj)
910 {
911 	struct drm_device *dev = obj->dev;
912 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
913 
914 	msm_gem_assert_locked(obj);
915 	GEM_WARN_ON(is_unevictable(msm_obj));
916 
917 	/* Get rid of any iommu mapping(s): */
918 	put_iova_spaces(obj, NULL, false, "evict");
919 
920 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
921 
922 	put_pages(obj);
923 }
924 
msm_gem_vunmap(struct drm_gem_object * obj)925 void msm_gem_vunmap(struct drm_gem_object *obj)
926 {
927 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
928 
929 	msm_gem_assert_locked(obj);
930 
931 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
932 		return;
933 
934 	vunmap(msm_obj->vaddr);
935 	msm_obj->vaddr = NULL;
936 }
937 
msm_gem_active(struct drm_gem_object * obj)938 bool msm_gem_active(struct drm_gem_object *obj)
939 {
940 	msm_gem_assert_locked(obj);
941 
942 	if (to_msm_bo(obj)->pin_count)
943 		return true;
944 
945 	return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
946 }
947 
msm_gem_cpu_prep(struct drm_gem_object * obj,uint32_t op,ktime_t * timeout)948 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
949 {
950 	bool write = !!(op & MSM_PREP_WRITE);
951 	unsigned long remain =
952 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
953 	long ret;
954 
955 	if (op & MSM_PREP_BOOST) {
956 		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
957 				      ktime_get());
958 	}
959 
960 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
961 				    true,  remain);
962 	if (ret == 0)
963 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
964 	else if (ret < 0)
965 		return ret;
966 
967 	/* TODO cache maintenance */
968 
969 	return 0;
970 }
971 
msm_gem_cpu_fini(struct drm_gem_object * obj)972 int msm_gem_cpu_fini(struct drm_gem_object *obj)
973 {
974 	/* TODO cache maintenance */
975 	return 0;
976 }
977 
978 #ifdef CONFIG_DEBUG_FS
msm_gem_describe(struct drm_gem_object * obj,struct seq_file * m,struct msm_gem_stats * stats)979 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
980 		struct msm_gem_stats *stats)
981 {
982 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
983 	struct dma_resv *robj = obj->resv;
984 	uint64_t off = drm_vma_node_start(&obj->vma_node);
985 	const char *madv;
986 
987 	if (!msm_gem_trylock(obj))
988 		return;
989 
990 	stats->all.count++;
991 	stats->all.size += obj->size;
992 
993 	if (msm_gem_active(obj)) {
994 		stats->active.count++;
995 		stats->active.size += obj->size;
996 	}
997 
998 	if (msm_obj->pages) {
999 		stats->resident.count++;
1000 		stats->resident.size += obj->size;
1001 	}
1002 
1003 	switch (msm_obj->madv) {
1004 	case __MSM_MADV_PURGED:
1005 		stats->purged.count++;
1006 		stats->purged.size += obj->size;
1007 		madv = " purged";
1008 		break;
1009 	case MSM_MADV_DONTNEED:
1010 		stats->purgeable.count++;
1011 		stats->purgeable.size += obj->size;
1012 		madv = " purgeable";
1013 		break;
1014 	case MSM_MADV_WILLNEED:
1015 	default:
1016 		madv = "";
1017 		break;
1018 	}
1019 
1020 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
1021 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
1022 			obj->name, kref_read(&obj->refcount),
1023 			off, msm_obj->vaddr);
1024 
1025 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
1026 
1027 	if (!list_empty(&obj->gpuva.list)) {
1028 		struct drm_gpuvm_bo *vm_bo;
1029 
1030 		seq_puts(m, "      vmas:");
1031 
1032 		drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1033 			struct drm_gpuva *vma;
1034 
1035 			drm_gpuvm_bo_for_each_va (vma, vm_bo) {
1036 				const char *name, *comm;
1037 				struct msm_gem_vm *vm = to_msm_vm(vma->vm);
1038 				struct task_struct *task =
1039 					get_pid_task(vm->pid, PIDTYPE_PID);
1040 				if (task) {
1041 					comm = kstrdup(task->comm, GFP_KERNEL);
1042 					put_task_struct(task);
1043 				} else {
1044 					comm = NULL;
1045 				}
1046 				name = vm->base.name;
1047 
1048 				seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
1049 					   name, comm ? ":" : "", comm ? comm : "",
1050 					   vma->vm, vma->va.addr,
1051 					   to_msm_vma(vma)->mapped ? "" : "un");
1052 				kfree(comm);
1053 			}
1054 		}
1055 
1056 		seq_puts(m, "\n");
1057 	}
1058 
1059 	dma_resv_describe(robj, m);
1060 	msm_gem_unlock(obj);
1061 }
1062 
msm_gem_describe_objects(struct list_head * list,struct seq_file * m)1063 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1064 {
1065 	struct msm_gem_stats stats = {};
1066 	struct msm_gem_object *msm_obj;
1067 
1068 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1069 	list_for_each_entry(msm_obj, list, node) {
1070 		struct drm_gem_object *obj = &msm_obj->base;
1071 		seq_puts(m, "   ");
1072 		msm_gem_describe(obj, m, &stats);
1073 	}
1074 
1075 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1076 			stats.all.count, stats.all.size);
1077 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1078 			stats.active.count, stats.active.size);
1079 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1080 			stats.resident.count, stats.resident.size);
1081 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1082 			stats.purgeable.count, stats.purgeable.size);
1083 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1084 			stats.purged.count, stats.purged.size);
1085 }
1086 #endif
1087 
1088 /* don't call directly!  Use drm_gem_object_put() */
msm_gem_free_object(struct drm_gem_object * obj)1089 static void msm_gem_free_object(struct drm_gem_object *obj)
1090 {
1091 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1092 	struct drm_device *dev = obj->dev;
1093 	struct msm_drm_private *priv = dev->dev_private;
1094 	struct drm_exec exec;
1095 
1096 	mutex_lock(&priv->obj_lock);
1097 	list_del(&msm_obj->node);
1098 	mutex_unlock(&priv->obj_lock);
1099 
1100 	/*
1101 	 * We need to lock any VMs the object is still attached to, but not
1102 	 * the object itself (see explaination in msm_gem_assert_locked()),
1103 	 * so just open-code this special case.
1104 	 *
1105 	 * Note that we skip the dance if we aren't attached to any VM.  This
1106 	 * is load bearing.  The driver needs to support two usage models:
1107 	 *
1108 	 * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
1109 	 *    implicitly torn down when the object is freed, the VMA's do
1110 	 *    not hold a hard reference to the BO.
1111 	 *
1112 	 * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
1113 	 *    BO.  This can be dropped when the VM is closed and it's associated
1114 	 *    VMAs are torn down.  (See msm_gem_vm_close()).
1115 	 *
1116 	 * In the latter case the last reference to a BO can be dropped while
1117 	 * we already have the VM locked.  It would have already been removed
1118 	 * from the gpuva list, but lockdep doesn't know that.  Or understand
1119 	 * the differences between the two usage models.
1120 	 */
1121 	if (!list_empty(&obj->gpuva.list)) {
1122 		drm_exec_init(&exec, 0, 0);
1123 		drm_exec_until_all_locked (&exec) {
1124 			struct drm_gpuvm_bo *vm_bo;
1125 			drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1126 				drm_exec_lock_obj(&exec,
1127 						  drm_gpuvm_resv_obj(vm_bo->vm));
1128 				drm_exec_retry_on_contention(&exec);
1129 			}
1130 		}
1131 		put_iova_spaces(obj, NULL, true, "free");
1132 		drm_exec_fini(&exec);     /* drop locks */
1133 	}
1134 
1135 	if (drm_gem_is_imported(obj)) {
1136 		GEM_WARN_ON(msm_obj->vaddr);
1137 
1138 		/* Don't drop the pages for imported dmabuf, as they are not
1139 		 * ours, just free the array we allocated:
1140 		 */
1141 		kvfree(msm_obj->pages);
1142 
1143 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1144 	} else {
1145 		msm_gem_vunmap(obj);
1146 		put_pages(obj);
1147 	}
1148 
1149 	/*
1150 	 * In error paths, we could end up here before msm_gem_new_handle()
1151 	 * has changed obj->resv to point to the shared resv.  In this case,
1152 	 * we don't want to drop a ref to the shared r_obj that we haven't
1153 	 * taken yet.
1154 	 */
1155 	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
1156 		struct drm_gem_object *r_obj =
1157 			container_of(obj->resv, struct drm_gem_object, _resv);
1158 
1159 		/* Drop reference we hold to shared resv obj: */
1160 		drm_gem_object_put(r_obj);
1161 	}
1162 
1163 	drm_gem_object_release(obj);
1164 
1165 	kfree(msm_obj->metadata);
1166 	kfree(msm_obj);
1167 }
1168 
msm_gem_object_mmap(struct drm_gem_object * obj,struct vm_area_struct * vma)1169 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1170 {
1171 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1172 
1173 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1174 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1175 
1176 	return 0;
1177 }
1178 
1179 /* convenience method to construct a GEM buffer object, and userspace handle */
msm_gem_new_handle(struct drm_device * dev,struct drm_file * file,size_t size,uint32_t flags,uint32_t * handle,char * name)1180 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1181 		size_t size, uint32_t flags, uint32_t *handle,
1182 		char *name)
1183 {
1184 	struct drm_gem_object *obj;
1185 	int ret;
1186 
1187 	obj = msm_gem_new(dev, size, flags);
1188 
1189 	if (IS_ERR(obj))
1190 		return PTR_ERR(obj);
1191 
1192 	if (name)
1193 		msm_gem_object_set_name(obj, "%s", name);
1194 
1195 	if (flags & MSM_BO_NO_SHARE) {
1196 		struct msm_context *ctx = file->driver_priv;
1197 		struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
1198 
1199 		drm_gem_object_get(r_obj);
1200 
1201 		obj->resv = r_obj->resv;
1202 	}
1203 
1204 	ret = drm_gem_handle_create(file, obj, handle);
1205 
1206 	/* drop reference from allocate - handle holds it now */
1207 	drm_gem_object_put(obj);
1208 
1209 	return ret;
1210 }
1211 
msm_gem_status(struct drm_gem_object * obj)1212 static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1213 {
1214 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1215 	enum drm_gem_object_status status = 0;
1216 
1217 	if (msm_obj->pages)
1218 		status |= DRM_GEM_OBJECT_RESIDENT;
1219 
1220 	if (msm_obj->madv == MSM_MADV_DONTNEED)
1221 		status |= DRM_GEM_OBJECT_PURGEABLE;
1222 
1223 	return status;
1224 }
1225 
1226 static const struct vm_operations_struct vm_ops = {
1227 	.fault = msm_gem_fault,
1228 	.open = drm_gem_vm_open,
1229 	.close = drm_gem_vm_close,
1230 };
1231 
1232 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1233 	.free = msm_gem_free_object,
1234 	.open = msm_gem_open,
1235 	.close = msm_gem_close,
1236 	.export = msm_gem_prime_export,
1237 	.pin = msm_gem_prime_pin,
1238 	.unpin = msm_gem_prime_unpin,
1239 	.get_sg_table = msm_gem_prime_get_sg_table,
1240 	.vmap = msm_gem_prime_vmap,
1241 	.vunmap = msm_gem_prime_vunmap,
1242 	.mmap = msm_gem_object_mmap,
1243 	.status = msm_gem_status,
1244 	.vm_ops = &vm_ops,
1245 };
1246 
msm_gem_new_impl(struct drm_device * dev,uint32_t flags,struct drm_gem_object ** obj)1247 static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
1248 			    struct drm_gem_object **obj)
1249 {
1250 	struct msm_drm_private *priv = dev->dev_private;
1251 	struct msm_gem_object *msm_obj;
1252 
1253 	switch (flags & MSM_BO_CACHE_MASK) {
1254 	case MSM_BO_CACHED:
1255 	case MSM_BO_WC:
1256 		break;
1257 	case MSM_BO_CACHED_COHERENT:
1258 		if (priv->has_cached_coherent)
1259 			break;
1260 		fallthrough;
1261 	default:
1262 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1263 				(flags & MSM_BO_CACHE_MASK));
1264 		return -EINVAL;
1265 	}
1266 
1267 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1268 	if (!msm_obj)
1269 		return -ENOMEM;
1270 
1271 	msm_obj->flags = flags;
1272 	msm_obj->madv = MSM_MADV_WILLNEED;
1273 
1274 	INIT_LIST_HEAD(&msm_obj->node);
1275 
1276 	*obj = &msm_obj->base;
1277 	(*obj)->funcs = &msm_gem_object_funcs;
1278 
1279 	return 0;
1280 }
1281 
msm_gem_new(struct drm_device * dev,size_t size,uint32_t flags)1282 struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
1283 {
1284 	struct msm_drm_private *priv = dev->dev_private;
1285 	struct msm_gem_object *msm_obj;
1286 	struct drm_gem_object *obj = NULL;
1287 	int ret;
1288 
1289 	size = PAGE_ALIGN(size);
1290 
1291 	/* Disallow zero sized objects as they make the underlying
1292 	 * infrastructure grumpy
1293 	 */
1294 	if (size == 0)
1295 		return ERR_PTR(-EINVAL);
1296 
1297 	ret = msm_gem_new_impl(dev, flags, &obj);
1298 	if (ret)
1299 		return ERR_PTR(ret);
1300 
1301 	msm_obj = to_msm_bo(obj);
1302 
1303 	ret = drm_gem_object_init(dev, obj, size);
1304 	if (ret)
1305 		goto fail;
1306 	/*
1307 	 * Our buffers are kept pinned, so allocating them from the
1308 	 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1309 	 * See comments above new_inode() why this is required _and_
1310 	 * expected if you're going to pin these pages.
1311 	 */
1312 	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1313 
1314 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1315 
1316 	mutex_lock(&priv->obj_lock);
1317 	list_add_tail(&msm_obj->node, &priv->objects);
1318 	mutex_unlock(&priv->obj_lock);
1319 
1320 	ret = drm_gem_create_mmap_offset(obj);
1321 	if (ret)
1322 		goto fail;
1323 
1324 	return obj;
1325 
1326 fail:
1327 	drm_gem_object_put(obj);
1328 	return ERR_PTR(ret);
1329 }
1330 
msm_gem_import(struct drm_device * dev,struct dma_buf * dmabuf,struct sg_table * sgt)1331 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1332 		struct dma_buf *dmabuf, struct sg_table *sgt)
1333 {
1334 	struct msm_drm_private *priv = dev->dev_private;
1335 	struct msm_gem_object *msm_obj;
1336 	struct drm_gem_object *obj;
1337 	size_t size, npages;
1338 	int ret;
1339 
1340 	size = PAGE_ALIGN(dmabuf->size);
1341 
1342 	ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
1343 	if (ret)
1344 		return ERR_PTR(ret);
1345 
1346 	drm_gem_private_object_init(dev, obj, size);
1347 
1348 	npages = size / PAGE_SIZE;
1349 
1350 	msm_obj = to_msm_bo(obj);
1351 	msm_gem_lock(obj);
1352 	msm_obj->sgt = sgt;
1353 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1354 	if (!msm_obj->pages) {
1355 		msm_gem_unlock(obj);
1356 		ret = -ENOMEM;
1357 		goto fail;
1358 	}
1359 
1360 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1361 	if (ret) {
1362 		msm_gem_unlock(obj);
1363 		goto fail;
1364 	}
1365 
1366 	msm_gem_unlock(obj);
1367 
1368 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1369 
1370 	mutex_lock(&priv->obj_lock);
1371 	list_add_tail(&msm_obj->node, &priv->objects);
1372 	mutex_unlock(&priv->obj_lock);
1373 
1374 	ret = drm_gem_create_mmap_offset(obj);
1375 	if (ret)
1376 		goto fail;
1377 
1378 	return obj;
1379 
1380 fail:
1381 	drm_gem_object_put(obj);
1382 	return ERR_PTR(ret);
1383 }
1384 
msm_gem_kernel_new(struct drm_device * dev,size_t size,uint32_t flags,struct drm_gpuvm * vm,struct drm_gem_object ** bo,uint64_t * iova)1385 void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
1386 			 struct drm_gpuvm *vm, struct drm_gem_object **bo,
1387 			 uint64_t *iova)
1388 {
1389 	void *vaddr;
1390 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1391 	int ret;
1392 
1393 	if (IS_ERR(obj))
1394 		return ERR_CAST(obj);
1395 
1396 	if (iova) {
1397 		ret = msm_gem_get_and_pin_iova(obj, vm, iova);
1398 		if (ret)
1399 			goto err;
1400 	}
1401 
1402 	vaddr = msm_gem_get_vaddr(obj);
1403 	if (IS_ERR(vaddr)) {
1404 		msm_gem_unpin_iova(obj, vm);
1405 		ret = PTR_ERR(vaddr);
1406 		goto err;
1407 	}
1408 
1409 	if (bo)
1410 		*bo = obj;
1411 
1412 	return vaddr;
1413 err:
1414 	drm_gem_object_put(obj);
1415 
1416 	return ERR_PTR(ret);
1417 
1418 }
1419 
msm_gem_kernel_put(struct drm_gem_object * bo,struct drm_gpuvm * vm)1420 void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
1421 {
1422 	if (IS_ERR_OR_NULL(bo))
1423 		return;
1424 
1425 	msm_gem_put_vaddr(bo);
1426 	msm_gem_unpin_iova(bo, vm);
1427 	drm_gem_object_put(bo);
1428 }
1429 
msm_gem_object_set_name(struct drm_gem_object * bo,const char * fmt,...)1430 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1431 {
1432 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1433 	va_list ap;
1434 
1435 	if (!fmt)
1436 		return;
1437 
1438 	va_start(ap, fmt);
1439 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1440 	va_end(ap);
1441 }
1442