xref: /linux/drivers/gpu/drm/msm/msm_gem.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 #include <drm/drm_file.h>
16 
17 #include <trace/events/gpu_mem.h>
18 
19 #include "msm_drv.h"
20 #include "msm_fence.h"
21 #include "msm_gem.h"
22 #include "msm_gpu.h"
23 #include "msm_mmu.h"
24 
25 static dma_addr_t physaddr(struct drm_gem_object *obj)
26 {
27 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
28 	struct msm_drm_private *priv = obj->dev->dev_private;
29 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
30 			priv->vram.paddr;
31 }
32 
33 static bool use_pages(struct drm_gem_object *obj)
34 {
35 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
36 	return !msm_obj->vram_node;
37 }
38 
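/*
 * Device-wide memory accounting: feeds the gpu_mem_total tracepoint with
 * gpu_id 0 and pid 0, i.e. the total backing memory allocated by the driver.
 * Per-file accounting is done separately in update_ctx_mem() below.
 */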
39 static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
40 {
41 	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
42 	trace_gpu_mem_total(0, 0, total_mem);
43 }
44 
45 static void update_ctx_mem(struct drm_file *file, ssize_t size)
46 {
47 	struct msm_file_private *ctx = file->driver_priv;
48 	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
49 
50 	rcu_read_lock(); /* Locks file->pid! */
51 	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
52 	rcu_read_unlock();
53 
54 }
55 
56 static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
57 {
58 	update_ctx_mem(file, obj->size);
59 	return 0;
60 }
61 
62 static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
63 {
64 	update_ctx_mem(file, -obj->size);
65 }
66 
67 /*
68  * Cache sync.. this is a bit over-complicated, to fit dma-mapping
69  * API.  Really GPU cache is out of scope here (handled on cmdstream)
70  * and all we need to do is invalidate newly allocated pages before
71  * mapping to CPU as uncached/writecombine.
72  *
73  * On top of this, we have the added headache, that depending on
74  * display generation, the display's iommu may be wired up to either
75  * the toplevel drm device (mdss), or to the mdp sub-node, meaning
76  * that here we either have dma-direct or iommu ops.
77  *
78  * Let this be a cautionary tale of abstraction gone wrong.
79  */
80 
81 static void sync_for_device(struct msm_gem_object *msm_obj)
82 {
83 	struct device *dev = msm_obj->base.dev->dev;
84 
85 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
86 }
87 
88 static void sync_for_cpu(struct msm_gem_object *msm_obj)
89 {
90 	struct device *dev = msm_obj->base.dev->dev;
91 
92 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
93 }
94 
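/*
 * LRU bookkeeping: an object lives on exactly one of the priv->lru lists,
 * depending on whether it has backing pages (unbacked vs backed), whether
 * it is pinned, and its madvise state (willneed vs dontneed).  These lists
 * must be kept up to date whenever pages, pin_count or madv change, so the
 * shrinker can pick eviction/purge candidates from them.
 */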
95 static void update_lru_active(struct drm_gem_object *obj)
96 {
97 	struct msm_drm_private *priv = obj->dev->dev_private;
98 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99 
100 	GEM_WARN_ON(!msm_obj->pages);
101 
102 	if (msm_obj->pin_count) {
103 		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
104 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
105 		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
106 	} else {
107 		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
108 
109 		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
110 	}
111 }
112 
113 static void update_lru_locked(struct drm_gem_object *obj)
114 {
115 	struct msm_drm_private *priv = obj->dev->dev_private;
116 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
117 
118 	msm_gem_assert_locked(&msm_obj->base);
119 
120 	if (!msm_obj->pages) {
121 		GEM_WARN_ON(msm_obj->pin_count);
122 
123 		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
124 	} else {
125 		update_lru_active(obj);
126 	}
127 }
128 
129 static void update_lru(struct drm_gem_object *obj)
130 {
131 	struct msm_drm_private *priv = obj->dev->dev_private;
132 
133 	mutex_lock(&priv->lru.lock);
134 	update_lru_locked(obj);
135 	mutex_unlock(&priv->lru.lock);
136 }
137 
138 /* allocate pages from VRAM carveout, used when no IOMMU: */
139 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
140 {
141 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
142 	struct msm_drm_private *priv = obj->dev->dev_private;
143 	dma_addr_t paddr;
144 	struct page **p;
145 	int ret, i;
146 
147 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
148 	if (!p)
149 		return ERR_PTR(-ENOMEM);
150 
151 	spin_lock(&priv->vram.lock);
152 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
153 	spin_unlock(&priv->vram.lock);
154 	if (ret) {
155 		kvfree(p);
156 		return ERR_PTR(ret);
157 	}
158 
159 	paddr = physaddr(obj);
160 	for (i = 0; i < npages; i++) {
161 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
162 		paddr += PAGE_SIZE;
163 	}
164 
165 	return p;
166 }
167 
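/*
 * Lazily allocate the backing pages (shmem, or the VRAM carveout when there
 * is no IOMMU) and build the sg_table on first use.  Caller must hold the
 * object lock.
 */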
168 static struct page **get_pages(struct drm_gem_object *obj)
169 {
170 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
171 
172 	msm_gem_assert_locked(obj);
173 
174 	if (!msm_obj->pages) {
175 		struct drm_device *dev = obj->dev;
176 		struct page **p;
177 		int npages = obj->size >> PAGE_SHIFT;
178 
179 		if (use_pages(obj))
180 			p = drm_gem_get_pages(obj);
181 		else
182 			p = get_pages_vram(obj, npages);
183 
184 		if (IS_ERR(p)) {
185 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
186 					PTR_ERR(p));
187 			return p;
188 		}
189 
190 		update_device_mem(dev->dev_private, obj->size);
191 
192 		msm_obj->pages = p;
193 
194 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
195 		if (IS_ERR(msm_obj->sgt)) {
196 			void *ptr = ERR_CAST(msm_obj->sgt);
197 
198 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
199 			msm_obj->sgt = NULL;
200 			return ptr;
201 		}
202 
203 		/* For non-cached buffers, ensure the new pages are clean
204 		 * because display controller, GPU, etc. are not coherent:
205 		 */
206 		if (msm_obj->flags & MSM_BO_WC)
207 			sync_for_device(msm_obj);
208 
209 		update_lru(obj);
210 	}
211 
212 	return msm_obj->pages;
213 }
214 
215 static void put_pages_vram(struct drm_gem_object *obj)
216 {
217 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
218 	struct msm_drm_private *priv = obj->dev->dev_private;
219 
220 	spin_lock(&priv->vram.lock);
221 	drm_mm_remove_node(msm_obj->vram_node);
222 	spin_unlock(&priv->vram.lock);
223 
224 	kvfree(msm_obj->pages);
225 }
226 
227 static void put_pages(struct drm_gem_object *obj)
228 {
229 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
230 
231 	if (msm_obj->pages) {
232 		if (msm_obj->sgt) {
233 			/* For non-cached buffers, ensure the new
234 			 * pages are clean because display controller,
235 			 * GPU, etc. are not coherent:
236 			 */
237 			if (msm_obj->flags & MSM_BO_WC)
238 				sync_for_cpu(msm_obj);
239 
240 			sg_free_table(msm_obj->sgt);
241 			kfree(msm_obj->sgt);
242 			msm_obj->sgt = NULL;
243 		}
244 
245 		update_device_mem(obj->dev->dev_private, -obj->size);
246 
247 		if (use_pages(obj))
248 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
249 		else
250 			put_pages_vram(obj);
251 
252 		msm_obj->pages = NULL;
253 		update_lru(obj);
254 	}
255 }
256 
257 static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
258 					      unsigned madv)
259 {
260 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
261 
262 	msm_gem_assert_locked(obj);
263 
264 	if (msm_obj->madv > madv) {
265 		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
266 				     msm_obj->madv, madv);
267 		return ERR_PTR(-EBUSY);
268 	}
269 
270 	return get_pages(obj);
271 }
272 
273 /*
274  * Update the pin count of the object, call under lru.lock
275  */
276 void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
277 {
278 	struct msm_drm_private *priv = obj->dev->dev_private;
279 
280 	msm_gem_assert_locked(obj);
281 
282 	to_msm_bo(obj)->pin_count++;
283 	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
284 }
285 
286 static void pin_obj_locked(struct drm_gem_object *obj)
287 {
288 	struct msm_drm_private *priv = obj->dev->dev_private;
289 
290 	mutex_lock(&priv->lru.lock);
291 	msm_gem_pin_obj_locked(obj);
292 	mutex_unlock(&priv->lru.lock);
293 }
294 
295 struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
296 {
297 	struct page **p;
298 
299 	msm_gem_assert_locked(obj);
300 
301 	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
302 	if (!IS_ERR(p))
303 		pin_obj_locked(obj);
304 
305 	return p;
306 }
307 
308 void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
309 {
310 	msm_gem_assert_locked(obj);
311 
312 	msm_gem_unpin_locked(obj);
313 }
314 
315 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
316 {
317 	if (msm_obj->flags & MSM_BO_WC)
318 		return pgprot_writecombine(prot);
319 	return prot;
320 }
321 
322 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
323 {
324 	struct vm_area_struct *vma = vmf->vma;
325 	struct drm_gem_object *obj = vma->vm_private_data;
326 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
327 	struct page **pages;
328 	unsigned long pfn;
329 	pgoff_t pgoff;
330 	int err;
331 	vm_fault_t ret;
332 
333 	/*
334 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
335 	 * a reference on obj. So, we don't need to hold one here.
336 	 */
337 	err = msm_gem_lock_interruptible(obj);
338 	if (err) {
339 		ret = VM_FAULT_NOPAGE;
340 		goto out;
341 	}
342 
343 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
344 		msm_gem_unlock(obj);
345 		return VM_FAULT_SIGBUS;
346 	}
347 
348 	/* make sure we have pages attached now */
349 	pages = get_pages(obj);
350 	if (IS_ERR(pages)) {
351 		ret = vmf_error(PTR_ERR(pages));
352 		goto out_unlock;
353 	}
354 
355 	/* We don't use vmf->pgoff since that has the fake offset: */
356 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
357 
358 	pfn = page_to_pfn(pages[pgoff]);
359 
360 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
361 			pfn, pfn << PAGE_SHIFT);
362 
363 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
364 
365 out_unlock:
366 	msm_gem_unlock(obj);
367 out:
368 	return ret;
369 }
370 
371 /** get mmap offset */
372 static uint64_t mmap_offset(struct drm_gem_object *obj)
373 {
374 	struct drm_device *dev = obj->dev;
375 	int ret;
376 
377 	msm_gem_assert_locked(obj);
378 
379 	/* Make it mmapable */
380 	ret = drm_gem_create_mmap_offset(obj);
381 
382 	if (ret) {
383 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
384 		return 0;
385 	}
386 
387 	return drm_vma_node_offset_addr(&obj->vma_node);
388 }
389 
390 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
391 {
392 	uint64_t offset;
393 
394 	msm_gem_lock(obj);
395 	offset = mmap_offset(obj);
396 	msm_gem_unlock(obj);
397 	return offset;
398 }
399 
400 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
401 		struct msm_gem_address_space *aspace)
402 {
403 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
404 	struct msm_gem_vma *vma;
405 
406 	msm_gem_assert_locked(obj);
407 
408 	vma = msm_gem_vma_new(aspace);
409 	if (!vma)
410 		return ERR_PTR(-ENOMEM);
411 
412 	list_add_tail(&vma->list, &msm_obj->vmas);
413 
414 	return vma;
415 }
416 
417 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
418 		struct msm_gem_address_space *aspace)
419 {
420 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
421 	struct msm_gem_vma *vma;
422 
423 	msm_gem_assert_locked(obj);
424 
425 	list_for_each_entry(vma, &msm_obj->vmas, list) {
426 		if (vma->aspace == aspace)
427 			return vma;
428 	}
429 
430 	return NULL;
431 }
432 
433 static void del_vma(struct msm_gem_vma *vma)
434 {
435 	if (!vma)
436 		return;
437 
438 	list_del(&vma->list);
439 	kfree(vma);
440 }
441 
442 /*
443  * If close is true, this also closes the VMA (releasing the allocated
444  * iova range) in addition to removing the iommu mapping.  In the eviction
445  * case (!close), we keep the iova allocated, but only remove the iommu
446  * mapping.
447  */
448 static void
449 put_iova_spaces(struct drm_gem_object *obj, bool close)
450 {
451 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
452 	struct msm_gem_vma *vma;
453 
454 	msm_gem_assert_locked(obj);
455 
456 	list_for_each_entry(vma, &msm_obj->vmas, list) {
457 		if (vma->aspace) {
458 			msm_gem_vma_purge(vma);
459 			if (close)
460 				msm_gem_vma_close(vma);
461 		}
462 	}
463 }
464 
465 /* Called with msm_obj locked */
466 static void
467 put_iova_vmas(struct drm_gem_object *obj)
468 {
469 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
470 	struct msm_gem_vma *vma, *tmp;
471 
472 	msm_gem_assert_locked(obj);
473 
474 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
475 		del_vma(vma);
476 	}
477 }
478 
479 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
480 		struct msm_gem_address_space *aspace,
481 		u64 range_start, u64 range_end)
482 {
483 	struct msm_gem_vma *vma;
484 
485 	msm_gem_assert_locked(obj);
486 
487 	vma = lookup_vma(obj, aspace);
488 
489 	if (!vma) {
490 		int ret;
491 
492 		vma = add_vma(obj, aspace);
493 		if (IS_ERR(vma))
494 			return vma;
495 
496 		ret = msm_gem_vma_init(vma, obj->size,
497 			range_start, range_end);
498 		if (ret) {
499 			del_vma(vma);
500 			return ERR_PTR(ret);
501 		}
502 	} else {
503 		GEM_WARN_ON(vma->iova < range_start);
504 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
505 	}
506 
507 	return vma;
508 }
509 
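/*
 * Map the object's backing pages into @vma, deriving the IOMMU prot bits
 * (read-only, privileged, cached-coherent) from the MSM_BO_* flags.  Caller
 * must hold the object lock.
 */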
510 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
511 {
512 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
513 	struct page **pages;
514 	int prot = IOMMU_READ;
515 
516 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
517 		prot |= IOMMU_WRITE;
518 
519 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
520 		prot |= IOMMU_PRIV;
521 
522 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
523 		prot |= IOMMU_CACHE;
524 
525 	msm_gem_assert_locked(obj);
526 
527 	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
528 	if (IS_ERR(pages))
529 		return PTR_ERR(pages);
530 
531 	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
532 }
533 
534 void msm_gem_unpin_locked(struct drm_gem_object *obj)
535 {
536 	struct msm_drm_private *priv = obj->dev->dev_private;
537 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
538 
539 	msm_gem_assert_locked(obj);
540 
541 	mutex_lock(&priv->lru.lock);
542 	msm_obj->pin_count--;
543 	GEM_WARN_ON(msm_obj->pin_count < 0);
544 	update_lru_locked(obj);
545 	mutex_unlock(&priv->lru.lock);
546 }
547 
548 /* Special unpin path for use in the fence-signaling path, avoiding the
549  * need to hold the obj lock by depending only on things that are
550  * protected by the LRU lock.  In particular, we know that we already have
551  * backing and that the object's dma_resv holds the fence for the current
552  * submit/job, which prevents us racing against page eviction.
553  */
554 void msm_gem_unpin_active(struct drm_gem_object *obj)
555 {
556 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
557 
558 	msm_obj->pin_count--;
559 	GEM_WARN_ON(msm_obj->pin_count < 0);
560 	update_lru_active(obj);
561 }
562 
563 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
564 					   struct msm_gem_address_space *aspace)
565 {
566 	return get_vma_locked(obj, aspace, 0, U64_MAX);
567 }
568 
569 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
570 		struct msm_gem_address_space *aspace, uint64_t *iova,
571 		u64 range_start, u64 range_end)
572 {
573 	struct msm_gem_vma *vma;
574 	int ret;
575 
576 	msm_gem_assert_locked(obj);
577 
578 	vma = get_vma_locked(obj, aspace, range_start, range_end);
579 	if (IS_ERR(vma))
580 		return PTR_ERR(vma);
581 
582 	ret = msm_gem_pin_vma_locked(obj, vma);
583 	if (!ret) {
584 		*iova = vma->iova;
585 		pin_obj_locked(obj);
586 	}
587 
588 	return ret;
589 }
590 
591 /*
592  * get iova and pin it. Should have a matching put
593  * limits iova to specified range (in pages)
594  */
595 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
596 		struct msm_gem_address_space *aspace, uint64_t *iova,
597 		u64 range_start, u64 range_end)
598 {
599 	int ret;
600 
601 	msm_gem_lock(obj);
602 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
603 	msm_gem_unlock(obj);
604 
605 	return ret;
606 }
607 
608 /* get iova and pin it. Should have a matching put */
609 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
610 		struct msm_gem_address_space *aspace, uint64_t *iova)
611 {
612 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
613 }
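
/* Illustrative pairing (not taken from a real caller; "aspace" here stands
 * for whatever address space the caller owns):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... hand iova to the GPU ...
 *	msm_gem_unpin_iova(obj, aspace);
 */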
614 
615 /*
616  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
617  * valid for the life of the object
618  */
619 int msm_gem_get_iova(struct drm_gem_object *obj,
620 		struct msm_gem_address_space *aspace, uint64_t *iova)
621 {
622 	struct msm_gem_vma *vma;
623 	int ret = 0;
624 
625 	msm_gem_lock(obj);
626 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
627 	if (IS_ERR(vma)) {
628 		ret = PTR_ERR(vma);
629 	} else {
630 		*iova = vma->iova;
631 	}
632 	msm_gem_unlock(obj);
633 
634 	return ret;
635 }
636 
637 static int clear_iova(struct drm_gem_object *obj,
638 		      struct msm_gem_address_space *aspace)
639 {
640 	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
641 
642 	if (!vma)
643 		return 0;
644 
645 	msm_gem_vma_purge(vma);
646 	msm_gem_vma_close(vma);
647 	del_vma(vma);
648 
649 	return 0;
650 }
651 
652 /*
653  * Get the requested iova but don't pin it.  Fails if the requested iova is
654  * not available.  Doesn't need a put because iovas are currently valid for
655  * the life of the object.
656  *
657  * Setting an iova of zero will clear the vma.
658  */
659 int msm_gem_set_iova(struct drm_gem_object *obj,
660 		     struct msm_gem_address_space *aspace, uint64_t iova)
661 {
662 	int ret = 0;
663 
664 	msm_gem_lock(obj);
665 	if (!iova) {
666 		ret = clear_iova(obj, aspace);
667 	} else {
668 		struct msm_gem_vma *vma;
669 		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
670 		if (IS_ERR(vma)) {
671 			ret = PTR_ERR(vma);
672 		} else if (GEM_WARN_ON(vma->iova != iova)) {
673 			clear_iova(obj, aspace);
674 			ret = -EBUSY;
675 		}
676 	}
677 	msm_gem_unlock(obj);
678 
679 	return ret;
680 }
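
/* Illustrative use (hypothetical fixed_iova value): reserve the vma at a
 * caller-chosen address, e.g. when restoring a previously reported iova,
 * and pass 0 to drop it again:
 *
 *	ret = msm_gem_set_iova(obj, aspace, fixed_iova);
 *	...
 *	msm_gem_set_iova(obj, aspace, 0);
 */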
681 
682 /*
683  * Unpin an iova by updating the reference counts. The memory isn't actually
684  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
685  * to get rid of it
686  */
687 void msm_gem_unpin_iova(struct drm_gem_object *obj,
688 		struct msm_gem_address_space *aspace)
689 {
690 	struct msm_gem_vma *vma;
691 
692 	msm_gem_lock(obj);
693 	vma = lookup_vma(obj, aspace);
694 	if (!GEM_WARN_ON(!vma)) {
695 		msm_gem_unpin_locked(obj);
696 	}
697 	msm_gem_unlock(obj);
698 }
699 
700 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
701 		struct drm_mode_create_dumb *args)
702 {
703 	args->pitch = align_pitch(args->width, args->bpp);
704 	args->size  = PAGE_ALIGN(args->pitch * args->height);
705 	return msm_gem_new_handle(dev, file, args->size,
706 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
707 }
708 
709 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
710 		uint32_t handle, uint64_t *offset)
711 {
712 	struct drm_gem_object *obj;
713 	int ret = 0;
714 
715 	/* GEM does all our handle to object mapping */
716 	obj = drm_gem_object_lookup(file, handle);
717 	if (obj == NULL) {
718 		ret = -ENOENT;
719 		goto fail;
720 	}
721 
722 	*offset = msm_gem_mmap_offset(obj);
723 
724 	drm_gem_object_put(obj);
725 
726 fail:
727 	return ret;
728 }
729 
730 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
731 {
732 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
733 	struct page **pages;
734 	int ret = 0;
735 
736 	msm_gem_assert_locked(obj);
737 
738 	if (obj->import_attach)
739 		return ERR_PTR(-ENODEV);
740 
741 	pages = msm_gem_get_pages_locked(obj, madv);
742 	if (IS_ERR(pages))
743 		return ERR_CAST(pages);
744 
745 	pin_obj_locked(obj);
746 
747 	/* increment vmap_count *before* vmap() call, so shrinker can
748 	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
749 	 * This guarantees that we won't try to msm_gem_vunmap() this
750 	 * same object from within the vmap() call (while we already
751 	 * hold msm_obj lock)
752 	 */
753 	msm_obj->vmap_count++;
754 
755 	if (!msm_obj->vaddr) {
756 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
757 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
758 		if (msm_obj->vaddr == NULL) {
759 			ret = -ENOMEM;
760 			goto fail;
761 		}
762 	}
763 
764 	return msm_obj->vaddr;
765 
766 fail:
767 	msm_obj->vmap_count--;
768 	msm_gem_unpin_locked(obj);
769 	return ERR_PTR(ret);
770 }
771 
772 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
773 {
774 	return get_vaddr(obj, MSM_MADV_WILLNEED);
775 }
776 
777 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
778 {
779 	void *ret;
780 
781 	msm_gem_lock(obj);
782 	ret = msm_gem_get_vaddr_locked(obj);
783 	msm_gem_unlock(obj);
784 
785 	return ret;
786 }
787 
788 /*
789  * Don't use this!  It is for the very special case of dumping
790  * submits from GPU hangs or faults, where the bo may already
791  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
792  * active list.
793  */
794 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
795 {
796 	return get_vaddr(obj, __MSM_MADV_PURGED);
797 }
798 
799 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
800 {
801 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
802 
803 	msm_gem_assert_locked(obj);
804 	GEM_WARN_ON(msm_obj->vmap_count < 1);
805 
806 	msm_obj->vmap_count--;
807 	msm_gem_unpin_locked(obj);
808 }
809 
810 void msm_gem_put_vaddr(struct drm_gem_object *obj)
811 {
812 	msm_gem_lock(obj);
813 	msm_gem_put_vaddr_locked(obj);
814 	msm_gem_unlock(obj);
815 }
816 
817 /* Update madvise status, returns true if not purged, else
818  * false or -errno.
819  */
820 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
821 {
822 	struct msm_drm_private *priv = obj->dev->dev_private;
823 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
824 
825 	msm_gem_lock(obj);
826 
827 	mutex_lock(&priv->lru.lock);
828 
829 	if (msm_obj->madv != __MSM_MADV_PURGED)
830 		msm_obj->madv = madv;
831 
832 	madv = msm_obj->madv;
833 
834 	/* If the obj is inactive, we might need to move it
835 	 * between inactive lists
836 	 */
837 	update_lru_locked(obj);
838 
839 	mutex_unlock(&priv->lru.lock);
840 
841 	msm_gem_unlock(obj);
842 
843 	return (madv != __MSM_MADV_PURGED);
844 }
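
/* Illustrative caller pattern (hypothetical): mark an idle BO as purgeable
 * and later check whether its contents survived before reusing them:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	...
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		; // purged by the shrinker, contents must be regenerated
 */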
845 
846 void msm_gem_purge(struct drm_gem_object *obj)
847 {
848 	struct drm_device *dev = obj->dev;
849 	struct msm_drm_private *priv = obj->dev->dev_private;
850 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
851 
852 	msm_gem_assert_locked(obj);
853 	GEM_WARN_ON(!is_purgeable(msm_obj));
854 
855 	/* Get rid of any iommu mapping(s): */
856 	put_iova_spaces(obj, true);
857 
858 	msm_gem_vunmap(obj);
859 
860 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
861 
862 	put_pages(obj);
863 
864 	put_iova_vmas(obj);
865 
866 	mutex_lock(&priv->lru.lock);
867 	/* A one-way transition: */
868 	msm_obj->madv = __MSM_MADV_PURGED;
869 	mutex_unlock(&priv->lru.lock);
870 
871 	drm_gem_free_mmap_offset(obj);
872 
873 	/* Our goal here is to return as much of the memory as possible
874 	 * back to the system, as we are called from OOM.
875 	 * To do this we must instruct the shmfs to drop all of its
876 	 * backing pages, *now*.
877 	 */
878 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
879 
880 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
881 			0, (loff_t)-1);
882 }
883 
884 /*
885  * Unpin the backing pages and make them available to be swapped out.
886  */
887 void msm_gem_evict(struct drm_gem_object *obj)
888 {
889 	struct drm_device *dev = obj->dev;
890 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
891 
892 	msm_gem_assert_locked(obj);
893 	GEM_WARN_ON(is_unevictable(msm_obj));
894 
895 	/* Get rid of any iommu mapping(s): */
896 	put_iova_spaces(obj, false);
897 
898 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
899 
900 	put_pages(obj);
901 }
902 
903 void msm_gem_vunmap(struct drm_gem_object *obj)
904 {
905 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
906 
907 	msm_gem_assert_locked(obj);
908 
909 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
910 		return;
911 
912 	vunmap(msm_obj->vaddr);
913 	msm_obj->vaddr = NULL;
914 }
915 
916 bool msm_gem_active(struct drm_gem_object *obj)
917 {
918 	msm_gem_assert_locked(obj);
919 
920 	if (to_msm_bo(obj)->pin_count)
921 		return true;
922 
923 	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
924 }
925 
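/* Wait for implicit fences on the BO before CPU access.  MSM_PREP_WRITE
 * selects whether we wait on all fences or only on writers, MSM_PREP_NOSYNC
 * turns this into a non-blocking check (-EBUSY if still busy), and
 * MSM_PREP_BOOST gives the outstanding fences an immediate deadline hint.
 */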
926 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
927 {
928 	bool write = !!(op & MSM_PREP_WRITE);
929 	unsigned long remain =
930 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
931 	long ret;
932 
933 	if (op & MSM_PREP_BOOST) {
934 		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
935 				      ktime_get());
936 	}
937 
938 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
939 				    true,  remain);
940 	if (ret == 0)
941 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
942 	else if (ret < 0)
943 		return ret;
944 
945 	/* TODO cache maintenance */
946 
947 	return 0;
948 }
949 
950 int msm_gem_cpu_fini(struct drm_gem_object *obj)
951 {
952 	/* TODO cache maintenance */
953 	return 0;
954 }
955 
956 #ifdef CONFIG_DEBUG_FS
957 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
958 		struct msm_gem_stats *stats)
959 {
960 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
961 	struct dma_resv *robj = obj->resv;
962 	struct msm_gem_vma *vma;
963 	uint64_t off = drm_vma_node_start(&obj->vma_node);
964 	const char *madv;
965 
966 	msm_gem_lock(obj);
967 
968 	stats->all.count++;
969 	stats->all.size += obj->size;
970 
971 	if (msm_gem_active(obj)) {
972 		stats->active.count++;
973 		stats->active.size += obj->size;
974 	}
975 
976 	if (msm_obj->pages) {
977 		stats->resident.count++;
978 		stats->resident.size += obj->size;
979 	}
980 
981 	switch (msm_obj->madv) {
982 	case __MSM_MADV_PURGED:
983 		stats->purged.count++;
984 		stats->purged.size += obj->size;
985 		madv = " purged";
986 		break;
987 	case MSM_MADV_DONTNEED:
988 		stats->purgeable.count++;
989 		stats->purgeable.size += obj->size;
990 		madv = " purgeable";
991 		break;
992 	case MSM_MADV_WILLNEED:
993 	default:
994 		madv = "";
995 		break;
996 	}
997 
998 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
999 			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
1000 			obj->name, kref_read(&obj->refcount),
1001 			off, msm_obj->vaddr);
1002 
1003 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
1004 
1005 	if (!list_empty(&msm_obj->vmas)) {
1006 
1007 		seq_puts(m, "      vmas:");
1008 
1009 		list_for_each_entry(vma, &msm_obj->vmas, list) {
1010 			const char *name, *comm;
1011 			if (vma->aspace) {
1012 				struct msm_gem_address_space *aspace = vma->aspace;
1013 				struct task_struct *task =
1014 					get_pid_task(aspace->pid, PIDTYPE_PID);
1015 				if (task) {
1016 					comm = kstrdup(task->comm, GFP_KERNEL);
1017 					put_task_struct(task);
1018 				} else {
1019 					comm = NULL;
1020 				}
1021 				name = aspace->name;
1022 			} else {
1023 				name = comm = NULL;
1024 			}
1025 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
1026 				name, comm ? ":" : "", comm ? comm : "",
1027 				vma->aspace, vma->iova,
1028 				vma->mapped ? "mapped" : "unmapped");
1029 			kfree(comm);
1030 		}
1031 
1032 		seq_puts(m, "\n");
1033 	}
1034 
1035 	dma_resv_describe(robj, m);
1036 	msm_gem_unlock(obj);
1037 }
1038 
1039 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1040 {
1041 	struct msm_gem_stats stats = {};
1042 	struct msm_gem_object *msm_obj;
1043 
1044 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1045 	list_for_each_entry(msm_obj, list, node) {
1046 		struct drm_gem_object *obj = &msm_obj->base;
1047 		seq_puts(m, "   ");
1048 		msm_gem_describe(obj, m, &stats);
1049 	}
1050 
1051 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1052 			stats.all.count, stats.all.size);
1053 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1054 			stats.active.count, stats.active.size);
1055 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1056 			stats.resident.count, stats.resident.size);
1057 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1058 			stats.purgeable.count, stats.purgeable.size);
1059 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1060 			stats.purged.count, stats.purged.size);
1061 }
1062 #endif
1063 
1064 /* don't call directly!  Use drm_gem_object_put() */
1065 static void msm_gem_free_object(struct drm_gem_object *obj)
1066 {
1067 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1068 	struct drm_device *dev = obj->dev;
1069 	struct msm_drm_private *priv = dev->dev_private;
1070 
1071 	mutex_lock(&priv->obj_lock);
1072 	list_del(&msm_obj->node);
1073 	mutex_unlock(&priv->obj_lock);
1074 
1075 	put_iova_spaces(obj, true);
1076 
1077 	if (obj->import_attach) {
1078 		GEM_WARN_ON(msm_obj->vaddr);
1079 
1080 		/* Don't drop the pages for imported dmabuf, as they are not
1081 		 * ours, just free the array we allocated:
1082 		 */
1083 		kvfree(msm_obj->pages);
1084 
1085 		put_iova_vmas(obj);
1086 
1087 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1088 	} else {
1089 		msm_gem_vunmap(obj);
1090 		put_pages(obj);
1091 		put_iova_vmas(obj);
1092 	}
1093 
1094 	drm_gem_object_release(obj);
1095 
1096 	kfree(msm_obj->metadata);
1097 	kfree(msm_obj);
1098 }
1099 
1100 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1101 {
1102 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1103 
1104 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1105 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1106 
1107 	return 0;
1108 }
1109 
1110 /* convenience method to construct a GEM buffer object, and userspace handle */
1111 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1112 		uint32_t size, uint32_t flags, uint32_t *handle,
1113 		char *name)
1114 {
1115 	struct drm_gem_object *obj;
1116 	int ret;
1117 
1118 	obj = msm_gem_new(dev, size, flags);
1119 
1120 	if (IS_ERR(obj))
1121 		return PTR_ERR(obj);
1122 
1123 	if (name)
1124 		msm_gem_object_set_name(obj, "%s", name);
1125 
1126 	ret = drm_gem_handle_create(file, obj, handle);
1127 
1128 	/* drop reference from allocate - handle holds it now */
1129 	drm_gem_object_put(obj);
1130 
1131 	return ret;
1132 }
1133 
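/* drm_gem_object_funcs.status hook: report to the DRM core whether the BO
 * currently has backing pages (resident) and whether it has been marked
 * purgeable via madvise.
 */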
1134 static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1135 {
1136 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1137 	enum drm_gem_object_status status = 0;
1138 
1139 	if (msm_obj->pages)
1140 		status |= DRM_GEM_OBJECT_RESIDENT;
1141 
1142 	if (msm_obj->madv == MSM_MADV_DONTNEED)
1143 		status |= DRM_GEM_OBJECT_PURGEABLE;
1144 
1145 	return status;
1146 }
1147 
1148 static const struct vm_operations_struct vm_ops = {
1149 	.fault = msm_gem_fault,
1150 	.open = drm_gem_vm_open,
1151 	.close = drm_gem_vm_close,
1152 };
1153 
1154 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1155 	.free = msm_gem_free_object,
1156 	.open = msm_gem_open,
1157 	.close = msm_gem_close,
1158 	.pin = msm_gem_prime_pin,
1159 	.unpin = msm_gem_prime_unpin,
1160 	.get_sg_table = msm_gem_prime_get_sg_table,
1161 	.vmap = msm_gem_prime_vmap,
1162 	.vunmap = msm_gem_prime_vunmap,
1163 	.mmap = msm_gem_object_mmap,
1164 	.status = msm_gem_status,
1165 	.vm_ops = &vm_ops,
1166 };
1167 
1168 static int msm_gem_new_impl(struct drm_device *dev,
1169 		uint32_t size, uint32_t flags,
1170 		struct drm_gem_object **obj)
1171 {
1172 	struct msm_drm_private *priv = dev->dev_private;
1173 	struct msm_gem_object *msm_obj;
1174 
1175 	switch (flags & MSM_BO_CACHE_MASK) {
1176 	case MSM_BO_CACHED:
1177 	case MSM_BO_WC:
1178 		break;
1179 	case MSM_BO_CACHED_COHERENT:
1180 		if (priv->has_cached_coherent)
1181 			break;
1182 		fallthrough;
1183 	default:
1184 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1185 				(flags & MSM_BO_CACHE_MASK));
1186 		return -EINVAL;
1187 	}
1188 
1189 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1190 	if (!msm_obj)
1191 		return -ENOMEM;
1192 
1193 	msm_obj->flags = flags;
1194 	msm_obj->madv = MSM_MADV_WILLNEED;
1195 
1196 	INIT_LIST_HEAD(&msm_obj->node);
1197 	INIT_LIST_HEAD(&msm_obj->vmas);
1198 
1199 	*obj = &msm_obj->base;
1200 	(*obj)->funcs = &msm_gem_object_funcs;
1201 
1202 	return 0;
1203 }
1204 
1205 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1206 {
1207 	struct msm_drm_private *priv = dev->dev_private;
1208 	struct msm_gem_object *msm_obj;
1209 	struct drm_gem_object *obj = NULL;
1210 	bool use_vram = false;
1211 	int ret;
1212 
1213 	size = PAGE_ALIGN(size);
1214 
1215 	if (!msm_use_mmu(dev))
1216 		use_vram = true;
1217 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1218 		use_vram = true;
1219 
1220 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1221 		return ERR_PTR(-EINVAL);
1222 
1223 	/* Disallow zero sized objects as they make the underlying
1224 	 * infrastructure grumpy
1225 	 */
1226 	if (size == 0)
1227 		return ERR_PTR(-EINVAL);
1228 
1229 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1230 	if (ret)
1231 		return ERR_PTR(ret);
1232 
1233 	msm_obj = to_msm_bo(obj);
1234 
1235 	if (use_vram) {
1236 		struct msm_gem_vma *vma;
1237 		struct page **pages;
1238 
1239 		drm_gem_private_object_init(dev, obj, size);
1240 
1241 		msm_gem_lock(obj);
1242 
1243 		vma = add_vma(obj, NULL);
1244 		msm_gem_unlock(obj);
1245 		if (IS_ERR(vma)) {
1246 			ret = PTR_ERR(vma);
1247 			goto fail;
1248 		}
1249 
1250 		to_msm_bo(obj)->vram_node = &vma->node;
1251 
1252 		msm_gem_lock(obj);
1253 		pages = get_pages(obj);
1254 		msm_gem_unlock(obj);
1255 		if (IS_ERR(pages)) {
1256 			ret = PTR_ERR(pages);
1257 			goto fail;
1258 		}
1259 
1260 		vma->iova = physaddr(obj);
1261 	} else {
1262 		ret = drm_gem_object_init(dev, obj, size);
1263 		if (ret)
1264 			goto fail;
1265 		/*
1266 		 * Our buffers are kept pinned, so allocating them from the
1267 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1268 		 * See comments above new_inode() why this is required _and_
1269 		 * expected if you're going to pin these pages.
1270 		 */
1271 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1272 	}
1273 
1274 	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1275 
1276 	mutex_lock(&priv->obj_lock);
1277 	list_add_tail(&msm_obj->node, &priv->objects);
1278 	mutex_unlock(&priv->obj_lock);
1279 
1280 	ret = drm_gem_create_mmap_offset(obj);
1281 	if (ret)
1282 		goto fail;
1283 
1284 	return obj;
1285 
1286 fail:
1287 	drm_gem_object_put(obj);
1288 	return ERR_PTR(ret);
1289 }
1290 
1291 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1292 		struct dma_buf *dmabuf, struct sg_table *sgt)
1293 {
1294 	struct msm_drm_private *priv = dev->dev_private;
1295 	struct msm_gem_object *msm_obj;
1296 	struct drm_gem_object *obj;
1297 	uint32_t size;
1298 	int ret, npages;
1299 
1300 	/* if we don't have IOMMU, don't bother pretending we can import: */
1301 	if (!msm_use_mmu(dev)) {
1302 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1303 		return ERR_PTR(-EINVAL);
1304 	}
1305 
1306 	size = PAGE_ALIGN(dmabuf->size);
1307 
1308 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1309 	if (ret)
1310 		return ERR_PTR(ret);
1311 
1312 	drm_gem_private_object_init(dev, obj, size);
1313 
1314 	npages = size / PAGE_SIZE;
1315 
1316 	msm_obj = to_msm_bo(obj);
1317 	msm_gem_lock(obj);
1318 	msm_obj->sgt = sgt;
1319 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1320 	if (!msm_obj->pages) {
1321 		msm_gem_unlock(obj);
1322 		ret = -ENOMEM;
1323 		goto fail;
1324 	}
1325 
1326 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1327 	if (ret) {
1328 		msm_gem_unlock(obj);
1329 		goto fail;
1330 	}
1331 
1332 	msm_gem_unlock(obj);
1333 
1334 	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1335 
1336 	mutex_lock(&priv->obj_lock);
1337 	list_add_tail(&msm_obj->node, &priv->objects);
1338 	mutex_unlock(&priv->obj_lock);
1339 
1340 	ret = drm_gem_create_mmap_offset(obj);
1341 	if (ret)
1342 		goto fail;
1343 
1344 	return obj;
1345 
1346 fail:
1347 	drm_gem_object_put(obj);
1348 	return ERR_PTR(ret);
1349 }
1350 
1351 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1352 		uint32_t flags, struct msm_gem_address_space *aspace,
1353 		struct drm_gem_object **bo, uint64_t *iova)
1354 {
1355 	void *vaddr;
1356 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1357 	int ret;
1358 
1359 	if (IS_ERR(obj))
1360 		return ERR_CAST(obj);
1361 
1362 	if (iova) {
1363 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1364 		if (ret)
1365 			goto err;
1366 	}
1367 
1368 	vaddr = msm_gem_get_vaddr(obj);
1369 	if (IS_ERR(vaddr)) {
1370 		msm_gem_unpin_iova(obj, aspace);
1371 		ret = PTR_ERR(vaddr);
1372 		goto err;
1373 	}
1374 
1375 	if (bo)
1376 		*bo = obj;
1377 
1378 	return vaddr;
1379 err:
1380 	drm_gem_object_put(obj);
1381 
1382 	return ERR_PTR(ret);
1383 
1384 }
1385 
1386 void msm_gem_kernel_put(struct drm_gem_object *bo,
1387 		struct msm_gem_address_space *aspace)
1388 {
1389 	if (IS_ERR_OR_NULL(bo))
1390 		return;
1391 
1392 	msm_gem_put_vaddr(bo);
1393 	msm_gem_unpin_iova(bo, aspace);
1394 	drm_gem_object_put(bo);
1395 }
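
/* Illustrative pairing (hypothetical size/aspace): msm_gem_kernel_new() and
 * msm_gem_kernel_put() are meant for small kernel-owned buffers that stay
 * mapped and pinned for their whole lifetime:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
 *				       &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ... use ptr / iova ...
 *	msm_gem_kernel_put(bo, aspace);
 */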
1396 
1397 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1398 {
1399 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1400 	va_list ap;
1401 
1402 	if (!fmt)
1403 		return;
1404 
1405 	va_start(ap, fmt);
1406 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1407 	va_end(ap);
1408 }
1409