1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12
13 #include <drm/drm_prime.h>
14 #include <drm/drm_file.h>
15
16 #include <trace/events/gpu_mem.h>
17
18 #include "msm_drv.h"
19 #include "msm_gem.h"
20 #include "msm_gpu.h"
21 #include "msm_kms.h"
22
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
24 {
25 uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
26 trace_gpu_mem_total(0, 0, total_mem);
27 }
28
static void update_ctx_mem(struct drm_file *file, ssize_t size)
30 {
31 struct msm_context *ctx = file->driver_priv;
32 uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
33
34 rcu_read_lock(); /* Locks file->pid! */
35 trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
36 rcu_read_unlock();
37
38 }
39
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
41 {
42 msm_gem_vma_get(obj);
43 update_ctx_mem(file, obj->size);
44 return 0;
45 }
46
47 static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
48 bool close, const char *reason);
49
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
51 {
52 struct msm_context *ctx = file->driver_priv;
53 struct drm_exec exec;
54
55 update_ctx_mem(file, -obj->size);
56 msm_gem_vma_put(obj);
57
58 /*
59 * If VM isn't created yet, nothing to cleanup. And in fact calling
60 * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
61 * down the mappings of shared buffers in other contexts.
62 */
63 if (!ctx->vm)
64 return;
65
66 /*
67 * VM_BIND does not depend on implicit teardown of VMAs on handle
68 * close, but instead on implicit teardown of the VM when the device
69 * is closed (see msm_gem_vm_close())
70 */
71 if (msm_context_is_vmbind(ctx))
72 return;
73
74 /*
75 * TODO we might need to kick this to a queue to avoid blocking
76 * in CLOSE ioctl
77 */
78 dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
79 MAX_SCHEDULE_TIMEOUT);
80
81 msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm);
82 put_iova_spaces(obj, ctx->vm, true, "close");
83 drm_exec_fini(&exec); /* drop locks */
84 }
85
86 /*
87 * Get/put for kms->vm VMA
88 */
89
void msm_gem_vma_get(struct drm_gem_object *obj)
91 {
92 atomic_inc(&to_msm_bo(obj)->vma_ref);
93 }
94
void msm_gem_vma_put(struct drm_gem_object *obj)
96 {
97 struct msm_drm_private *priv = obj->dev->dev_private;
98
99 if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
100 return;
101
102 if (!priv->kms)
103 return;
104
105 #ifdef CONFIG_DRM_MSM_KMS
106 struct drm_exec exec;
107
108 msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
109 put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
110 drm_exec_fini(&exec); /* drop locks */
111 #endif
112 }
113
114 /*
115 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
116 * API. Really GPU cache is out of scope here (handled on cmdstream)
117 * and all we need to do is invalidate newly allocated pages before
118 * mapping to CPU as uncached/writecombine.
119 *
120 * On top of this, we have the added headache, that depending on
121 * display generation, the display's iommu may be wired up to either
122 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
123 * that here we either have dma-direct or iommu ops.
124 *
 * Let this be a cautionary tale of abstraction gone wrong.
126 */
127
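/*
 * Rough sketch of how this plays out for a MSM_BO_WC buffer (mirroring
 * get_pages()/put_pages() below):
 *
 *	get_pages()  -> drm_gem_get_pages()
 *	             -> sync_for_device()   dma_map_sgtable(), cleans CPU caches
 *	...
 *	put_pages()  -> sync_for_cpu()      dma_unmap_sgtable()
 *	             -> drm_gem_put_pages()
 */
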
static void sync_for_device(struct msm_gem_object *msm_obj)
129 {
130 struct device *dev = msm_obj->base.dev->dev;
131
132 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
133 }
134
static void sync_for_cpu(struct msm_gem_object *msm_obj)
136 {
137 struct device *dev = msm_obj->base.dev->dev;
138
139 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
140 }
141
static void update_lru_active(struct drm_gem_object *obj)
143 {
144 struct msm_drm_private *priv = obj->dev->dev_private;
145 struct msm_gem_object *msm_obj = to_msm_bo(obj);
146
147 GEM_WARN_ON(!msm_obj->pages);
148
149 if (msm_obj->pin_count) {
150 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
151 } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
152 drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
153 } else {
154 GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
155
156 drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
157 }
158 }
159
static void update_lru_locked(struct drm_gem_object *obj)
161 {
162 struct msm_drm_private *priv = obj->dev->dev_private;
163 struct msm_gem_object *msm_obj = to_msm_bo(obj);
164
165 msm_gem_assert_locked(&msm_obj->base);
166
167 if (!msm_obj->pages) {
168 GEM_WARN_ON(msm_obj->pin_count);
169
170 drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
171 } else {
172 update_lru_active(obj);
173 }
174 }
175
static void update_lru(struct drm_gem_object *obj)
177 {
178 struct msm_drm_private *priv = obj->dev->dev_private;
179
180 mutex_lock(&priv->lru.lock);
181 update_lru_locked(obj);
182 mutex_unlock(&priv->lru.lock);
183 }
184
static struct page **get_pages(struct drm_gem_object *obj)
186 {
187 struct msm_gem_object *msm_obj = to_msm_bo(obj);
188
189 msm_gem_assert_locked(obj);
190
191 if (!msm_obj->pages) {
192 struct drm_device *dev = obj->dev;
193 struct page **p;
194 int npages = obj->size >> PAGE_SHIFT;
195
196 p = drm_gem_get_pages(obj);
197
198 if (IS_ERR(p)) {
199 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
200 PTR_ERR(p));
201 return p;
202 }
203
204 update_device_mem(dev->dev_private, obj->size);
205
206 msm_obj->pages = p;
207
208 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
209 if (IS_ERR(msm_obj->sgt)) {
210 void *ptr = ERR_CAST(msm_obj->sgt);
211
212 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
213 msm_obj->sgt = NULL;
214 return ptr;
215 }
216
217 /* For non-cached buffers, ensure the new pages are clean
218 * because display controller, GPU, etc. are not coherent:
219 */
220 if (msm_obj->flags & MSM_BO_WC)
221 sync_for_device(msm_obj);
222
223 update_lru(obj);
224 }
225
226 return msm_obj->pages;
227 }
228
static void put_pages(struct drm_gem_object *obj)
230 {
231 struct msm_gem_object *msm_obj = to_msm_bo(obj);
232
233 /*
234 * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
 * See explanation in msm_gem_assert_locked()
236 */
237 if (kref_read(&obj->refcount))
238 drm_gpuvm_bo_gem_evict(obj, true);
239
240 if (msm_obj->pages) {
241 if (msm_obj->sgt) {
242 /* For non-cached buffers, ensure the new
243 * pages are clean because display controller,
244 * GPU, etc. are not coherent:
245 */
246 if (msm_obj->flags & MSM_BO_WC)
247 sync_for_cpu(msm_obj);
248
249 sg_free_table(msm_obj->sgt);
250 kfree(msm_obj->sgt);
251 msm_obj->sgt = NULL;
252 }
253
254 update_device_mem(obj->dev->dev_private, -obj->size);
255
256 drm_gem_put_pages(obj, msm_obj->pages, true, false);
257
258 msm_obj->pages = NULL;
259 update_lru(obj);
260 }
261 }
262
struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv)
264 {
265 struct msm_gem_object *msm_obj = to_msm_bo(obj);
266
267 msm_gem_assert_locked(obj);
268
269 if (msm_obj->madv > madv) {
270 DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
271 msm_obj->madv, madv);
272 return ERR_PTR(-EBUSY);
273 }
274
275 return get_pages(obj);
276 }
277
278 /*
279 * Update the pin count of the object, call under lru.lock
280 */
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
282 {
283 struct msm_drm_private *priv = obj->dev->dev_private;
284
285 msm_gem_assert_locked(obj);
286
287 to_msm_bo(obj)->pin_count++;
288 drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
289 }
290
static void pin_obj_locked(struct drm_gem_object *obj)
292 {
293 struct msm_drm_private *priv = obj->dev->dev_private;
294
295 mutex_lock(&priv->lru.lock);
296 msm_gem_pin_obj_locked(obj);
297 mutex_unlock(&priv->lru.lock);
298 }
299
struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
301 {
302 struct page **p;
303
304 msm_gem_assert_locked(obj);
305
306 p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
307 if (!IS_ERR(p))
308 pin_obj_locked(obj);
309
310 return p;
311 }
312
void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
314 {
315 msm_gem_assert_locked(obj);
316
317 msm_gem_unpin_locked(obj);
318 }
319
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
321 {
322 if (msm_obj->flags & MSM_BO_WC)
323 return pgprot_writecombine(prot);
324 return prot;
325 }
326
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
328 {
329 struct vm_area_struct *vma = vmf->vma;
330 struct drm_gem_object *obj = vma->vm_private_data;
331 struct msm_gem_object *msm_obj = to_msm_bo(obj);
332 struct page **pages;
333 unsigned long pfn;
334 pgoff_t pgoff;
335 int err;
336 vm_fault_t ret;
337
338 /*
339 * vm_ops.open/drm_gem_mmap_obj and close get and put
 * a reference on obj. So, we don't need to hold one here.
341 */
342 err = msm_gem_lock_interruptible(obj);
343 if (err) {
344 ret = VM_FAULT_NOPAGE;
345 goto out;
346 }
347
348 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
349 msm_gem_unlock(obj);
350 return VM_FAULT_SIGBUS;
351 }
352
353 /* make sure we have pages attached now */
354 pages = get_pages(obj);
355 if (IS_ERR(pages)) {
356 ret = vmf_error(PTR_ERR(pages));
357 goto out_unlock;
358 }
359
360 /* We don't use vmf->pgoff since that has the fake offset: */
361 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
362
363 pfn = page_to_pfn(pages[pgoff]);
364
365 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
366 pfn, pfn << PAGE_SHIFT);
367
368 ret = vmf_insert_pfn(vma, vmf->address, pfn);
369
370 out_unlock:
371 msm_gem_unlock(obj);
372 out:
373 return ret;
374 }
375
376 /** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
378 {
379 struct drm_device *dev = obj->dev;
380 int ret;
381
382 msm_gem_assert_locked(obj);
383
384 /* Make it mmapable */
385 ret = drm_gem_create_mmap_offset(obj);
386
387 if (ret) {
388 DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
389 return 0;
390 }
391
392 return drm_vma_node_offset_addr(&obj->vma_node);
393 }
394
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
396 {
397 uint64_t offset;
398
399 msm_gem_lock(obj);
400 offset = mmap_offset(obj);
401 msm_gem_unlock(obj);
402 return offset;
403 }
404
static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj,
				    struct drm_gpuvm *vm)
407 {
408 struct drm_gpuvm_bo *vm_bo;
409
410 msm_gem_assert_locked(obj);
411
412 drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
413 struct drm_gpuva *vma;
414
415 drm_gpuvm_bo_for_each_va (vma, vm_bo) {
416 if (vma->vm == vm) {
417 /* lookup_vma() should only be used in paths
418 * with at most one vma per vm
419 */
420 GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
421
422 return vma;
423 }
424 }
425 }
426
427 return NULL;
428 }
429
430 /*
431 * If close is true, this also closes the VMA (releasing the allocated
432 * iova range) in addition to removing the iommu mapping. In the eviction
433 * case (!close), we keep the iova allocated, but only remove the iommu
434 * mapping.
435 */
static void
put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm,
		bool close, const char *reason)
439 {
440 struct drm_gpuvm_bo *vm_bo, *tmp;
441
442 msm_gem_assert_locked(obj);
443
444 drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
445 struct drm_gpuva *vma, *vmatmp;
446
447 if (vm && vm_bo->vm != vm)
448 continue;
449
450 drm_gpuvm_bo_get(vm_bo);
451
452 drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
453 msm_gem_vma_unmap(vma, reason);
454 if (close)
455 msm_gem_vma_close(vma);
456 }
457
458 drm_gpuvm_bo_put(vm_bo);
459 }
460 }
461
static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj,
					struct drm_gpuvm *vm, u64 range_start,
					u64 range_end)
465 {
466 struct drm_gpuva *vma;
467
468 msm_gem_assert_locked(obj);
469
470 vma = lookup_vma(obj, vm);
471
472 if (!vma) {
473 vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end);
474 } else {
475 GEM_WARN_ON(vma->va.addr < range_start);
476 GEM_WARN_ON((vma->va.addr + obj->size) > range_end);
477 }
478
479 return vma;
480 }
481
int msm_gem_prot(struct drm_gem_object *obj)
483 {
484 struct msm_gem_object *msm_obj = to_msm_bo(obj);
485 int prot = IOMMU_READ;
486
487 if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
488 prot |= IOMMU_WRITE;
489
490 if (msm_obj->flags & MSM_BO_MAP_PRIV)
491 prot |= IOMMU_PRIV;
492
493 if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
494 prot |= IOMMU_CACHE;
495
496 return prot;
497 }
498
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma)
500 {
501 struct msm_gem_object *msm_obj = to_msm_bo(obj);
502 struct page **pages;
503 int prot = msm_gem_prot(obj);
504
505 msm_gem_assert_locked(obj);
506
507 pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
508 if (IS_ERR(pages))
509 return PTR_ERR(pages);
510
511 return msm_gem_vma_map(vma, prot, msm_obj->sgt);
512 }
513
void msm_gem_unpin_locked(struct drm_gem_object *obj)
515 {
516 struct msm_drm_private *priv = obj->dev->dev_private;
517 struct msm_gem_object *msm_obj = to_msm_bo(obj);
518
519 msm_gem_assert_locked(obj);
520
521 mutex_lock(&priv->lru.lock);
522 msm_obj->pin_count--;
523 GEM_WARN_ON(msm_obj->pin_count < 0);
524 update_lru_locked(obj);
525 mutex_unlock(&priv->lru.lock);
526 }
527
528 /* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock.  In particular we know that we already have backing
 * and that the object's dma_resv has the fence for the current
532 * submit/job which will prevent us racing against page eviction.
533 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
535 {
536 struct msm_gem_object *msm_obj = to_msm_bo(obj);
537
538 msm_obj->pin_count--;
539 GEM_WARN_ON(msm_obj->pin_count < 0);
540 update_lru_active(obj);
541 }
542
struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					 struct drm_gpuvm *vm)
545 {
546 return get_vma_locked(obj, vm, 0, U64_MAX);
547 }
548
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
					 struct drm_gpuvm *vm, uint64_t *iova,
					 u64 range_start, u64 range_end)
552 {
553 struct drm_gpuva *vma;
554 int ret;
555
556 msm_gem_assert_locked(obj);
557
558 if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
559 return -EINVAL;
560
561 vma = get_vma_locked(obj, vm, range_start, range_end);
562 if (IS_ERR(vma))
563 return PTR_ERR(vma);
564
565 ret = msm_gem_pin_vma_locked(obj, vma);
566 if (!ret) {
567 *iova = vma->va.addr;
568 pin_obj_locked(obj);
569 }
570
571 return ret;
572 }
573
574 /*
575 * get iova and pin it. Should have a matching put
576 * limits iova to specified range (in pages)
577 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
				   struct drm_gpuvm *vm, uint64_t *iova,
				   u64 range_start, u64 range_end)
581 {
582 struct drm_exec exec;
583 int ret;
584
585 msm_gem_lock_vm_and_obj(&exec, obj, vm);
586 ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end);
587 drm_exec_fini(&exec); /* drop locks */
588
589 return ret;
590 }
591
592 /* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
			     uint64_t *iova)
595 {
596 return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);
597 }
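
/*
 * Illustrative only: a typical caller pairs these, e.g. (with a
 * hypothetical vm/obj):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, vm, &iova);
 *	if (!ret) {
 *		... use iova in the cmdstream ...
 *		msm_gem_unpin_iova(obj, vm);
 *	}
 */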
598
599 /*
600 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
601 * valid for the life of the object
602 */
int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
		     uint64_t *iova)
605 {
606 struct drm_gpuva *vma;
607 struct drm_exec exec;
608 int ret = 0;
609
610 msm_gem_lock_vm_and_obj(&exec, obj, vm);
611 vma = get_vma_locked(obj, vm, 0, U64_MAX);
612 if (IS_ERR(vma)) {
613 ret = PTR_ERR(vma);
614 } else {
615 *iova = vma->va.addr;
616 }
617 drm_exec_fini(&exec); /* drop locks */
618
619 return ret;
620 }
621
static int clear_iova(struct drm_gem_object *obj,
		      struct drm_gpuvm *vm)
624 {
625 struct drm_gpuva *vma = lookup_vma(obj, vm);
626
627 if (!vma)
628 return 0;
629
630 msm_gem_vma_unmap(vma, NULL);
631 msm_gem_vma_close(vma);
632
633 return 0;
634 }
635
636 /*
637 * Get the requested iova but don't pin it. Fails if the requested iova is
638 * not available. Doesn't need a put because iovas are currently valid for
639 * the life of the object.
640 *
641 * Setting an iova of zero will clear the vma.
642 */
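/*
 * An illustrative (purely hypothetical) call sequence:
 *
 *	ret = msm_gem_set_iova(obj, vm, bo_iova);   map the BO at bo_iova
 *	...
 *	msm_gem_set_iova(obj, vm, 0);               clear the VMA again
 */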
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct drm_gpuvm *vm, uint64_t iova)
645 {
646 struct drm_exec exec;
647 int ret = 0;
648
649 msm_gem_lock_vm_and_obj(&exec, obj, vm);
650 if (!iova) {
651 ret = clear_iova(obj, vm);
652 } else {
653 struct drm_gpuva *vma;
654 vma = get_vma_locked(obj, vm, iova, iova + obj->size);
655 if (IS_ERR(vma)) {
656 ret = PTR_ERR(vma);
657 } else if (GEM_WARN_ON(vma->va.addr != iova)) {
658 clear_iova(obj, vm);
659 ret = -EBUSY;
660 }
661 }
662 drm_exec_fini(&exec); /* drop locks */
663
664 return ret;
665 }
666
static bool is_kms_vm(struct drm_gpuvm *vm)
668 {
669 #ifdef CONFIG_DRM_MSM_KMS
670 struct msm_drm_private *priv = vm->drm->dev_private;
671
672 return priv->kms && (priv->kms->vm == vm);
673 #else
674 return false;
675 #endif
676 }
677
678 /*
 * Unpin an iova by updating the reference counts. The memory isn't actually
680 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
681 * to get rid of it
682 */
void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
684 {
685 struct drm_gpuva *vma;
686 struct drm_exec exec;
687
688 msm_gem_lock_vm_and_obj(&exec, obj, vm);
689 vma = lookup_vma(obj, vm);
690 if (vma) {
691 msm_gem_unpin_locked(obj);
692 }
693 if (!is_kms_vm(vm))
694 put_iova_spaces(obj, vm, true, "close");
695 drm_exec_fini(&exec); /* drop locks */
696 }
697
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
700 {
701 args->pitch = align_pitch(args->width, args->bpp);
702 args->size = PAGE_ALIGN(args->pitch * args->height);
703 return msm_gem_new_handle(dev, file, args->size,
704 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
705 }
706
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    uint32_t handle, uint64_t *offset)
709 {
710 struct drm_gem_object *obj;
711 int ret = 0;
712
713 /* GEM does all our handle to object mapping */
714 obj = drm_gem_object_lookup(file, handle);
715 if (obj == NULL) {
716 ret = -ENOENT;
717 goto fail;
718 }
719
720 *offset = msm_gem_mmap_offset(obj);
721
722 drm_gem_object_put(obj);
723
724 fail:
725 return ret;
726 }
727
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
729 {
730 struct msm_gem_object *msm_obj = to_msm_bo(obj);
731 struct page **pages;
732 int ret = 0;
733
734 msm_gem_assert_locked(obj);
735
736 if (drm_gem_is_imported(obj))
737 return ERR_PTR(-ENODEV);
738
739 pages = msm_gem_get_pages_locked(obj, madv);
740 if (IS_ERR(pages))
741 return ERR_CAST(pages);
742
743 pin_obj_locked(obj);
744
745 /* increment vmap_count *before* vmap() call, so shrinker can
746 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
747 * This guarantees that we won't try to msm_gem_vunmap() this
748 * same object from within the vmap() call (while we already
749 * hold msm_obj lock)
750 */
751 msm_obj->vmap_count++;
752
753 if (!msm_obj->vaddr) {
754 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
755 VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
756 if (msm_obj->vaddr == NULL) {
757 ret = -ENOMEM;
758 goto fail;
759 }
760 }
761
762 return msm_obj->vaddr;
763
764 fail:
765 msm_obj->vmap_count--;
766 msm_gem_unpin_locked(obj);
767 return ERR_PTR(ret);
768 }
769
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
771 {
772 return get_vaddr(obj, MSM_MADV_WILLNEED);
773 }
774
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
776 {
777 void *ret;
778
779 msm_gem_lock(obj);
780 ret = msm_gem_get_vaddr_locked(obj);
781 msm_gem_unlock(obj);
782
783 return ret;
784 }
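
/*
 * Sketch of the expected pairing (illustrative only, data/size hypothetical):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (!IS_ERR(ptr)) {
 *		memcpy(ptr, data, size);    CPU access while pinned/vmap'd
 *		msm_gem_put_vaddr(obj);
 *	}
 */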
785
786 /*
787 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
789 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
790 * active list.
791 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
793 {
794 return get_vaddr(obj, __MSM_MADV_PURGED);
795 }
796
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
798 {
799 struct msm_gem_object *msm_obj = to_msm_bo(obj);
800
801 msm_gem_assert_locked(obj);
802 GEM_WARN_ON(msm_obj->vmap_count < 1);
803
804 msm_obj->vmap_count--;
805 msm_gem_unpin_locked(obj);
806 }
807
void msm_gem_put_vaddr(struct drm_gem_object *obj)
809 {
810 msm_gem_lock(obj);
811 msm_gem_put_vaddr_locked(obj);
812 msm_gem_unlock(obj);
813 }
814
815 /* Update madvise status, returns true if not purged, else
816 * false or -errno.
817 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
819 {
820 struct msm_drm_private *priv = obj->dev->dev_private;
821 struct msm_gem_object *msm_obj = to_msm_bo(obj);
822
823 msm_gem_lock(obj);
824
825 mutex_lock(&priv->lru.lock);
826
827 if (msm_obj->madv != __MSM_MADV_PURGED)
828 msm_obj->madv = madv;
829
830 madv = msm_obj->madv;
831
832 /* If the obj is inactive, we might need to move it
833 * between inactive lists
834 */
835 update_lru_locked(obj);
836
837 mutex_unlock(&priv->lru.lock);
838
839 msm_gem_unlock(obj);
840
841 return (madv != __MSM_MADV_PURGED);
842 }
843
void msm_gem_purge(struct drm_gem_object *obj)
845 {
846 struct drm_device *dev = obj->dev;
847 struct msm_drm_private *priv = obj->dev->dev_private;
848 struct msm_gem_object *msm_obj = to_msm_bo(obj);
849
850 msm_gem_assert_locked(obj);
851 GEM_WARN_ON(!is_purgeable(msm_obj));
852
853 /* Get rid of any iommu mapping(s): */
854 put_iova_spaces(obj, NULL, false, "purge");
855
856 msm_gem_vunmap(obj);
857
858 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
859
860 put_pages(obj);
861
862 mutex_lock(&priv->lru.lock);
863 /* A one-way transition: */
864 msm_obj->madv = __MSM_MADV_PURGED;
865 mutex_unlock(&priv->lru.lock);
866
867 drm_gem_free_mmap_offset(obj);
868
869 /* Our goal here is to return as much of the memory as
870 * is possible back to the system as we are called from OOM.
871 * To do this we must instruct the shmfs to drop all of its
872 * backing pages, *now*.
873 */
874 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
875
876 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
877 0, (loff_t)-1);
878 }
879
880 /*
881 * Unpin the backing pages and make them available to be swapped out.
882 */
void msm_gem_evict(struct drm_gem_object *obj)
884 {
885 struct drm_device *dev = obj->dev;
886 struct msm_gem_object *msm_obj = to_msm_bo(obj);
887
888 msm_gem_assert_locked(obj);
889 GEM_WARN_ON(is_unevictable(msm_obj));
890
891 /* Get rid of any iommu mapping(s): */
892 put_iova_spaces(obj, NULL, false, "evict");
893
894 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
895
896 put_pages(obj);
897 }
898
void msm_gem_vunmap(struct drm_gem_object *obj)
900 {
901 struct msm_gem_object *msm_obj = to_msm_bo(obj);
902
903 msm_gem_assert_locked(obj);
904
905 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
906 return;
907
908 vunmap(msm_obj->vaddr);
909 msm_obj->vaddr = NULL;
910 }
911
bool msm_gem_active(struct drm_gem_object *obj)
913 {
914 msm_gem_assert_locked(obj);
915
916 if (to_msm_bo(obj)->pin_count)
917 return true;
918
919 return !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP);
920 }
921
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
923 {
924 bool write = !!(op & MSM_PREP_WRITE);
925 unsigned long remain =
926 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
927 long ret;
928
929 if (op & MSM_PREP_BOOST) {
930 dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
931 ktime_get());
932 }
933
934 ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
935 true, remain);
936 if (ret == 0)
937 return remain == 0 ? -EBUSY : -ETIMEDOUT;
938 else if (ret < 0)
939 return ret;
940
941 /* TODO cache maintenance */
942
943 return 0;
944 }
945
int msm_gem_cpu_fini(struct drm_gem_object *obj)
947 {
948 /* TODO cache maintenance */
949 return 0;
950 }
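
/*
 * Illustrative only: userspace CPU access is expected to be bracketed as
 *
 *	msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	... CPU reads/writes via the mmap'd buffer ...
 *	msm_gem_cpu_fini(obj);
 *
 * (reached via the GEM_CPU_PREP/GEM_CPU_FINI ioctls).
 */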
951
952 #ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		      struct msm_gem_stats *stats)
955 {
956 struct msm_gem_object *msm_obj = to_msm_bo(obj);
957 struct dma_resv *robj = obj->resv;
958 uint64_t off = drm_vma_node_start(&obj->vma_node);
959 const char *madv;
960
961 if (!msm_gem_trylock(obj))
962 return;
963
964 stats->all.count++;
965 stats->all.size += obj->size;
966
967 if (msm_gem_active(obj)) {
968 stats->active.count++;
969 stats->active.size += obj->size;
970 }
971
972 if (msm_obj->pages) {
973 stats->resident.count++;
974 stats->resident.size += obj->size;
975 }
976
977 switch (msm_obj->madv) {
978 case __MSM_MADV_PURGED:
979 stats->purged.count++;
980 stats->purged.size += obj->size;
981 madv = " purged";
982 break;
983 case MSM_MADV_DONTNEED:
984 stats->purgeable.count++;
985 stats->purgeable.size += obj->size;
986 madv = " purgeable";
987 break;
988 case MSM_MADV_WILLNEED:
989 default:
990 madv = "";
991 break;
992 }
993
994 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
995 msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
996 obj->name, kref_read(&obj->refcount),
997 off, msm_obj->vaddr);
998
999 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
1000
1001 if (!list_empty(&obj->gpuva.list)) {
1002 struct drm_gpuvm_bo *vm_bo;
1003
1004 seq_puts(m, " vmas:");
1005
1006 drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1007 struct drm_gpuva *vma;
1008
1009 drm_gpuvm_bo_for_each_va (vma, vm_bo) {
1010 const char *name, *comm;
1011 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
1012 struct task_struct *task =
1013 get_pid_task(vm->pid, PIDTYPE_PID);
1014 if (task) {
1015 comm = kstrdup(task->comm, GFP_KERNEL);
1016 put_task_struct(task);
1017 } else {
1018 comm = NULL;
1019 }
1020 name = vm->base.name;
1021
1022 seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
1023 name, comm ? ":" : "", comm ? comm : "",
1024 vma->vm, vma->va.addr,
1025 to_msm_vma(vma)->mapped ? "" : "un");
1026 kfree(comm);
1027 }
1028 }
1029
1030 seq_puts(m, "\n");
1031 }
1032
1033 dma_resv_describe(robj, m);
1034 msm_gem_unlock(obj);
1035 }
1036
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1038 {
1039 struct msm_gem_stats stats = {};
1040 struct msm_gem_object *msm_obj;
1041
1042 seq_puts(m, " flags id ref offset kaddr size madv name\n");
1043 list_for_each_entry(msm_obj, list, node) {
1044 struct drm_gem_object *obj = &msm_obj->base;
1045 seq_puts(m, " ");
1046 msm_gem_describe(obj, m, &stats);
1047 }
1048
1049 seq_printf(m, "Total: %4d objects, %9zu bytes\n",
1050 stats.all.count, stats.all.size);
1051 seq_printf(m, "Active: %4d objects, %9zu bytes\n",
1052 stats.active.count, stats.active.size);
1053 seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
1054 stats.resident.count, stats.resident.size);
1055 seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1056 stats.purgeable.count, stats.purgeable.size);
1057 seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
1058 stats.purged.count, stats.purged.size);
1059 }
1060 #endif
1061
1062 /* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
1064 {
1065 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1066 struct drm_device *dev = obj->dev;
1067 struct msm_drm_private *priv = dev->dev_private;
1068 struct drm_exec exec;
1069
1070 mutex_lock(&priv->obj_lock);
1071 list_del(&msm_obj->node);
1072 mutex_unlock(&priv->obj_lock);
1073
1074 /*
1075 * We need to lock any VMs the object is still attached to, but not
 * the object itself (see explanation in msm_gem_assert_locked()),
1077 * so just open-code this special case.
1078 *
1079 * Note that we skip the dance if we aren't attached to any VM. This
1080 * is load bearing. The driver needs to support two usage models:
1081 *
1082 * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
1083 * implicitly torn down when the object is freed, the VMA's do
1084 * not hold a hard reference to the BO.
1085 *
1086 * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
1087 * BO. This can be dropped when the VM is closed and it's associated
1088 * VMAs are torn down. (See msm_gem_vm_close()).
1089 *
1090 * In the latter case the last reference to a BO can be dropped while
1091 * we already have the VM locked. It would have already been removed
1092 * from the gpuva list, but lockdep doesn't know that. Or understand
1093 * the differences between the two usage models.
1094 */
1095 if (!list_empty(&obj->gpuva.list)) {
1096 drm_exec_init(&exec, 0, 0);
1097 drm_exec_until_all_locked (&exec) {
1098 struct drm_gpuvm_bo *vm_bo;
1099 drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
1100 drm_exec_lock_obj(&exec,
1101 drm_gpuvm_resv_obj(vm_bo->vm));
1102 drm_exec_retry_on_contention(&exec);
1103 }
1104 }
1105 put_iova_spaces(obj, NULL, true, "free");
1106 drm_exec_fini(&exec); /* drop locks */
1107 }
1108
1109 if (drm_gem_is_imported(obj)) {
1110 GEM_WARN_ON(msm_obj->vaddr);
1111
1112 /* Don't drop the pages for imported dmabuf, as they are not
1113 * ours, just free the array we allocated:
1114 */
1115 kvfree(msm_obj->pages);
1116
1117 drm_prime_gem_destroy(obj, msm_obj->sgt);
1118 } else {
1119 msm_gem_vunmap(obj);
1120 put_pages(obj);
1121 }
1122
1123 if (obj->resv != &obj->_resv) {
1124 struct drm_gem_object *r_obj =
1125 container_of(obj->resv, struct drm_gem_object, _resv);
1126
1127 WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
1128
1129 /* Drop reference we hold to shared resv obj: */
1130 drm_gem_object_put(r_obj);
1131 }
1132
1133 drm_gem_object_release(obj);
1134
1135 kfree(msm_obj->metadata);
1136 kfree(msm_obj);
1137 }
1138
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1140 {
1141 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1142
1143 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1144 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1145
1146 return 0;
1147 }
1148
1149 /* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		       uint32_t size, uint32_t flags, uint32_t *handle,
		       char *name)
1153 {
1154 struct drm_gem_object *obj;
1155 int ret;
1156
1157 obj = msm_gem_new(dev, size, flags);
1158
1159 if (IS_ERR(obj))
1160 return PTR_ERR(obj);
1161
1162 if (name)
1163 msm_gem_object_set_name(obj, "%s", name);
1164
1165 if (flags & MSM_BO_NO_SHARE) {
1166 struct msm_context *ctx = file->driver_priv;
1167 struct drm_gem_object *r_obj = drm_gpuvm_resv_obj(ctx->vm);
1168
1169 drm_gem_object_get(r_obj);
1170
1171 obj->resv = r_obj->resv;
1172 }
1173
1174 ret = drm_gem_handle_create(file, obj, handle);
1175
1176 /* drop reference from allocate - handle holds it now */
1177 drm_gem_object_put(obj);
1178
1179 return ret;
1180 }
1181
static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1183 {
1184 struct msm_gem_object *msm_obj = to_msm_bo(obj);
1185 enum drm_gem_object_status status = 0;
1186
1187 if (msm_obj->pages)
1188 status |= DRM_GEM_OBJECT_RESIDENT;
1189
1190 if (msm_obj->madv == MSM_MADV_DONTNEED)
1191 status |= DRM_GEM_OBJECT_PURGEABLE;
1192
1193 return status;
1194 }
1195
1196 static const struct vm_operations_struct vm_ops = {
1197 .fault = msm_gem_fault,
1198 .open = drm_gem_vm_open,
1199 .close = drm_gem_vm_close,
1200 };
1201
1202 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1203 .free = msm_gem_free_object,
1204 .open = msm_gem_open,
1205 .close = msm_gem_close,
1206 .export = msm_gem_prime_export,
1207 .pin = msm_gem_prime_pin,
1208 .unpin = msm_gem_prime_unpin,
1209 .get_sg_table = msm_gem_prime_get_sg_table,
1210 .vmap = msm_gem_prime_vmap,
1211 .vunmap = msm_gem_prime_vunmap,
1212 .mmap = msm_gem_object_mmap,
1213 .status = msm_gem_status,
1214 .vm_ops = &vm_ops,
1215 };
1216
static int msm_gem_new_impl(struct drm_device *dev,
			    uint32_t size, uint32_t flags,
			    struct drm_gem_object **obj)
1220 {
1221 struct msm_drm_private *priv = dev->dev_private;
1222 struct msm_gem_object *msm_obj;
1223
1224 switch (flags & MSM_BO_CACHE_MASK) {
1225 case MSM_BO_CACHED:
1226 case MSM_BO_WC:
1227 break;
1228 case MSM_BO_CACHED_COHERENT:
1229 if (priv->has_cached_coherent)
1230 break;
1231 fallthrough;
1232 default:
1233 DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1234 (flags & MSM_BO_CACHE_MASK));
1235 return -EINVAL;
1236 }
1237
1238 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1239 if (!msm_obj)
1240 return -ENOMEM;
1241
1242 msm_obj->flags = flags;
1243 msm_obj->madv = MSM_MADV_WILLNEED;
1244
1245 INIT_LIST_HEAD(&msm_obj->node);
1246
1247 *obj = &msm_obj->base;
1248 (*obj)->funcs = &msm_gem_object_funcs;
1249
1250 return 0;
1251 }
1252
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1254 {
1255 struct msm_drm_private *priv = dev->dev_private;
1256 struct msm_gem_object *msm_obj;
1257 struct drm_gem_object *obj = NULL;
1258 int ret;
1259
1260 size = PAGE_ALIGN(size);
1261
1262 /* Disallow zero sized objects as they make the underlying
1263 * infrastructure grumpy
1264 */
1265 if (size == 0)
1266 return ERR_PTR(-EINVAL);
1267
1268 ret = msm_gem_new_impl(dev, size, flags, &obj);
1269 if (ret)
1270 return ERR_PTR(ret);
1271
1272 msm_obj = to_msm_bo(obj);
1273
1274 ret = drm_gem_object_init(dev, obj, size);
1275 if (ret)
1276 goto fail;
1277 /*
1278 * Our buffers are kept pinned, so allocating them from the
1279 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1280 * See comments above new_inode() why this is required _and_
1281 * expected if you're going to pin these pages.
1282 */
1283 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1284
1285 drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1286
1287 mutex_lock(&priv->obj_lock);
1288 list_add_tail(&msm_obj->node, &priv->objects);
1289 mutex_unlock(&priv->obj_lock);
1290
1291 ret = drm_gem_create_mmap_offset(obj);
1292 if (ret)
1293 goto fail;
1294
1295 return obj;
1296
1297 fail:
1298 drm_gem_object_put(obj);
1299 return ERR_PTR(ret);
1300 }
1301
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
				      struct dma_buf *dmabuf, struct sg_table *sgt)
1304 {
1305 struct msm_drm_private *priv = dev->dev_private;
1306 struct msm_gem_object *msm_obj;
1307 struct drm_gem_object *obj;
1308 uint32_t size;
1309 int ret, npages;
1310
1311 size = PAGE_ALIGN(dmabuf->size);
1312
1313 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1314 if (ret)
1315 return ERR_PTR(ret);
1316
1317 drm_gem_private_object_init(dev, obj, size);
1318
1319 npages = size / PAGE_SIZE;
1320
1321 msm_obj = to_msm_bo(obj);
1322 msm_gem_lock(obj);
1323 msm_obj->sgt = sgt;
1324 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1325 if (!msm_obj->pages) {
1326 msm_gem_unlock(obj);
1327 ret = -ENOMEM;
1328 goto fail;
1329 }
1330
1331 ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1332 if (ret) {
1333 msm_gem_unlock(obj);
1334 goto fail;
1335 }
1336
1337 msm_gem_unlock(obj);
1338
1339 drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1340
1341 mutex_lock(&priv->obj_lock);
1342 list_add_tail(&msm_obj->node, &priv->objects);
1343 mutex_unlock(&priv->obj_lock);
1344
1345 ret = drm_gem_create_mmap_offset(obj);
1346 if (ret)
1347 goto fail;
1348
1349 return obj;
1350
1351 fail:
1352 drm_gem_object_put(obj);
1353 return ERR_PTR(ret);
1354 }
1355
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
			 struct drm_gpuvm *vm, struct drm_gem_object **bo,
			 uint64_t *iova)
1359 {
1360 void *vaddr;
1361 struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1362 int ret;
1363
1364 if (IS_ERR(obj))
1365 return ERR_CAST(obj);
1366
1367 if (iova) {
1368 ret = msm_gem_get_and_pin_iova(obj, vm, iova);
1369 if (ret)
1370 goto err;
1371 }
1372
1373 vaddr = msm_gem_get_vaddr(obj);
1374 if (IS_ERR(vaddr)) {
1375 msm_gem_unpin_iova(obj, vm);
1376 ret = PTR_ERR(vaddr);
1377 goto err;
1378 }
1379
1380 if (bo)
1381 *bo = obj;
1382
1383 return vaddr;
1384 err:
1385 drm_gem_object_put(obj);
1386
1387 return ERR_PTR(ret);
1388
1389 }
1390
void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm)
1392 {
1393 if (IS_ERR_OR_NULL(bo))
1394 return;
1395
1396 msm_gem_put_vaddr(bo);
1397 msm_gem_unpin_iova(bo, vm);
1398 drm_gem_object_put(bo);
1399 }
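
/*
 * Sketch of the typical pairing for kernel-internal buffers (illustrative
 * only, assuming a suitable VM such as a GPU's vm):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, vm, &bo, &iova);
 *	if (!IS_ERR(ptr)) {
 *		... fill the buffer, hand iova to the GPU ...
 *		msm_gem_kernel_put(bo, vm);
 *	}
 */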
1400
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1402 {
1403 struct msm_gem_object *msm_obj = to_msm_bo(bo);
1404 va_list ap;
1405
1406 if (!fmt)
1407 return;
1408
1409 va_start(ap, fmt);
1410 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1411 va_end(ap);
1412 }
1413