1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2016 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7 #include "drm/drm_file.h"
8 #include "drm/msm_drm.h"
9 #include "linux/file.h"
10 #include "linux/sync_file.h"
11
12 #include "msm_drv.h"
13 #include "msm_gem.h"
14 #include "msm_gpu.h"
15 #include "msm_mmu.h"
16 #include "msm_syncobj.h"
17
18 #define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
19
20 static uint vm_log_shift = 0;
21 MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
22 module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
23
24 /**
25 * struct msm_vm_map_op - create new pgtable mapping
26 */
27 struct msm_vm_map_op {
28 /** @iova: start address for mapping */
29 uint64_t iova;
30 /** @range: size of the region to map */
31 uint64_t range;
32 /** @offset: offset into @sgt to map */
33 uint64_t offset;
34 /** @sgt: pages to map, or NULL for a PRR mapping */
35 struct sg_table *sgt;
36 /** @prot: the mapping protection flags */
37 int prot;
38
39 /**
40 * @queue_id: The id of the submitqueue the operation is performed
41 * on, or zero for (in particular) UNMAP ops triggered outside of
42 * a submitqueue (ie. process cleanup)
43 */
44 int queue_id;
45 };
46
47 /**
48 * struct msm_vm_unmap_op - unmap a range of pages from pgtable
49 */
50 struct msm_vm_unmap_op {
51 /** @iova: start address for unmap */
52 uint64_t iova;
53 /** @range: size of region to unmap */
54 uint64_t range;
55
56 /** @reason: The reason for the unmap */
57 const char *reason;
58
59 /**
60 * @queue_id: The id of the submitqueue the operation is performed
61 * on, or zero for (in particular) UNMAP ops triggered outside of
62 * a submitqueue (ie. process cleanup)
63 */
64 int queue_id;
65 };
66
67 /**
68 * struct msm_vm_op - A MAP or UNMAP operation
69 */
70 struct msm_vm_op {
71 /** @op: The operation type */
72 enum {
73 MSM_VM_OP_MAP = 1,
74 MSM_VM_OP_UNMAP,
75 } op;
76 union {
77 /** @map: Parameters used if op == MSM_VM_OP_MAP */
78 struct msm_vm_map_op map;
79 /** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
80 struct msm_vm_unmap_op unmap;
81 };
82 /** @node: list head in msm_vm_bind_job::vm_ops */
83 struct list_head node;
84
85 /**
86 * @obj: backing object for pages to be mapped/unmapped
87 *
88 * Async unmap ops, in particular, must hold a reference to the
89 * original GEM object backing the mapping that will be unmapped.
90 * But the same can be required in the map path, for example if
91 * there is not a corresponding unmap op, such as process exit.
92 *
93 * This ensures that the pages backing the mapping are not freed
94 * before the mapping is torn down.
95 */
96 struct drm_gem_object *obj;
97 };
98
99 /**
100 * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
101 *
102 * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
103 * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
104 * which are applied to the pgtables asynchronously. For example a userspace
105 * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
106 * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
107 */
108 struct msm_vm_bind_job {
109 /** @base: base class for drm_sched jobs */
110 struct drm_sched_job base;
111 /** @vm: The VM being operated on */
112 struct drm_gpuvm *vm;
113 /** @fence: The fence that is signaled when job completes */
114 struct dma_fence *fence;
115 /** @queue: The queue that the job runs on */
116 struct msm_gpu_submitqueue *queue;
117 /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
118 struct msm_mmu_prealloc prealloc;
119 /** @vm_ops: a list of struct msm_vm_op */
120 struct list_head vm_ops;
121 /** @bos_pinned: are the GEM objects being bound pinned? */
122 bool bos_pinned;
123 /** @nr_ops: the number of userspace requested ops */
124 unsigned int nr_ops;
125 /**
126 * @ops: the userspace requested ops
127 *
128 * The userspace requested ops are copied/parsed and validated
129 * before we start applying the updates to try to do as much up-
130 * front error checking as possible, to avoid the VM being in an
131 * undefined state due to partially executed VM_BIND.
132 *
133 * This table also serves to hold a reference to the backing GEM
134 * objects.
135 */
136 struct msm_vm_bind_op {
137 uint32_t op;
138 uint32_t flags;
139 union {
140 struct drm_gem_object *obj;
141 uint32_t handle;
142 };
143 uint64_t obj_offset;
144 uint64_t iova;
145 uint64_t range;
146 } ops[];
147 };
148
149 #define job_foreach_bo(obj, _job) \
150 for (unsigned i = 0; i < (_job)->nr_ops; i++) \
151 if ((obj = (_job)->ops[i].obj))
152
153 static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
154 {
155 return container_of(job, struct msm_vm_bind_job, base);
156 }
157
158 static void
159 msm_gem_vm_free(struct drm_gpuvm *gpuvm)
160 {
161 struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
162
163 drm_mm_takedown(&vm->mm);
164 if (vm->mmu)
165 vm->mmu->funcs->destroy(vm->mmu);
166 dma_fence_put(vm->last_fence);
167 put_pid(vm->pid);
168 kfree(vm->log);
169 kfree(vm);
170 }
171
172 /**
173 * msm_gem_vm_unusable() - Mark a VM as unusable
174 * @gpuvm: the VM to mark unusable
175 */
176 void
177 msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
178 {
179 struct msm_gem_vm *vm = to_msm_vm(gpuvm);
180 uint32_t vm_log_len = (1 << vm->log_shift);
181 uint32_t vm_log_mask = vm_log_len - 1;
182 uint32_t nr_vm_logs;
183 int first;
184
185 vm->unusable = true;
186
187 /* Bail if no log, or empty log: */
188 if (!vm->log || !vm->log[0].op)
189 return;
190
191 mutex_lock(&vm->mmu_lock);
192
193 /*
194 * log_idx is the next entry to overwrite, meaning it is the oldest, or
195 * first, entry (other than the special case handled below where the
196 * log hasn't wrapped around yet)
197 */
198 first = vm->log_idx;
199
200 if (!vm->log[first].op) {
201 /*
202 * If the next log entry has not been written yet, then only
203 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
204 * yet)
205 */
206 nr_vm_logs = MAX(0, first - 1);
207 first = 0;
208 } else {
209 nr_vm_logs = vm_log_len;
210 }
211
212 pr_err("vm-log:\n");
213 for (int i = 0; i < nr_vm_logs; i++) {
214 int idx = (i + first) & vm_log_mask;
215 struct msm_gem_vm_log_entry *e = &vm->log[idx];
216 pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
217 e->op, e->queue_id, e->iova,
218 e->iova + e->range);
219 }
220
221 mutex_unlock(&vm->mmu_lock);
222 }
223
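/*
 * Record a map/unmap operation in the VM's debug log ring buffer (if
 * enabled), so it can be dumped when the VM is marked unusable.
 */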
224 static void
225 vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
226 {
227 int idx;
228
229 if (!vm->managed)
230 lockdep_assert_held(&vm->mmu_lock);
231
232 vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
233
234 if (!vm->log)
235 return;
236
237 idx = vm->log_idx;
238 vm->log[idx].op = op;
239 vm->log[idx].iova = iova;
240 vm->log[idx].range = range;
241 vm->log[idx].queue_id = queue_id;
242 vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
243 }
244
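/* Log the unmap and remove the range from the pgtables */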
245 static void
246 vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
247 {
248 const char *reason = op->reason;
249
250 if (!reason)
251 reason = "unmap";
252
253 vm_log(vm, reason, op->iova, op->range, op->queue_id);
254
255 vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
256 }
257
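/* Log the map and install the pages into the pgtables */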
258 static int
259 vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
260 {
261 vm_log(vm, "map", op->iova, op->range, op->queue_id);
262
263 return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
264 op->range, op->prot);
265 }
266
267 /* Actually unmap memory for the vma */
268 void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
269 {
270 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
271 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
272
273 /* Don't do anything if the memory isn't mapped */
274 if (!msm_vma->mapped)
275 return;
276
277 /*
278 * The mmu_lock is only needed when preallocation is used. But
279 * in that case we don't need to worry about recursion into
280 * shrinker
281 */
282 if (!vm->managed)
283 mutex_lock(&vm->mmu_lock);
284
285 vm_unmap_op(vm, &(struct msm_vm_unmap_op){
286 .iova = vma->va.addr,
287 .range = vma->va.range,
288 .reason = reason,
289 });
290
291 if (!vm->managed)
292 mutex_unlock(&vm->mmu_lock);
293
294 msm_vma->mapped = false;
295 }
296
297 /* Map and pin vma: */
298 int
299 msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
300 {
301 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
302 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
303 int ret;
304
305 if (GEM_WARN_ON(!vma->va.addr))
306 return -EINVAL;
307
308 if (msm_vma->mapped)
309 return 0;
310
311 msm_vma->mapped = true;
312
313 /*
314 * The mmu_lock is only needed when preallocation is used. But
315 * in that case we don't need to worry about recursion into
316 * shrinker
317 */
318 if (!vm->managed)
319 mutex_lock(&vm->mmu_lock);
320
321 /*
322 * NOTE: if not using pgtable preallocation, we cannot hold
323 * a lock across map/unmap which is also used in the job_run()
324 * path, as this can cause deadlock in job_run() vs shrinker/
325 * reclaim.
326 */
327 ret = vm_map_op(vm, &(struct msm_vm_map_op){
328 .iova = vma->va.addr,
329 .range = vma->va.range,
330 .offset = vma->gem.offset,
331 .sgt = sgt,
332 .prot = prot,
333 });
334
335 if (!vm->managed)
336 mutex_unlock(&vm->mmu_lock);
337
338 if (ret)
339 msm_vma->mapped = false;
340
341 return ret;
342 }
343
344 /* Close an iova. Warn if it is still in use */
345 void msm_gem_vma_close(struct drm_gpuva *vma)
346 {
347 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
348 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
349
350 GEM_WARN_ON(msm_vma->mapped);
351
352 drm_gpuvm_resv_assert_held(&vm->base);
353
354 if (vma->gem.obj)
355 msm_gem_assert_locked(vma->gem.obj);
356
357 if (vma->va.addr && vm->managed)
358 drm_mm_remove_node(&msm_vma->node);
359
360 drm_gpuva_remove(vma);
361 drm_gpuva_unlink(vma);
362
363 kfree(vma);
364 }
365
366 /* Create a new vma and allocate an iova for it */
367 struct drm_gpuva *
368 msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
369 u64 offset, u64 range_start, u64 range_end)
370 {
371 struct msm_gem_vm *vm = to_msm_vm(gpuvm);
372 struct drm_gpuvm_bo *vm_bo;
373 struct msm_gem_vma *vma;
374 int ret;
375
376 drm_gpuvm_resv_assert_held(&vm->base);
377
378 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
379 if (!vma)
380 return ERR_PTR(-ENOMEM);
381
382 if (vm->managed) {
383 BUG_ON(offset != 0);
384 BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */
385 ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
386 obj->size, PAGE_SIZE, 0,
387 range_start, range_end, 0);
388
389 if (ret)
390 goto err_free_vma;
391
392 range_start = vma->node.start;
393 range_end = range_start + obj->size;
394 }
395
396 if (obj)
397 GEM_WARN_ON((range_end - range_start) > obj->size);
398
399 struct drm_gpuva_op_map op_map = {
400 .va.addr = range_start,
401 .va.range = range_end - range_start,
402 .gem.obj = obj,
403 .gem.offset = offset,
404 };
405
406 drm_gpuva_init_from_op(&vma->base, &op_map);
407 vma->mapped = false;
408
409 ret = drm_gpuva_insert(&vm->base, &vma->base);
410 if (ret)
411 goto err_free_range;
412
413 if (!obj)
414 return &vma->base;
415
416 vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
417 if (IS_ERR(vm_bo)) {
418 ret = PTR_ERR(vm_bo);
419 goto err_va_remove;
420 }
421
422 drm_gpuvm_bo_extobj_add(vm_bo);
423 drm_gpuva_link(&vma->base, vm_bo);
424 GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
425
426 return &vma->base;
427
428 err_va_remove:
429 drm_gpuva_remove(&vma->base);
430 err_free_range:
431 if (vm->managed)
432 drm_mm_remove_node(&vma->node);
433 err_free_vma:
434 kfree(vma);
435 return ERR_PTR(ret);
436 }
437
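/*
 * drm_gpuvm vm_bo_validate callback: re-pin each VMA of an evicted GEM
 * object so that its mappings are restored.
 */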
438 static int
439 msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
440 {
441 struct drm_gem_object *obj = vm_bo->obj;
442 struct drm_gpuva *vma;
443 int ret;
444
445 vm_dbg("validate: %p", obj);
446
447 msm_gem_assert_locked(obj);
448
449 drm_gpuvm_bo_for_each_va (vma, vm_bo) {
450 ret = msm_gem_pin_vma_locked(obj, vma);
451 if (ret)
452 return ret;
453 }
454
455 return 0;
456 }
457
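/* Context passed to the drm_gpuvm sm_step callbacks for a single VM_BIND op */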
458 struct op_arg {
459 unsigned flags;
460 struct msm_vm_bind_job *job;
461 const struct msm_vm_bind_op *op;
462 bool kept;
463 };
464
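/*
 * Queue a MAP/UNMAP op on the job, to be applied to the pgtables later in
 * the job_run() path. Takes a reference on the backing GEM object, if any.
 */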
465 static int
466 vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
467 {
468 struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
469 if (!op)
470 return -ENOMEM;
471
472 *op = _op;
473 list_add_tail(&op->node, &arg->job->vm_ops);
474
475 if (op->obj)
476 drm_gem_object_get(op->obj);
477
478 return 0;
479 }
480
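/* Allocate a new VMA covering the range described by the map op */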
481 static struct drm_gpuva *
482 vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
483 {
484 return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
485 op->va.addr, op->va.addr + op->va.range);
486 }
487
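/*
 * sm_step_map callback: create the new VMA and queue a MAP op for it,
 * unless the existing VMA was kept in-place (see the unmap step).
 */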
488 static int
489 msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
490 {
491 struct op_arg *arg = _arg;
492 struct msm_vm_bind_job *job = arg->job;
493 struct drm_gem_object *obj = op->map.gem.obj;
494 struct drm_gpuva *vma;
495 struct sg_table *sgt;
496 unsigned prot;
497 int ret;
498
499 if (arg->kept)
500 return 0;
501
502 vma = vma_from_op(arg, &op->map);
503 if (WARN_ON(IS_ERR(vma)))
504 return PTR_ERR(vma);
505
506 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
507 vma->va.addr, vma->va.range);
508
509 if (obj) {
510 sgt = to_msm_bo(obj)->sgt;
511 prot = msm_gem_prot(obj);
512 } else {
513 sgt = NULL;
514 prot = IOMMU_READ | IOMMU_WRITE;
515 }
516
517 ret = vm_op_enqueue(arg, (struct msm_vm_op){
518 .op = MSM_VM_OP_MAP,
519 .map = {
520 .sgt = sgt,
521 .iova = vma->va.addr,
522 .range = vma->va.range,
523 .offset = vma->gem.offset,
524 .prot = prot,
525 .queue_id = job->queue->id,
526 },
527 .obj = vma->gem.obj,
528 });
529
530 if (ret)
531 return ret;
532
533 vma->flags = ((struct op_arg *)arg)->flags;
534 to_msm_vma(vma)->mapped = true;
535
536 return 0;
537 }
538
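/*
 * sm_step_remap callback: an existing VMA is partially covered by the
 * requested update. Queue an UNMAP for the covered range, tear down the
 * original VMA, and re-create VMAs for the remaining prev/next portions.
 */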
539 static int
540 msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
541 {
542 struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
543 struct drm_gpuvm *vm = job->vm;
544 struct drm_gpuva *orig_vma = op->remap.unmap->va;
545 struct drm_gpuva *prev_vma = NULL, *next_vma = NULL;
546 struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
547 bool mapped = to_msm_vma(orig_vma)->mapped;
548 unsigned flags;
549 int ret;
550
551 vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
552 orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
553
554 if (mapped) {
555 uint64_t unmap_start, unmap_range;
556
557 drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
558
559 ret = vm_op_enqueue(arg, (struct msm_vm_op){
560 .op = MSM_VM_OP_UNMAP,
561 .unmap = {
562 .iova = unmap_start,
563 .range = unmap_range,
564 .queue_id = job->queue->id,
565 },
566 .obj = orig_vma->gem.obj,
567 });
568
569 if (ret)
570 return ret;
571
572 /*
573 * Part of this GEM obj is still mapped, but we're going to kill the
574 * existing VMA and replace it with one or two new ones (ie. two if
575 * the unmapped range is in the middle of the existing (unmap) VMA).
576 * So just set the state to unmapped:
577 */
578 to_msm_vma(orig_vma)->mapped = false;
579 }
580
581 /*
582 * Hold a ref to the vm_bo between the msm_gem_vma_close() and the
583 * creation of the new prev/next vma's, in case the vm_bo is tracked
584 * in the VM's evict list:
585 */
586 if (vm_bo)
587 drm_gpuvm_bo_get(vm_bo);
588
589 /*
590 * The prev_vma and/or next_vma are replacing the unmapped vma, and
591 * therefore should preserve its flags:
592 */
593 flags = orig_vma->flags;
594
595 msm_gem_vma_close(orig_vma);
596
597 if (op->remap.prev) {
598 prev_vma = vma_from_op(arg, op->remap.prev);
599 if (WARN_ON(IS_ERR(prev_vma)))
600 return PTR_ERR(prev_vma);
601
602 vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
603 to_msm_vma(prev_vma)->mapped = mapped;
604 prev_vma->flags = flags;
605 }
606
607 if (op->remap.next) {
608 next_vma = vma_from_op(arg, op->remap.next);
609 if (WARN_ON(IS_ERR(next_vma)))
610 return PTR_ERR(next_vma);
611
612 vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
613 to_msm_vma(next_vma)->mapped = mapped;
614 next_vma->flags = flags;
615 }
616
617 if (!mapped)
618 drm_gpuvm_bo_evict(vm_bo, true);
619
620 /* Drop the previous ref: */
621 drm_gpuvm_bo_put(vm_bo);
622
623 return 0;
624 }
625
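/*
 * sm_step_unmap callback: tear down an existing VMA, queueing an UNMAP op
 * unless this is an in-place remap that only changes the VMA flags.
 */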
626 static int
627 msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
628 {
629 struct op_arg *arg = _arg;
630 struct msm_vm_bind_job *job = arg->job;
631 struct drm_gpuva *vma = op->unmap.va;
632 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
633 int ret;
634
635 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
636 vma->va.addr, vma->va.range);
637
638 /*
639 * Detect in-place remap. Turnip does this to change the vma flags,
640 * in particular MSM_VMA_DUMP. In this case we want to avoid actually
641 * touching the page tables, as that would require synchronization
642 * against SUBMIT jobs running on the GPU.
643 */
644 if (op->unmap.keep &&
645 (arg->op->op == MSM_VM_BIND_OP_MAP) &&
646 (vma->gem.obj == arg->op->obj) &&
647 (vma->gem.offset == arg->op->obj_offset) &&
648 (vma->va.addr == arg->op->iova) &&
649 (vma->va.range == arg->op->range)) {
650 /* We are only expecting a single in-place unmap+map cb pair: */
651 WARN_ON(arg->kept);
652
653 /* Leave the existing VMA in place, but signal that to the map cb: */
654 arg->kept = true;
655
656 /* Only flags are changing, so update that in-place: */
657 unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
658 vma->flags = orig_flags | arg->flags;
659
660 return 0;
661 }
662
663 if (!msm_vma->mapped)
664 goto out_close;
665
666 ret = vm_op_enqueue(arg, (struct msm_vm_op){
667 .op = MSM_VM_OP_UNMAP,
668 .unmap = {
669 .iova = vma->va.addr,
670 .range = vma->va.range,
671 .queue_id = job->queue->id,
672 },
673 .obj = vma->gem.obj,
674 });
675
676 if (ret)
677 return ret;
678
679 msm_vma->mapped = false;
680
681 out_close:
682 msm_gem_vma_close(vma);
683
684 return 0;
685 }
686
687 static const struct drm_gpuvm_ops msm_gpuvm_ops = {
688 .vm_free = msm_gem_vm_free,
689 .vm_bo_validate = msm_gem_vm_bo_validate,
690 .sm_step_map = msm_gem_vm_sm_step_map,
691 .sm_step_remap = msm_gem_vm_sm_step_remap,
692 .sm_step_unmap = msm_gem_vm_sm_step_unmap,
693 };
694
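/*
 * Scheduler run_job callback: apply the job's queued MAP/UNMAP ops to the
 * pgtables, using the pre-allocated pgtable memory, and then unpin the BOs.
 */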
695 static struct dma_fence *
696 msm_vma_job_run(struct drm_sched_job *_job)
697 {
698 struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
699 struct msm_gem_vm *vm = to_msm_vm(job->vm);
700 struct drm_gem_object *obj;
701 int ret = vm->unusable ? -EINVAL : 0;
702
703 vm_dbg("");
704
705 mutex_lock(&vm->mmu_lock);
706 vm->mmu->prealloc = &job->prealloc;
707
708 while (!list_empty(&job->vm_ops)) {
709 struct msm_vm_op *op =
710 list_first_entry(&job->vm_ops, struct msm_vm_op, node);
711
712 switch (op->op) {
713 case MSM_VM_OP_MAP:
714 /*
715 * On error, stop trying to map new things.. but we
716 * still want to process the unmaps (or in particular,
717 * the drm_gem_object_put()s)
718 */
719 if (!ret)
720 ret = vm_map_op(vm, &op->map);
721 break;
722 case MSM_VM_OP_UNMAP:
723 vm_unmap_op(vm, &op->unmap);
724 break;
725 }
726 drm_gem_object_put(op->obj);
727 list_del(&op->node);
728 kfree(op);
729 }
730
731 vm->mmu->prealloc = NULL;
732 mutex_unlock(&vm->mmu_lock);
733
734 /*
735 * We failed to perform at least _some_ of the pgtable updates, so
736 * now the VM is in an undefined state. Game over!
737 */
738 if (ret)
739 msm_gem_vm_unusable(job->vm);
740
741 job_foreach_bo (obj, job) {
742 msm_gem_lock(obj);
743 msm_gem_unpin_locked(obj);
744 msm_gem_unlock(obj);
745 }
746
747 /* VM_BIND ops are synchronous, so no fence to wait on: */
748 return NULL;
749 }
750
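/*
 * Scheduler free_job callback: release pgtable pre-allocations, BO and
 * submitqueue references, and any unexecuted ops.
 */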
751 static void
752 msm_vma_job_free(struct drm_sched_job *_job)
753 {
754 struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
755 struct msm_gem_vm *vm = to_msm_vm(job->vm);
756 struct drm_gem_object *obj;
757
758 vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
759
760 atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
761
762 drm_sched_job_cleanup(_job);
763
764 job_foreach_bo (obj, job)
765 drm_gem_object_put(obj);
766
767 msm_submitqueue_put(job->queue);
768 dma_fence_put(job->fence);
769
770 /* In error paths, we could have unexecuted ops: */
771 while (!list_empty(&job->vm_ops)) {
772 struct msm_vm_op *op =
773 list_first_entry(&job->vm_ops, struct msm_vm_op, node);
774 list_del(&op->node);
775 kfree(op);
776 }
777
778 wake_up(&vm->prealloc_throttle.wait);
779
780 kfree(job);
781 }
782
783 static const struct drm_sched_backend_ops msm_vm_bind_ops = {
784 .run_job = msm_vma_job_run,
785 .free_job = msm_vma_job_free
786 };
787
788 /**
789 * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
790 * @drm: the drm device
791 * @mmu: the backing MMU objects handling mapping/unmapping
792 * @name: the name of the VM
793 * @va_start: the start offset of the VA space
794 * @va_size: the size of the VA space
795 * @managed: is it a kernel managed VM?
796 *
797 * In a kernel managed VM, the kernel handles address allocation, and only
798 * synchronous operations are supported. In a user managed VM, userspace
799 * handles virtual address allocation, and both async and sync operations
800 * are supported.
801 *
802 * Returns: pointer to the created &struct drm_gpuvm on success
803 * or an ERR_PTR(-errno) on failure.
804 */
805 struct drm_gpuvm *
806 msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
807 u64 va_start, u64 va_size, bool managed)
808 {
809 /*
810 * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
811 * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose
812 * tracking that an extobj is evicted) :facepalm:
813 */
814 enum drm_gpuvm_flags flags = 0;
815 struct msm_gem_vm *vm;
816 struct drm_gem_object *dummy_gem;
817 int ret = 0;
818
819 if (IS_ERR(mmu))
820 return ERR_CAST(mmu);
821
822 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
823 if (!vm)
824 return ERR_PTR(-ENOMEM);
825
826 dummy_gem = drm_gpuvm_resv_object_alloc(drm);
827 if (!dummy_gem) {
828 ret = -ENOMEM;
829 goto err_free_vm;
830 }
831
832 if (!managed) {
833 struct drm_sched_init_args args = {
834 .ops = &msm_vm_bind_ops,
835 .num_rqs = 1,
836 .credit_limit = 1,
837 .timeout = MAX_SCHEDULE_TIMEOUT,
838 .name = "msm-vm-bind",
839 .dev = drm->dev,
840 };
841
842 ret = drm_sched_init(&vm->sched, &args);
843 if (ret)
844 goto err_free_dummy;
845
846 init_waitqueue_head(&vm->prealloc_throttle.wait);
847 }
848
849 drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
850 va_start, va_size, 0, 0, &msm_gpuvm_ops);
851 drm_gem_object_put(dummy_gem);
852
853 vm->mmu = mmu;
854 mutex_init(&vm->mmu_lock);
855 vm->managed = managed;
856
857 drm_mm_init(&vm->mm, va_start, va_size);
858
859 /*
860 * We don't really need vm log for kernel managed VMs, as the kernel
861 * is responsible for ensuring that GEM objs are mapped if they are
862 * used by a submit. Furthermore we piggyback on mmu_lock to serialize
863 * access to the log.
864 *
865 * Limit the max log_shift to 8 to prevent userspace from asking us
866 * for an unreasonable log size.
867 */
868 if (!managed)
869 vm->log_shift = MIN(vm_log_shift, 8);
870
871 if (vm->log_shift) {
872 vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
873 GFP_KERNEL | __GFP_ZERO);
874 }
875
876 return &vm->base;
877
878 err_free_dummy:
879 drm_gem_object_put(dummy_gem);
880
881 err_free_vm:
882 kfree(vm);
883 return ERR_PTR(ret);
884 }
885
886 /**
887 * msm_gem_vm_close() - Close a VM
888 * @gpuvm: The VM to close
889 *
890 * Called when the drm device file is closed, to tear down VM related resources
891 * (which will drop refcounts to GEM objects that were still mapped into the
892 * VM at the time).
893 */
894 void
895 msm_gem_vm_close(struct drm_gpuvm *gpuvm)
896 {
897 struct msm_gem_vm *vm = to_msm_vm(gpuvm);
898 struct drm_gpuva *vma, *tmp;
899 struct drm_exec exec;
900
901 /*
902 * For kernel managed VMs, the VMAs are torn down when the handle is
903 * closed, so nothing more to do.
904 */
905 if (vm->managed)
906 return;
907
908 if (vm->last_fence)
909 dma_fence_wait(vm->last_fence, false);
910
911 /* Kill the scheduler now, so we aren't racing with it for cleanup: */
912 drm_sched_stop(&vm->sched, NULL);
913 drm_sched_fini(&vm->sched);
914
915 /* Tear down any remaining mappings: */
916 drm_exec_init(&exec, 0, 2);
917 drm_exec_until_all_locked (&exec) {
918 drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
919 drm_exec_retry_on_contention(&exec);
920
921 drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
922 struct drm_gem_object *obj = vma->gem.obj;
923
924 /*
925 * MSM_BO_NO_SHARE objects share the same resv as the
926 * VM, in which case the obj is already locked:
927 */
928 if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
929 obj = NULL;
930
931 if (obj) {
932 drm_exec_lock_obj(&exec, obj);
933 drm_exec_retry_on_contention(&exec);
934 }
935
936 msm_gem_vma_unmap(vma, "close");
937 msm_gem_vma_close(vma);
938
939 if (obj) {
940 drm_exec_unlock_obj(&exec, obj);
941 }
942 }
943 }
944 drm_exec_fini(&exec);
945 }
946
947
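/*
 * Allocate a VM_BIND job with room for nr_ops ops and initialize its
 * scheduler job.
 */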
948 static struct msm_vm_bind_job *
949 vm_bind_job_create(struct drm_device *dev, struct drm_file *file,
950 struct msm_gpu_submitqueue *queue, uint32_t nr_ops)
951 {
952 struct msm_vm_bind_job *job;
953 uint64_t sz;
954 int ret;
955
956 sz = struct_size(job, ops, nr_ops);
957
958 if (sz > SIZE_MAX)
959 return ERR_PTR(-ENOMEM);
960
961 job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
962 if (!job)
963 return ERR_PTR(-ENOMEM);
964
965 ret = drm_sched_job_init(&job->base, queue->entity, 1, queue,
966 file->client_id);
967 if (ret) {
968 kfree(job);
969 return ERR_PTR(ret);
970 }
971
972 job->vm = msm_context_vm(dev, queue->ctx);
973 job->queue = queue;
974 INIT_LIST_HEAD(&job->vm_ops);
975
976 return job;
977 }
978
979 static bool invalid_alignment(uint64_t addr)
980 {
981 /*
982 * Technically this is about GPU alignment, not CPU alignment. But
983 * I've not seen any qcom SoC where the SMMU does not support the
984 * CPU's smallest page size.
985 */
986 return !PAGE_ALIGNED(addr);
987 }
988
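/*
 * Copy one userspace-requested op into the job, validating flags,
 * alignment, range, and handle usage.
 */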
989 static int
990 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
991 {
992 struct drm_device *dev = job->vm->drm;
993 struct msm_drm_private *priv = dev->dev_private;
994 int i = job->nr_ops++;
995 int ret = 0;
996
997 job->ops[i].op = op->op;
998 job->ops[i].handle = op->handle;
999 job->ops[i].obj_offset = op->obj_offset;
1000 job->ops[i].iova = op->iova;
1001 job->ops[i].range = op->range;
1002 job->ops[i].flags = op->flags;
1003
1004 if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
1005 ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
1006
1007 if (invalid_alignment(op->iova))
1008 ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
1009
1010 if (invalid_alignment(op->obj_offset))
1011 ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
1012
1013 if (invalid_alignment(op->range))
1014 ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
1015
1016 if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
1017 ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
1018
1019 /*
1020 * MAP must specify a valid handle. But the handle MBZ for
1021 * UNMAP or MAP_NULL.
1022 */
1023 if (op->op == MSM_VM_BIND_OP_MAP) {
1024 if (!op->handle)
1025 ret = UERR(EINVAL, dev, "invalid handle\n");
1026 } else if (op->handle) {
1027 ret = UERR(EINVAL, dev, "handle must be zero\n");
1028 }
1029
1030 switch (op->op) {
1031 case MSM_VM_BIND_OP_MAP:
1032 case MSM_VM_BIND_OP_MAP_NULL:
1033 case MSM_VM_BIND_OP_UNMAP:
1034 break;
1035 default:
1036 ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
1037 break;
1038 }
1039
1040 if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
1041 !adreno_smmu_has_prr(priv->gpu)) {
1042 ret = UERR(EINVAL, dev, "PRR not supported\n");
1043 }
1044
1045 return ret;
1046 }
1047
1048 /*
1049 * ioctl parsing, parameter validation, and GEM handle lookup
1050 */
1051 static int
1052 vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args,
1053 struct drm_file *file, int *nr_bos)
1054 {
1055 struct drm_device *dev = job->vm->drm;
1056 int ret = 0;
1057 int cnt = 0;
1058 int i = -1;
1059
1060 if (args->nr_ops == 1) {
1061 /* Single op case, the op is inlined: */
1062 ret = lookup_op(job, &args->op);
1063 } else {
1064 for (unsigned i = 0; i < args->nr_ops; i++) {
1065 struct drm_msm_vm_bind_op op;
1066 void __user *userptr =
1067 u64_to_user_ptr(args->ops + (i * sizeof(op)));
1068
1069 /* make sure we don't have garbage flags, in case we hit
1070 * error path before flags is initialized:
1071 */
1072 job->ops[i].flags = 0;
1073
1074 if (copy_from_user(&op, userptr, sizeof(op))) {
1075 ret = -EFAULT;
1076 break;
1077 }
1078
1079 ret = lookup_op(job, &op);
1080 if (ret)
1081 break;
1082 }
1083 }
1084
1085 if (ret) {
1086 job->nr_ops = 0;
1087 goto out;
1088 }
1089
1090 spin_lock(&file->table_lock);
1091
1092 for (i = 0; i < args->nr_ops; i++) {
1093 struct msm_vm_bind_op *op = &job->ops[i];
1094 struct drm_gem_object *obj;
1095
1096 if (!op->handle) {
1097 op->obj = NULL;
1098 continue;
1099 }
1100
1101 /*
1102 * normally use drm_gem_object_lookup(), but for bulk lookup
1103 * all under single table_lock just hit object_idr directly:
1104 */
1105 obj = idr_find(&file->object_idr, op->handle);
1106 if (!obj) {
1107 ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
1108 goto out_unlock;
1109 }
1110
1111 drm_gem_object_get(obj);
1112
1113 op->obj = obj;
1114 cnt++;
1115
1116 if ((op->range + op->obj_offset) > obj->size) {
1117 ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
1118 op->range, op->obj_offset, obj->size);
1119 goto out_unlock;
1120 }
1121 }
1122
1123 *nr_bos = cnt;
1124
1125 out_unlock:
1126 spin_unlock(&file->table_lock);
1127
1128 if (ret) {
1129 for (; i >= 0; i--) {
1130 struct msm_vm_bind_op *op = &job->ops[i];
1131
1132 if (!op->obj)
1133 continue;
1134
1135 drm_gem_object_put(op->obj);
1136 op->obj = NULL;
1137 }
1138 }
1139 out:
1140 return ret;
1141 }
1142
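/*
 * Accumulate the pgtable pre-allocation count for a contiguous run of
 * MAP/MAP_NULL ops.
 */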
1143 static void
1144 prealloc_count(struct msm_vm_bind_job *job,
1145 struct msm_vm_bind_op *first,
1146 struct msm_vm_bind_op *last)
1147 {
1148 struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu;
1149
1150 if (!first)
1151 return;
1152
1153 uint64_t start_iova = first->iova;
1154 uint64_t end_iova = last->iova + last->range;
1155
1156 mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);
1157 }
1158
1159 static bool
1160 ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
1161 {
1162 /*
1163 * Last level pte covers 2MB.. so we should merge two ops, from
1164 * the PoV of figuring out how much pgtable pages to pre-allocate
1165 * if they land in the same 2MB range:
1166 */
1167 uint64_t pte_mask = ~(SZ_2M - 1);
1168 return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
1169 }
1170
1171 /*
1172 * Determine the amount of memory to prealloc for pgtables. For sparse images,
1173 * in particular, userspace plays some tricks with the order of page mappings
1174 * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
1175 * So detect when multiple MAP operations are physically contiguous, and count
1176 * them as a single mapping. Otherwise the prealloc_count() will not realize
1177 * they can share pagetable pages and vastly overcount.
1178 */
1179 static int
1180 vm_bind_prealloc_count(struct msm_vm_bind_job *job)
1181 {
1182 struct msm_vm_bind_op *first = NULL, *last = NULL;
1183 struct msm_gem_vm *vm = to_msm_vm(job->vm);
1184 int ret;
1185
1186 for (int i = 0; i < job->nr_ops; i++) {
1187 struct msm_vm_bind_op *op = &job->ops[i];
1188
1189 /* We only care about MAP/MAP_NULL: */
1190 if (op->op == MSM_VM_BIND_OP_UNMAP)
1191 continue;
1192
1193 /*
1194 * If op is contiguous with last in the current range, then
1195 * it becomes the new last in the range and we continue
1196 * looping:
1197 */
1198 if (last && ops_are_same_pte(last, op)) {
1199 last = op;
1200 continue;
1201 }
1202
1203 /*
1204 * If op is not contiguous with the current range, flush
1205 * the current range and start anew:
1206 */
1207 prealloc_count(job, first, last);
1208 first = last = op;
1209 }
1210
1211 /* Flush the remaining range: */
1212 prealloc_count(job, first, last);
1213
1214 /*
1215 * Now that we know the needed amount to pre-alloc, throttle on pending
1216 * VM_BIND jobs if we already have too much pre-alloc memory in flight
1217 */
1218 ret = wait_event_interruptible(
1219 vm->prealloc_throttle.wait,
1220 atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
1221 if (ret)
1222 return ret;
1223
1224 atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
1225
1226 return 0;
1227 }
1228
1229 /*
1230 * Lock VM and GEM objects
1231 */
1232 static int
1233 vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
1234 {
1235 int ret;
1236
1237 /* Lock VM and objects: */
1238 drm_exec_until_all_locked (exec) {
1239 ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
1240 drm_exec_retry_on_contention(exec);
1241 if (ret)
1242 return ret;
1243
1244 for (unsigned i = 0; i < job->nr_ops; i++) {
1245 const struct msm_vm_bind_op *op = &job->ops[i];
1246
1247 switch (op->op) {
1248 case MSM_VM_BIND_OP_UNMAP:
1249 ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
1250 op->iova,
1251 op->obj_offset);
1252 break;
1253 case MSM_VM_BIND_OP_MAP:
1254 case MSM_VM_BIND_OP_MAP_NULL: {
1255 struct drm_gpuvm_map_req map_req = {
1256 .map.va.addr = op->iova,
1257 .map.va.range = op->range,
1258 .map.gem.obj = op->obj,
1259 .map.gem.offset = op->obj_offset,
1260 };
1261
1262 ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
1263 break;
1264 }
1265 default:
1266 /*
1267 * lookup_op() should have already thrown an error for
1268 * invalid ops
1269 */
1270 WARN_ON("unreachable");
1271 }
1272
1273 drm_exec_retry_on_contention(exec);
1274 if (ret)
1275 return ret;
1276 }
1277 }
1278
1279 return 0;
1280 }
1281
1282 /*
1283 * Pin GEM objects, ensuring that we have backing pages. Pinning will move
1284 * the object to the pinned LRU so that the shrinker knows to first consider
1285 * other objects for evicting.
1286 */
1287 static int
1288 vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
1289 {
1290 struct drm_gem_object *obj;
1291
1292 /*
1293 * First loop, before holding the LRU lock, avoids holding the
1294 * LRU lock while calling msm_gem_pin_vma_locked (which could
1295 * trigger get_pages())
1296 */
1297 job_foreach_bo (obj, job) {
1298 struct page **pages;
1299
1300 pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
1301 if (IS_ERR(pages))
1302 return PTR_ERR(pages);
1303 }
1304
1305 struct msm_drm_private *priv = job->vm->drm->dev_private;
1306
1307 /*
1308 * A second loop while holding the LRU lock (a) avoids acquiring/dropping
1309 * the LRU lock for each individual bo, while (b) avoiding holding the
1310 * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
1311 * get_pages() which could trigger reclaim.. and if we held the LRU lock
1312 * could trigger deadlock with the shrinker).
1313 */
1314 mutex_lock(&priv->lru.lock);
1315 job_foreach_bo (obj, job)
1316 msm_gem_pin_obj_locked(obj);
1317 mutex_unlock(&priv->lru.lock);
1318
1319 job->bos_pinned = true;
1320
1321 return 0;
1322 }
1323
1324 /*
1325 * Unpin GEM objects. Normally this is done after the bind job is run.
1326 */
1327 static void
1328 vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
1329 {
1330 struct drm_gem_object *obj;
1331
1332 if (!job->bos_pinned)
1333 return;
1334
1335 job_foreach_bo (obj, job)
1336 msm_gem_unpin_locked(obj);
1337
1338 job->bos_pinned = false;
1339 }
1340
1341 /*
1342 * Pre-allocate pgtable memory, and translate the VM bind requests into a
1343 * sequence of pgtable updates to be applied asynchronously.
1344 */
1345 static int
1346 vm_bind_job_prepare(struct msm_vm_bind_job *job)
1347 {
1348 struct msm_gem_vm *vm = to_msm_vm(job->vm);
1349 struct msm_mmu *mmu = vm->mmu;
1350 int ret;
1351
1352 ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
1353 if (ret)
1354 return ret;
1355
1356 for (unsigned i = 0; i < job->nr_ops; i++) {
1357 const struct msm_vm_bind_op *op = &job->ops[i];
1358 struct op_arg arg = {
1359 .job = job,
1360 .op = op,
1361 };
1362
1363 switch (op->op) {
1364 case MSM_VM_BIND_OP_UNMAP:
1365 ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
1366 op->range);
1367 break;
1368 case MSM_VM_BIND_OP_MAP:
1369 if (op->flags & MSM_VM_BIND_OP_DUMP)
1370 arg.flags |= MSM_VMA_DUMP;
1371 fallthrough;
1372 case MSM_VM_BIND_OP_MAP_NULL: {
1373 struct drm_gpuvm_map_req map_req = {
1374 .map.va.addr = op->iova,
1375 .map.va.range = op->range,
1376 .map.gem.obj = op->obj,
1377 .map.gem.offset = op->obj_offset,
1378 };
1379
1380 ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
1381 break;
1382 }
1383 default:
1384 /*
1385 * lookup_op() should have already thrown an error for
1386 * invalid ops
1387 */
1388 BUG_ON("unreachable");
1389 }
1390
1391 if (ret) {
1392 /*
1393 * If we've already started modifying the vm, we can't
1394 * adequately describe to userspace the intermediate
1395 * state the vm is in. So throw up our hands!
1396 */
1397 if (i > 0)
1398 msm_gem_vm_unusable(job->vm);
1399 return ret;
1400 }
1401 }
1402
1403 return 0;
1404 }
1405
1406 /*
1407 * Attach fences to the GEM objects being bound. This will signify to
1408 * the shrinker that they are busy even after dropping the locks (ie.
1409 * drm_exec_fini())
1410 */
1411 static void
1412 vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
1413 {
1414 for (unsigned i = 0; i < job->nr_ops; i++) {
1415 struct drm_gem_object *obj = job->ops[i].obj;
1416
1417 if (!obj)
1418 continue;
1419
1420 dma_resv_add_fence(obj->resv, job->fence,
1421 DMA_RESV_USAGE_KERNEL);
1422 }
1423 }
1424
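/*
 * Handler for the VM_BIND ioctl: validate the request, build a VM_BIND job
 * (including in/out fence and syncobj handling), and push it onto the VM's
 * scheduler entity.
 */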
1425 int
1426 msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
1427 {
1428 struct msm_drm_private *priv = dev->dev_private;
1429 struct drm_msm_vm_bind *args = data;
1430 struct msm_context *ctx = file->driver_priv;
1431 struct msm_vm_bind_job *job = NULL;
1432 struct msm_gpu *gpu = priv->gpu;
1433 struct msm_gpu_submitqueue *queue;
1434 struct msm_syncobj_post_dep *post_deps = NULL;
1435 struct drm_syncobj **syncobjs_to_reset = NULL;
1436 struct sync_file *sync_file = NULL;
1437 struct dma_fence *fence;
1438 int out_fence_fd = -1;
1439 int ret, nr_bos = 0;
1440 unsigned i;
1441
1442 if (!gpu)
1443 return -ENXIO;
1444
1445 /*
1446 * Maybe we could allow just UNMAP ops? OTOH userspace should just
1447 * immediately close the device file and all will be torn down.
1448 */
1449 if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
1450 return UERR(EPIPE, dev, "context is unusable");
1451
1452 /*
1453 * Technically, you cannot create a VM_BIND submitqueue in the first
1454 * place, if you haven't opted in to VM_BIND context. But it is
1455 * cleaner / less confusing, to check this case directly.
1456 */
1457 if (!msm_context_is_vmbind(ctx))
1458 return UERR(EINVAL, dev, "context does not support vmbind");
1459
1460 if (args->flags & ~MSM_VM_BIND_FLAGS)
1461 return UERR(EINVAL, dev, "invalid flags");
1462
1463 queue = msm_submitqueue_get(ctx, args->queue_id);
1464 if (!queue)
1465 return -ENOENT;
1466
1467 if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
1468 ret = UERR(EINVAL, dev, "Invalid queue type");
1469 goto out_post_unlock;
1470 }
1471
1472 if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
1473 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
1474 if (out_fence_fd < 0) {
1475 ret = out_fence_fd;
1476 goto out_post_unlock;
1477 }
1478 }
1479
1480 job = vm_bind_job_create(dev, file, queue, args->nr_ops);
1481 if (IS_ERR(job)) {
1482 ret = PTR_ERR(job);
1483 goto out_post_unlock;
1484 }
1485
1486 ret = mutex_lock_interruptible(&queue->lock);
1487 if (ret)
1488 goto out_post_unlock;
1489
1490 if (args->flags & MSM_VM_BIND_FENCE_FD_IN) {
1491 struct dma_fence *in_fence;
1492
1493 in_fence = sync_file_get_fence(args->fence_fd);
1494
1495 if (!in_fence) {
1496 ret = UERR(EINVAL, dev, "invalid in-fence");
1497 goto out_unlock;
1498 }
1499
1500 ret = drm_sched_job_add_dependency(&job->base, in_fence);
1501 if (ret)
1502 goto out_unlock;
1503 }
1504
1505 if (args->in_syncobjs > 0) {
1506 syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
1507 file, args->in_syncobjs,
1508 args->nr_in_syncobjs,
1509 args->syncobj_stride);
1510 if (IS_ERR(syncobjs_to_reset)) {
1511 ret = PTR_ERR(syncobjs_to_reset);
1512 goto out_unlock;
1513 }
1514 }
1515
1516 if (args->out_syncobjs > 0) {
1517 post_deps = msm_syncobj_parse_post_deps(dev, file,
1518 args->out_syncobjs,
1519 args->nr_out_syncobjs,
1520 args->syncobj_stride);
1521 if (IS_ERR(post_deps)) {
1522 ret = PTR_ERR(post_deps);
1523 goto out_unlock;
1524 }
1525 }
1526
1527 ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos);
1528 if (ret)
1529 goto out_unlock;
1530
1531 ret = vm_bind_prealloc_count(job);
1532 if (ret)
1533 goto out_unlock;
1534
1535 struct drm_exec exec;
1536 unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
1537 drm_exec_init(&exec, flags, nr_bos + 1);
1538
1539 ret = vm_bind_job_lock_objects(job, &exec);
1540 if (ret)
1541 goto out;
1542
1543 ret = vm_bind_job_pin_objects(job);
1544 if (ret)
1545 goto out;
1546
1547 ret = vm_bind_job_prepare(job);
1548 if (ret)
1549 goto out;
1550
1551 drm_sched_job_arm(&job->base);
1552
1553 job->fence = dma_fence_get(&job->base.s_fence->finished);
1554
1555 if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
1556 sync_file = sync_file_create(job->fence);
1557 if (!sync_file)
1558 ret = -ENOMEM;
1559 }
1560
1561 if (ret)
1562 goto out;
1563
1564 vm_bind_job_attach_fences(job);
1565
1566 /*
1567 * The job can be free'd (and fence unref'd) at any point after
1568 * drm_sched_entity_push_job(), so we need to hold our own ref
1569 */
1570 fence = dma_fence_get(job->fence);
1571
1572 drm_sched_entity_push_job(&job->base);
1573
1574 msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
1575 msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
1576
1577 dma_fence_put(fence);
1578
1579 out:
1580 if (ret)
1581 vm_bind_job_unpin_objects(job);
1582
1583 drm_exec_fini(&exec);
1584 out_unlock:
1585 mutex_unlock(&queue->lock);
1586 out_post_unlock:
1587 if (ret) {
1588 if (out_fence_fd >= 0)
1589 put_unused_fd(out_fence_fd);
1590 if (sync_file)
1591 fput(sync_file->file);
1592 } else if (sync_file) {
1593 fd_install(out_fence_fd, sync_file->file);
1594 args->fence_fd = out_fence_fd;
1595 }
1596
1597 if (!IS_ERR_OR_NULL(job)) {
1598 if (ret)
1599 msm_vma_job_free(&job->base);
1600 } else {
1601 /*
1602 * If the submit hasn't yet taken ownership of the queue
1603 * then we need to drop the reference ourself:
1604 */
1605 msm_submitqueue_put(queue);
1606 }
1607
1608 if (!IS_ERR_OR_NULL(post_deps)) {
1609 for (i = 0; i < args->nr_out_syncobjs; ++i) {
1610 kfree(post_deps[i].chain);
1611 drm_syncobj_put(post_deps[i].syncobj);
1612 }
1613 kfree(post_deps);
1614 }
1615
1616 if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
1617 for (i = 0; i < args->nr_in_syncobjs; ++i) {
1618 if (syncobjs_to_reset[i])
1619 drm_syncobj_put(syncobjs_to_reset[i]);
1620 }
1621 kfree(syncobjs_to_reset);
1622 }
1623
1624 return ret;
1625 }
1626