Lines Matching +full:vm +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0-only
21 MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
25 * struct msm_vm_map_op - create new pgtable mapping
30 /** @range: size of the region to map */
32 /** @offset: offset into @sgt to map */
34 /** @sgt: pages to map, or NULL for a PRR mapping */
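For orientation, the rest of this descriptor can be pieced together from the fields that vm_map_op() and vm_log() dereference further down (iova, range, offset, sgt, prot, queue_id). The sketch below is reconstructed from those usages and may not match the driver's exact layout or comment text:

struct msm_vm_map_op {
	/** @iova: GPU virtual address at which the region is mapped */
	uint64_t iova;
	/** @range: size of the region to map */
	uint64_t range;
	/** @offset: offset into @sgt to map */
	uint64_t offset;
	/** @sgt: pages to map, or NULL for a PRR mapping */
	struct sg_table *sgt;
	/** @prot: protection flags handed down to the pgtable code */
	int prot;
	/** @queue_id: submitqueue the op was issued on, recorded in the VM log */
	int queue_id;
};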
48 * struct msm_vm_unmap_op - unmap a range of pages from pgtable
68 * struct msm_vm_op - A MAP or UNMAP operation
77 /** @map: Parameters used if op == MSM_VMA_OP_MAP */
78 struct msm_vm_map_op map; member
90 * But the same can be required in the map path, for example if
100 * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
102 * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
103 * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
111 /** @vm: The VM being operated on */
112 struct drm_gpuvm *vm; member
117 /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
129 * before we start applying the updates to try to do as much up-
130 * front error checking as possible, to avoid the VM being in an
150 for (unsigned i = 0; i < (_job)->nr_ops; i++) \
151 if ((obj = (_job)->ops[i].obj))
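Pulling the scattered struct hits together, the bind job container looks roughly as follows. Only members that lines in this listing actually touch are shown, and the type names for @queue and @prealloc are taken from the rest of the msm driver, so treat it as a sketch rather than the full definition:

struct msm_vm_bind_job {
	/** @base: drm_sched job, so the pgtable updates run asynchronously */
	struct drm_sched_job base;
	/** @vm: the VM being operated on */
	struct drm_gpuvm *vm;
	/** @queue: submitqueue the VM_BIND ioctl was issued on */
	struct msm_gpu_submitqueue *queue;
	/** @prealloc: tracking for pre-allocated MMU pgtable pages */
	struct msm_mmu_prealloc prealloc;
	/** @vm_ops: msm_vm_op list produced by the drm_gpuvm sm_map/sm_unmap callbacks */
	struct list_head vm_ops;
	/** @fence: the scheduler's finished fence for this job */
	struct dma_fence *fence;
	/** @bos_pinned: set once vm_bind_job_pin_objects() has pinned the BOs */
	bool bos_pinned;
	/** @nr_ops: number of userspace-requested ops in @ops */
	unsigned int nr_ops;
	/** @ops: table of userspace-requested MSM_VM_BIND_OP_* operations */
	struct msm_vm_bind_op ops[];
};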
161 struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base); in msm_gem_vm_free() local
163 drm_mm_takedown(&vm->mm); in msm_gem_vm_free()
164 if (vm->mmu) in msm_gem_vm_free()
165 vm->mmu->funcs->destroy(vm->mmu); in msm_gem_vm_free()
166 dma_fence_put(vm->last_fence); in msm_gem_vm_free()
167 put_pid(vm->pid); in msm_gem_vm_free()
168 kfree(vm->log); in msm_gem_vm_free()
169 kfree(vm); in msm_gem_vm_free()
173 * msm_gem_vm_unusable() - Mark a VM as unusable
174 * @gpuvm: the VM to mark unusable
179 struct msm_gem_vm *vm = to_msm_vm(gpuvm); in msm_gem_vm_unusable() local
180 uint32_t vm_log_len = (1 << vm->log_shift); in msm_gem_vm_unusable()
181 uint32_t vm_log_mask = vm_log_len - 1; in msm_gem_vm_unusable()
185 vm->unusable = true; in msm_gem_vm_unusable()
188 if (!vm->log || !vm->log[0].op) in msm_gem_vm_unusable()
191 mutex_lock(&vm->mmu_lock); in msm_gem_vm_unusable()
198 first = vm->log_idx; in msm_gem_vm_unusable()
200 if (!vm->log[first].op) { in msm_gem_vm_unusable()
203 * entries 0 to idx-1 are valid (ie. we haven't wrapped around in msm_gem_vm_unusable()
206 nr_vm_logs = MAX(0, first - 1); in msm_gem_vm_unusable()
212 pr_err("vm-log:\n"); in msm_gem_vm_unusable()
215 struct msm_gem_vm_log_entry *e = &vm->log[idx]; in msm_gem_vm_unusable()
216 pr_err(" - %s:%d: 0x%016llx-0x%016llx\n", in msm_gem_vm_unusable()
217 e->op, e->queue_id, e->iova, in msm_gem_vm_unusable()
218 e->iova + e->range); in msm_gem_vm_unusable()
221 mutex_unlock(&vm->mmu_lock); in msm_gem_vm_unusable()
225 vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id) in vm_log() argument
229 if (!vm->managed) in vm_log()
230 lockdep_assert_held(&vm->mmu_lock); in vm_log()
232 vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range); in vm_log()
234 if (!vm->log) in vm_log()
237 idx = vm->log_idx; in vm_log()
238 vm->log[idx].op = op; in vm_log()
239 vm->log[idx].iova = iova; in vm_log()
240 vm->log[idx].range = range; in vm_log()
241 vm->log[idx].queue_id = queue_id; in vm_log()
242 vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1); in vm_log()
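The index update on line 242 is the usual power-of-two ring-buffer trick. A minimal standalone illustration (the log_shift value is arbitrary):

#include <assert.h>

int main(void)
{
	unsigned log_shift = 5;                /* 1 << 5 == 32 log entries */
	unsigned mask = (1u << log_shift) - 1; /* 0x1f */
	unsigned idx = 31;                     /* last slot */

	idx = (idx + 1) & mask;                /* wraps 31 -> 0 without a branch */
	assert(idx == 0);
	return 0;
}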
246 vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) in vm_unmap_op() argument
248 const char *reason = op->reason; in vm_unmap_op()
253 vm_log(vm, reason, op->iova, op->range, op->queue_id); in vm_unmap_op()
255 vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); in vm_unmap_op()
259 vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) in vm_map_op() argument
261 vm_log(vm, "map", op->iova, op->range, op->queue_id); in vm_map_op()
263 return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset, in vm_map_op()
264 op->range, op->prot); in vm_map_op()
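vm_map_op() and vm_unmap_op() go through the msm_mmu function table. From the call sites visible in this listing (destroy, map, unmap and the three prealloc hooks), the relevant slice of that vtable is approximately the following; parameter types are inferred from the callers rather than taken from msm_mmu.h, and struct names other than msm_mmu are assumptions:

struct msm_mmu_funcs {
	/* tear down the pgtables (msm_gem_vm_free) */
	void (*destroy)(struct msm_mmu *mmu);
	/* map @len bytes of @sgt starting at @off to @iova with @prot (vm_map_op) */
	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
		   size_t off, size_t len, int prot);
	/* unmap @len bytes starting at @iova (vm_unmap_op) */
	void (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
	/* estimate how many pgtable pages mapping [@iova, @iova + @len) needs */
	void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
			       uint64_t iova, size_t len);
	/* allocate / release those pages around VM_BIND job execution */
	int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
	void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
};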
270 struct msm_gem_vm *vm = to_msm_vm(vma->vm); in msm_gem_vma_unmap() local
274 if (!msm_vma->mapped) in msm_gem_vma_unmap()
282 if (!vm->managed) in msm_gem_vma_unmap()
283 mutex_lock(&vm->mmu_lock); in msm_gem_vma_unmap()
285 vm_unmap_op(vm, &(struct msm_vm_unmap_op){ in msm_gem_vma_unmap()
286 .iova = vma->va.addr, in msm_gem_vma_unmap()
287 .range = vma->va.range, in msm_gem_vma_unmap()
291 if (!vm->managed) in msm_gem_vma_unmap()
292 mutex_unlock(&vm->mmu_lock); in msm_gem_vma_unmap()
294 msm_vma->mapped = false; in msm_gem_vma_unmap()
297 /* Map and pin vma: */
301 struct msm_gem_vm *vm = to_msm_vm(vma->vm); in msm_gem_vma_map() local
305 if (GEM_WARN_ON(!vma->va.addr)) in msm_gem_vma_map()
306 return -EINVAL; in msm_gem_vma_map()
308 if (msm_vma->mapped) in msm_gem_vma_map()
311 msm_vma->mapped = true; in msm_gem_vma_map()
318 if (!vm->managed) in msm_gem_vma_map()
319 mutex_lock(&vm->mmu_lock); in msm_gem_vma_map()
323 * a lock across map/unmap which is also used in the job_run() in msm_gem_vma_map()
327 ret = vm_map_op(vm, &(struct msm_vm_map_op){ in msm_gem_vma_map()
328 .iova = vma->va.addr, in msm_gem_vma_map()
329 .range = vma->va.range, in msm_gem_vma_map()
330 .offset = vma->gem.offset, in msm_gem_vma_map()
335 if (!vm->managed) in msm_gem_vma_map()
336 mutex_unlock(&vm->mmu_lock); in msm_gem_vma_map()
339 msm_vma->mapped = false; in msm_gem_vma_map()
347 struct msm_gem_vm *vm = to_msm_vm(vma->vm); in msm_gem_vma_close() local
350 GEM_WARN_ON(msm_vma->mapped); in msm_gem_vma_close()
352 drm_gpuvm_resv_assert_held(&vm->base); in msm_gem_vma_close()
354 if (vma->gem.obj) in msm_gem_vma_close()
355 msm_gem_assert_locked(vma->gem.obj); in msm_gem_vma_close()
357 if (vma->va.addr && vm->managed) in msm_gem_vma_close()
358 drm_mm_remove_node(&msm_vma->node); in msm_gem_vma_close()
371 struct msm_gem_vm *vm = to_msm_vm(gpuvm); in msm_gem_vma_new() local
376 drm_gpuvm_resv_assert_held(&vm->base); in msm_gem_vma_new()
380 return ERR_PTR(-ENOMEM); in msm_gem_vma_new()
382 if (vm->managed) { in msm_gem_vma_new()
384 BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */ in msm_gem_vma_new()
385 ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, in msm_gem_vma_new()
386 obj->size, PAGE_SIZE, 0, in msm_gem_vma_new()
392 range_start = vma->node.start; in msm_gem_vma_new()
393 range_end = range_start + obj->size; in msm_gem_vma_new()
397 GEM_WARN_ON((range_end - range_start) > obj->size); in msm_gem_vma_new()
401 .va.range = range_end - range_start, in msm_gem_vma_new()
406 drm_gpuva_init_from_op(&vma->base, &op_map); in msm_gem_vma_new()
407 vma->mapped = false; in msm_gem_vma_new()
409 ret = drm_gpuva_insert(&vm->base, &vma->base); in msm_gem_vma_new()
414 return &vma->base; in msm_gem_vma_new()
416 vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj); in msm_gem_vma_new()
423 drm_gpuva_link(&vma->base, vm_bo); in msm_gem_vma_new()
426 return &vma->base; in msm_gem_vma_new()
429 drm_gpuva_remove(&vma->base); in msm_gem_vma_new()
431 if (vm->managed) in msm_gem_vma_new()
432 drm_mm_remove_node(&vma->node); in msm_gem_vma_new()
441 struct drm_gem_object *obj = vm_bo->obj; in msm_gem_vm_bo_validate()
470 list_add_tail(&op->node, &arg->job->vm_ops); in vm_op_enqueue()
472 if (op->obj) in vm_op_enqueue()
473 drm_gem_object_get(op->obj); in vm_op_enqueue()
479 return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset, in vma_from_op()
480 op->va.addr, op->va.addr + op->va.range); in vma_from_op()
487 struct msm_vm_bind_job *job = arg->job; in msm_gem_vm_sm_step_map()
488 struct drm_gem_object *obj = op->map.gem.obj; in msm_gem_vm_sm_step_map()
493 if (arg->kept) in msm_gem_vm_sm_step_map()
496 vma = vma_from_op(arg, &op->map); in msm_gem_vm_sm_step_map()
500 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, in msm_gem_vm_sm_step_map()
501 vma->va.addr, vma->va.range); in msm_gem_vm_sm_step_map()
503 vma->flags = ((struct op_arg *)arg)->flags; in msm_gem_vm_sm_step_map()
506 sgt = to_msm_bo(obj)->sgt; in msm_gem_vm_sm_step_map()
515 .map = { in msm_gem_vm_sm_step_map()
517 .iova = vma->va.addr, in msm_gem_vm_sm_step_map()
518 .range = vma->va.range, in msm_gem_vm_sm_step_map()
519 .offset = vma->gem.offset, in msm_gem_vm_sm_step_map()
521 .queue_id = job->queue->id, in msm_gem_vm_sm_step_map()
523 .obj = vma->gem.obj, in msm_gem_vm_sm_step_map()
526 to_msm_vma(vma)->mapped = true; in msm_gem_vm_sm_step_map()
534 struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job; in msm_gem_vm_sm_step_remap()
535 struct drm_gpuvm *vm = job->vm; in msm_gem_vm_sm_step_remap() local
536 struct drm_gpuva *orig_vma = op->remap.unmap->va; in msm_gem_vm_sm_step_remap()
538 struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo; in msm_gem_vm_sm_step_remap()
539 bool mapped = to_msm_vma(orig_vma)->mapped; in msm_gem_vm_sm_step_remap()
542 vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma, in msm_gem_vm_sm_step_remap()
543 orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range); in msm_gem_vm_sm_step_remap()
548 drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range); in msm_gem_vm_sm_step_remap()
555 .queue_id = job->queue->id, in msm_gem_vm_sm_step_remap()
557 .obj = orig_vma->gem.obj, in msm_gem_vm_sm_step_remap()
566 to_msm_vma(orig_vma)->mapped = false; in msm_gem_vm_sm_step_remap()
572 * in the VM's evict list: in msm_gem_vm_sm_step_remap()
581 flags = orig_vma->flags; in msm_gem_vm_sm_step_remap()
585 if (op->remap.prev) { in msm_gem_vm_sm_step_remap()
586 prev_vma = vma_from_op(arg, op->remap.prev); in msm_gem_vm_sm_step_remap()
590 vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range); in msm_gem_vm_sm_step_remap()
591 to_msm_vma(prev_vma)->mapped = mapped; in msm_gem_vm_sm_step_remap()
592 prev_vma->flags = flags; in msm_gem_vm_sm_step_remap()
595 if (op->remap.next) { in msm_gem_vm_sm_step_remap()
596 next_vma = vma_from_op(arg, op->remap.next); in msm_gem_vm_sm_step_remap()
600 vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range); in msm_gem_vm_sm_step_remap()
601 to_msm_vma(next_vma)->mapped = mapped; in msm_gem_vm_sm_step_remap()
602 next_vma->flags = flags; in msm_gem_vm_sm_step_remap()
618 struct msm_vm_bind_job *job = arg->job; in msm_gem_vm_sm_step_unmap()
619 struct drm_gpuva *vma = op->unmap.va; in msm_gem_vm_sm_step_unmap()
622 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, in msm_gem_vm_sm_step_unmap()
623 vma->va.addr, vma->va.range); in msm_gem_vm_sm_step_unmap()
626 * Detect in-place remap. Turnip does this to change the vma flags, in msm_gem_vm_sm_step_unmap()
631 if (op->unmap.keep && in msm_gem_vm_sm_step_unmap()
632 (arg->op->op == MSM_VM_BIND_OP_MAP) && in msm_gem_vm_sm_step_unmap()
633 (vma->gem.obj == arg->op->obj) && in msm_gem_vm_sm_step_unmap()
634 (vma->gem.offset == arg->op->obj_offset) && in msm_gem_vm_sm_step_unmap()
635 (vma->va.addr == arg->op->iova) && in msm_gem_vm_sm_step_unmap()
636 (vma->va.range == arg->op->range)) { in msm_gem_vm_sm_step_unmap()
637 /* We are only expecting a single in-place unmap+map cb pair: */ in msm_gem_vm_sm_step_unmap()
638 WARN_ON(arg->kept); in msm_gem_vm_sm_step_unmap()
640 /* Leave the existing VMA in place, but signal that to the map cb: */ in msm_gem_vm_sm_step_unmap()
641 arg->kept = true; in msm_gem_vm_sm_step_unmap()
643 /* Only flags are changing, so update that in-place: */ in msm_gem_vm_sm_step_unmap()
644 unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1); in msm_gem_vm_sm_step_unmap()
645 vma->flags = orig_flags | arg->flags; in msm_gem_vm_sm_step_unmap()
650 if (!msm_vma->mapped) in msm_gem_vm_sm_step_unmap()
656 .iova = vma->va.addr, in msm_gem_vm_sm_step_unmap()
657 .range = vma->va.range, in msm_gem_vm_sm_step_unmap()
658 .queue_id = job->queue->id, in msm_gem_vm_sm_step_unmap()
660 .obj = vma->gem.obj, in msm_gem_vm_sm_step_unmap()
663 msm_vma->mapped = false; in msm_gem_vm_sm_step_unmap()
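In practice the "kept" path means that rebinding the same BO range at the same VA with only different flags never touches the pgtables. A hedged illustration using the op fields that lookup_op() copies further down; bo_handle, gpu_va and bo_size are placeholders, and the kernel-internal msm_vm_bind_op is shown rather than the uapi struct:

/* Initial bind of a BO at a userspace-chosen VA: */
struct msm_vm_bind_op bind = {
	.op         = MSM_VM_BIND_OP_MAP,
	.handle     = bo_handle,
	.obj_offset = 0,
	.iova       = gpu_va,
	.range      = bo_size,
	.flags      = 0,
};

/* Re-issuing the identical bind with only the flags changed (e.g. adding
 * MSM_VM_BIND_OP_DUMP) shows up here as unmap(keep) + map over the same
 * range: the unmap step sets arg->kept and updates vma->flags in place,
 * the map step returns early, and no pgtable ops are queued. */
bind.flags = MSM_VM_BIND_OP_DUMP;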
683 struct msm_gem_vm *vm = to_msm_vm(job->vm); in msm_vma_job_run() local
685 int ret = vm->unusable ? -EINVAL : 0; in msm_vma_job_run()
689 mutex_lock(&vm->mmu_lock); in msm_vma_job_run()
690 vm->mmu->prealloc = &job->prealloc; in msm_vma_job_run()
692 while (!list_empty(&job->vm_ops)) { in msm_vma_job_run()
694 list_first_entry(&job->vm_ops, struct msm_vm_op, node); in msm_vma_job_run()
696 switch (op->op) { in msm_vma_job_run()
699 * On error, stop trying to map new things.. but we in msm_vma_job_run()
704 ret = vm_map_op(vm, &op->map); in msm_vma_job_run()
707 vm_unmap_op(vm, &op->unmap); in msm_vma_job_run()
710 drm_gem_object_put(op->obj); in msm_vma_job_run()
711 list_del(&op->node); in msm_vma_job_run()
715 vm->mmu->prealloc = NULL; in msm_vma_job_run()
716 mutex_unlock(&vm->mmu_lock); in msm_vma_job_run()
720 * now the VM is in an undefined state. Game over! in msm_vma_job_run()
723 msm_gem_vm_unusable(job->vm); in msm_vma_job_run()
739 struct msm_gem_vm *vm = to_msm_vm(job->vm); in msm_vma_job_free() local
742 vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc); in msm_vma_job_free()
744 atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight); in msm_vma_job_free()
751 msm_submitqueue_put(job->queue); in msm_vma_job_free()
752 dma_fence_put(job->fence); in msm_vma_job_free()
755 while (!list_empty(&job->vm_ops)) { in msm_vma_job_free()
757 list_first_entry(&job->vm_ops, struct msm_vm_op, node); in msm_vma_job_free()
758 list_del(&op->node); in msm_vma_job_free()
762 wake_up(&vm->prealloc_throttle.wait); in msm_vma_job_free()
773 * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
776 * @name: the name of the VM
779 * @managed: is it a kernel managed VM?
781 * In a kernel managed VM, the kernel handles address allocation, and only
782 * synchronous operations are supported. In a user managed VM, userspace
792 * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose in msm_gem_vm_create()
796 struct msm_gem_vm *vm; in msm_gem_vm_create() local
803 vm = kzalloc(sizeof(*vm), GFP_KERNEL); in msm_gem_vm_create()
804 if (!vm) in msm_gem_vm_create()
805 return ERR_PTR(-ENOMEM); in msm_gem_vm_create()
809 ret = -ENOMEM; in msm_gem_vm_create()
819 .name = "msm-vm-bind", in msm_gem_vm_create()
820 .dev = drm->dev, in msm_gem_vm_create()
823 ret = drm_sched_init(&vm->sched, &args); in msm_gem_vm_create()
827 init_waitqueue_head(&vm->prealloc_throttle.wait); in msm_gem_vm_create()
830 drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem, in msm_gem_vm_create()
834 vm->mmu = mmu; in msm_gem_vm_create()
835 mutex_init(&vm->mmu_lock); in msm_gem_vm_create()
836 vm->managed = managed; in msm_gem_vm_create()
838 drm_mm_init(&vm->mm, va_start, va_size); in msm_gem_vm_create()
841 * We don't really need vm log for kernel managed VMs, as the kernel in msm_gem_vm_create()
850 vm->log_shift = MIN(vm_log_shift, 8); in msm_gem_vm_create()
852 if (vm->log_shift) { in msm_gem_vm_create()
853 vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]), in msm_gem_vm_create()
857 return &vm->base; in msm_gem_vm_create()
863 kfree(vm); in msm_gem_vm_create()
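A hedged usage sketch for the constructor documented above. The parameter order (drm device, msm_mmu, name, va_start, va_size, managed) is inferred from how the arguments are consumed in the function body, so verify it against the msm headers; drm, mmu, va_start and va_size are placeholders:

/* Kernel-managed VM: the kernel allocates iovas from [va_start, va_start + va_size)
 * with the drm_mm range allocator and only synchronous map/unmap is supported. */
struct drm_gpuvm *gpu_vm = msm_gem_vm_create(drm, mmu, "gpu", va_start, va_size, true);

/* User-managed VM: userspace chooses every iova and drives the asynchronous
 * VM_BIND path (hence the "msm-vm-bind" scheduler set up above). */
struct drm_gpuvm *proc_vm = msm_gem_vm_create(drm, mmu, "proc", va_start, va_size, false);
if (IS_ERR(proc_vm))
	return PTR_ERR(proc_vm);   /* errors come back as ERR_PTR(), e.g. -ENOMEM */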
868 * msm_gem_vm_close() - Close a VM
869 * @gpuvm: The VM to close
871 * Called when the drm device file is closed, to tear down VM related resources
873 * VM at the time).
878 struct msm_gem_vm *vm = to_msm_vm(gpuvm); in msm_gem_vm_close() local
886 if (vm->managed) in msm_gem_vm_close()
889 if (vm->last_fence) in msm_gem_vm_close()
890 dma_fence_wait(vm->last_fence, false); in msm_gem_vm_close()
893 drm_sched_stop(&vm->sched, NULL); in msm_gem_vm_close()
894 drm_sched_fini(&vm->sched); in msm_gem_vm_close()
903 struct drm_gem_object *obj = vma->gem.obj; in msm_gem_vm_close()
907 * VM, in which case the obj is already locked: in msm_gem_vm_close()
909 if (obj && (obj->resv == drm_gpuvm_resv(gpuvm))) in msm_gem_vm_close()
940 return ERR_PTR(-ENOMEM); in vm_bind_job_create()
944 return ERR_PTR(-ENOMEM); in vm_bind_job_create()
946 ret = drm_sched_job_init(&job->base, queue->entity, 1, queue, in vm_bind_job_create()
947 file->client_id); in vm_bind_job_create()
953 job->vm = msm_context_vm(dev, queue->ctx); in vm_bind_job_create()
954 job->queue = queue; in vm_bind_job_create()
955 INIT_LIST_HEAD(&job->vm_ops); in vm_bind_job_create()
973 struct drm_device *dev = job->vm->drm; in lookup_op()
974 struct msm_drm_private *priv = dev->dev_private; in lookup_op()
975 int i = job->nr_ops++; in lookup_op()
978 job->ops[i].op = op->op; in lookup_op()
979 job->ops[i].handle = op->handle; in lookup_op()
980 job->ops[i].obj_offset = op->obj_offset; in lookup_op()
981 job->ops[i].iova = op->iova; in lookup_op()
982 job->ops[i].range = op->range; in lookup_op()
983 job->ops[i].flags = op->flags; in lookup_op()
985 if (op->flags & ~MSM_VM_BIND_OP_FLAGS) in lookup_op()
986 ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags); in lookup_op()
988 if (invalid_alignment(op->iova)) in lookup_op()
989 ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova); in lookup_op()
991 if (invalid_alignment(op->obj_offset)) in lookup_op()
992 ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset); in lookup_op()
994 if (invalid_alignment(op->range)) in lookup_op()
995 ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range); in lookup_op()
997 if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range)) in lookup_op()
998 ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range); in lookup_op()
1001 * MAP must specify a valid handle. But the handle MBZ for in lookup_op()
1004 if (op->op == MSM_VM_BIND_OP_MAP) { in lookup_op()
1005 if (!op->handle) in lookup_op()
1007 } else if (op->handle) { in lookup_op()
1011 switch (op->op) { in lookup_op()
1017 ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op); in lookup_op()
1021 if ((op->op == MSM_VM_BIND_OP_MAP_NULL) && in lookup_op()
1022 !adreno_smmu_has_prr(priv->gpu)) { in lookup_op()
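For reference, the userspace-visible op that lookup_op() validates carries the same six fields it copies into job->ops[]. A hedged example of filling a single MAP op, assuming the uapi struct follows the usual drm_msm_* naming; bo_handle, gpu_va and bo_size are placeholders:

struct drm_msm_vm_bind_op op = {
	.op         = MSM_VM_BIND_OP_MAP,
	.handle     = bo_handle,          /* GEM handle; must be non-zero for MAP */
	.obj_offset = 0,                  /* must be page-aligned */
	.iova       = gpu_va,             /* must be page-aligned and inside the VM range */
	.range      = bo_size,            /* must be page-aligned and fit within the BO */
	.flags      = MSM_VM_BIND_OP_DUMP,
};
/* For MSM_VM_BIND_OP_UNMAP and MSM_VM_BIND_OP_MAP_NULL the handle must be
 * zero, and MAP_NULL (PRR) is rejected when the SMMU lacks PRR support. */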
1036 struct drm_device *dev = job->vm->drm; in vm_bind_job_lookup_ops()
1039 int i = -1; in vm_bind_job_lookup_ops()
1041 if (args->nr_ops == 1) { in vm_bind_job_lookup_ops()
1043 ret = lookup_op(job, &args->op); in vm_bind_job_lookup_ops()
1045 for (unsigned i = 0; i < args->nr_ops; i++) { in vm_bind_job_lookup_ops()
1048 u64_to_user_ptr(args->ops + (i * sizeof(op))); in vm_bind_job_lookup_ops()
1053 job->ops[i].flags = 0; in vm_bind_job_lookup_ops()
1056 ret = -EFAULT; in vm_bind_job_lookup_ops()
1067 job->nr_ops = 0; in vm_bind_job_lookup_ops()
1071 spin_lock(&file->table_lock); in vm_bind_job_lookup_ops()
1073 for (i = 0; i < args->nr_ops; i++) { in vm_bind_job_lookup_ops()
1074 struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_lookup_ops()
1077 if (!op->handle) { in vm_bind_job_lookup_ops()
1078 op->obj = NULL; in vm_bind_job_lookup_ops()
1086 obj = idr_find(&file->object_idr, op->handle); in vm_bind_job_lookup_ops()
1088 ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i); in vm_bind_job_lookup_ops()
1094 op->obj = obj; in vm_bind_job_lookup_ops()
1097 if ((op->range + op->obj_offset) > obj->size) { in vm_bind_job_lookup_ops()
1099 op->range, op->obj_offset, obj->size); in vm_bind_job_lookup_ops()
1107 spin_unlock(&file->table_lock); in vm_bind_job_lookup_ops()
1110 for (; i >= 0; i--) { in vm_bind_job_lookup_ops()
1111 struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_lookup_ops()
1113 if (!op->obj) in vm_bind_job_lookup_ops()
1116 drm_gem_object_put(op->obj); in vm_bind_job_lookup_ops()
1117 op->obj = NULL; in vm_bind_job_lookup_ops()
1129 struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu; in prealloc_count()
1134 uint64_t start_iova = first->iova; in prealloc_count()
1135 uint64_t end_iova = last->iova + last->range; in prealloc_count()
1137 mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova); in prealloc_count()
1145 * the PoV of figuring out how many pgtable pages to pre-allocate in ops_are_same_pte()
1148 uint64_t pte_mask = ~(SZ_2M - 1); in ops_are_same_pte()
1149 return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask); in ops_are_same_pte()
1155 * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
1156 * So detect when multiple MAP operations are physically contiguous, and count
1164 struct msm_gem_vm *vm = to_msm_vm(job->vm); in vm_bind_prealloc_count() local
1167 for (int i = 0; i < job->nr_ops; i++) { in vm_bind_prealloc_count()
1168 struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_prealloc_count()
1170 /* We only care about MAP/MAP_NULL: */ in vm_bind_prealloc_count()
1171 if (op->op == MSM_VM_BIND_OP_UNMAP) in vm_bind_prealloc_count()
1196 * Now that we know the needed amount to pre-alloc, throttle on pending in vm_bind_prealloc_count()
1197 * VM_BIND jobs if we already have too much pre-alloc memory in flight in vm_bind_prealloc_count()
1200 vm->prealloc_throttle.wait, in vm_bind_prealloc_count()
1201 atomic_read(&vm->prealloc_throttle.in_flight) <= 1024); in vm_bind_prealloc_count()
1205 atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight); in vm_bind_prealloc_count()
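A quick worked example of the coalescing rule in ops_are_same_pte(): with pte_mask = ~(SZ_2M - 1), a MAP op whose end and a following MAP op whose start fall in the same 2MB-aligned block are only counted once for pre-allocation (addresses below are made up):

#include <assert.h>
#include <stdint.h>

#define SZ_2M 0x200000ULL

int main(void)
{
	uint64_t pte_mask = ~(SZ_2M - 1);

	/* first op:  [0x10010000, 0x10014000) -> its end is in the 2M block at 0x10000000 */
	uint64_t first_iova = 0x10010000, first_range = 0x4000;
	/* next op:   starts at 0x10014000 -> same 2M block, so no extra pgtable page */
	uint64_t next_iova = 0x10014000;

	assert(((first_iova + first_range) & pte_mask) == (next_iova & pte_mask));
	return 0;
}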
1211 * Lock VM and GEM objects
1218 /* Lock VM and objects: */ in vm_bind_job_lock_objects()
1220 ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm)); in vm_bind_job_lock_objects()
1225 for (unsigned i = 0; i < job->nr_ops; i++) { in vm_bind_job_lock_objects()
1226 const struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_lock_objects()
1228 switch (op->op) { in vm_bind_job_lock_objects()
1230 ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec, in vm_bind_job_lock_objects()
1231 op->iova, in vm_bind_job_lock_objects()
1232 op->obj_offset); in vm_bind_job_lock_objects()
1237 .map.va.addr = op->iova, in vm_bind_job_lock_objects()
1238 .map.va.range = op->range, in vm_bind_job_lock_objects()
1239 .map.gem.obj = op->obj, in vm_bind_job_lock_objects()
1240 .map.gem.offset = op->obj_offset, in vm_bind_job_lock_objects()
1243 ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req); in vm_bind_job_lock_objects()
1286 struct msm_drm_private *priv = job->vm->drm->dev_private; in vm_bind_job_pin_objects()
1295 mutex_lock(&priv->lru.lock); in vm_bind_job_pin_objects()
1298 mutex_unlock(&priv->lru.lock); in vm_bind_job_pin_objects()
1300 job->bos_pinned = true; in vm_bind_job_pin_objects()
1313 if (!job->bos_pinned) in vm_bind_job_unpin_objects()
1319 job->bos_pinned = false; in vm_bind_job_unpin_objects()
1323 * Pre-allocate pgtable memory, and translate the VM bind requests into a
1329 struct msm_gem_vm *vm = to_msm_vm(job->vm); in vm_bind_job_prepare() local
1330 struct msm_mmu *mmu = vm->mmu; in vm_bind_job_prepare()
1333 ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc); in vm_bind_job_prepare()
1337 for (unsigned i = 0; i < job->nr_ops; i++) { in vm_bind_job_prepare()
1338 const struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_prepare()
1344 switch (op->op) { in vm_bind_job_prepare()
1346 ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova, in vm_bind_job_prepare()
1347 op->range); in vm_bind_job_prepare()
1350 if (op->flags & MSM_VM_BIND_OP_DUMP) in vm_bind_job_prepare()
1355 .map.va.addr = op->iova, in vm_bind_job_prepare()
1356 .map.va.range = op->range, in vm_bind_job_prepare()
1357 .map.gem.obj = op->obj, in vm_bind_job_prepare()
1358 .map.gem.offset = op->obj_offset, in vm_bind_job_prepare()
1361 ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req); in vm_bind_job_prepare()
1374 * If we've already started modifying the vm, we can't in vm_bind_job_prepare()
1376 * state the vm is in. So throw up our hands! in vm_bind_job_prepare()
1379 msm_gem_vm_unusable(job->vm); in vm_bind_job_prepare()
1395 for (unsigned i = 0; i < job->nr_ops; i++) { in vm_bind_job_attach_fences()
1396 struct drm_gem_object *obj = job->ops[i].obj; in vm_bind_job_attach_fences()
1401 dma_resv_add_fence(obj->resv, job->fence, in vm_bind_job_attach_fences()
1409 struct msm_drm_private *priv = dev->dev_private; in msm_ioctl_vm_bind()
1411 struct msm_context *ctx = file->driver_priv; in msm_ioctl_vm_bind()
1413 struct msm_gpu *gpu = priv->gpu; in msm_ioctl_vm_bind()
1419 int out_fence_fd = -1; in msm_ioctl_vm_bind()
1424 return -ENXIO; in msm_ioctl_vm_bind()
1430 if (to_msm_vm(msm_context_vm(dev, ctx))->unusable) in msm_ioctl_vm_bind()
1441 if (args->flags & ~MSM_VM_BIND_FLAGS) in msm_ioctl_vm_bind()
1444 queue = msm_submitqueue_get(ctx, args->queue_id); in msm_ioctl_vm_bind()
1446 return -ENOENT; in msm_ioctl_vm_bind()
1448 if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) { in msm_ioctl_vm_bind()
1453 if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) { in msm_ioctl_vm_bind()
1461 job = vm_bind_job_create(dev, file, queue, args->nr_ops); in msm_ioctl_vm_bind()
1467 ret = mutex_lock_interruptible(&queue->lock); in msm_ioctl_vm_bind()
1471 if (args->flags & MSM_VM_BIND_FENCE_FD_IN) { in msm_ioctl_vm_bind()
1474 in_fence = sync_file_get_fence(args->fence_fd); in msm_ioctl_vm_bind()
1477 ret = UERR(EINVAL, dev, "invalid in-fence"); in msm_ioctl_vm_bind()
1481 ret = drm_sched_job_add_dependency(&job->base, in_fence); in msm_ioctl_vm_bind()
1486 if (args->in_syncobjs > 0) { in msm_ioctl_vm_bind()
1487 syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base, in msm_ioctl_vm_bind()
1488 file, args->in_syncobjs, in msm_ioctl_vm_bind()
1489 args->nr_in_syncobjs, in msm_ioctl_vm_bind()
1490 args->syncobj_stride); in msm_ioctl_vm_bind()
1497 if (args->out_syncobjs > 0) { in msm_ioctl_vm_bind()
1499 args->out_syncobjs, in msm_ioctl_vm_bind()
1500 args->nr_out_syncobjs, in msm_ioctl_vm_bind()
1501 args->syncobj_stride); in msm_ioctl_vm_bind()
1532 drm_sched_job_arm(&job->base); in msm_ioctl_vm_bind()
1534 job->fence = dma_fence_get(&job->base.s_fence->finished); in msm_ioctl_vm_bind()
1536 if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) { in msm_ioctl_vm_bind()
1537 sync_file = sync_file_create(job->fence); in msm_ioctl_vm_bind()
1539 ret = -ENOMEM; in msm_ioctl_vm_bind()
1551 fence = dma_fence_get(job->fence); in msm_ioctl_vm_bind()
1553 drm_sched_entity_push_job(&job->base); in msm_ioctl_vm_bind()
1555 msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs); in msm_ioctl_vm_bind()
1556 msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence); in msm_ioctl_vm_bind()
1566 mutex_unlock(&queue->lock); in msm_ioctl_vm_bind()
1572 fput(sync_file->file); in msm_ioctl_vm_bind()
1574 fd_install(out_fence_fd, sync_file->file); in msm_ioctl_vm_bind()
1575 args->fence_fd = out_fence_fd; in msm_ioctl_vm_bind()
1580 msm_vma_job_free(&job->base); in msm_ioctl_vm_bind()
1590 for (i = 0; i < args->nr_out_syncobjs; ++i) { in msm_ioctl_vm_bind()
1598 for (i = 0; i < args->nr_in_syncobjs; ++i) { in msm_ioctl_vm_bind()
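Finally, a hedged userspace-side sketch of driving this ioctl, using only the args fields the handler above dereferences. The struct and ioctl names are assumed to follow the usual msm uapi naming and should be checked against msm_drm.h; fd, vm_bind_queue_id, nr_ops and ops_array are placeholders:

struct drm_msm_vm_bind req = {
	.flags        = MSM_VM_BIND_FENCE_FD_OUT,  /* ask for an out-fence fd */
	.queue_id     = vm_bind_queue_id,          /* queue created with MSM_SUBMITQUEUE_VM_BIND */
	.nr_ops       = nr_ops,
	.ops          = (uintptr_t)ops_array,      /* array of MAP/MAP_NULL/UNMAP ops */
	.in_syncobjs  = 0,                         /* optional syncobj dependencies */
	.out_syncobjs = 0,
};

/* When nr_ops == 1 the single op can instead be passed inline in the args
 * (the handler uses &args->op in that case). */

if (drmIoctl(fd, DRM_IOCTL_MSM_VM_BIND, &req)) {
	/* e.g. EINVAL for malformed ops, ENOENT for a bad queue_id */
}

/* req.fence_fd now holds a sync_file fd that signals once the asynchronous
 * pgtable updates for all of the ops have been applied. */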