Lines matching refs:vma
267 /* Actually unmap memory for the vma */
268 void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
270 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
271 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
286 .iova = vma->va.addr,
287 .range = vma->va.range,
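
The matches above look like drivers/gpu/drm/msm/msm_gem_vma.c: msm_gem_vma_unmap() takes the generic struct drm_gpuva and recovers the driver-private wrappers with to_msm_vm()/to_msm_vma(), then describes the unmap by the gpuva's va.addr/va.range. A minimal sketch of that embed-and-downcast pattern, assuming container_of()-based helpers (their definitions are not among the matches) and spelling out only the fields the matches actually touch:

#include <linux/container_of.h>
#include <drm/drm_gpuvm.h>
#include <drm/drm_mm.h>

/* Driver-private VM/VMA types embed the generic drm_gpuvm objects. */
struct msm_gem_vm {
        struct drm_gpuvm base;          /* what vma->vm points at */
        struct drm_mm mm;               /* iova allocator, cf. line 388 */
        bool managed;                   /* kernel-managed VM, cf. line 360 */
        /* ... */
};

struct msm_gem_vma {
        struct drm_gpuva base;          /* &vma->base is what gets handed out */
        struct drm_mm_node node;        /* iova reservation, cf. lines 388/428 */
        bool mapped;                    /* cf. lines 403/516 */
        /* ... */
};

/* Assumed downcast helpers: plain container_of() on the embedded base. */
static inline struct msm_gem_vm *to_msm_vm(struct drm_gpuvm *vm)
{
        return container_of(vm, struct msm_gem_vm, base);
}

static inline struct msm_gem_vma *to_msm_vma(struct drm_gpuva *vma)
{
        return container_of(vma, struct msm_gem_vma, base);
}
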
297 /* Map and pin vma: */
299 msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
301 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
302 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
305 if (GEM_WARN_ON(!vma->va.addr))
331 .iova = vma->va.addr,
332 .range = vma->va.range,
333 .offset = vma->gem.offset,
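
msm_gem_vma_map() refuses a vma that has no iova yet (the GEM_WARN_ON at line 305) and builds its map operation from the gpuva's va.addr/va.range/gem.offset. A hedged caller sketch, assuming the function returns an errno-style int and that prot is the usual IOMMU_READ/IOMMU_WRITE bit mask from <linux/iommu.h>; neither detail appears in the matches:

#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <drm/drm_gpuvm.h>

int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt);

/* Map a vma read/write once its backing pages are pinned into 'sgt'. */
static int example_map_rw(struct drm_gpuva *vma, struct sg_table *sgt)
{
        return msm_gem_vma_map(vma, IOMMU_READ | IOMMU_WRITE, sgt);
}
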
348 void msm_gem_vma_close(struct drm_gpuva *vma)
350 struct msm_gem_vm *vm = to_msm_vm(vma->vm);
351 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
357 if (vma->gem.obj)
358 msm_gem_assert_locked(vma->gem.obj);
360 if (vma->va.addr && vm->managed)
363 drm_gpuva_remove(vma);
364 drm_gpuva_unlink(vma);
366 kfree(vma);
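
msm_gem_vma_close() asserts the object lock, gives a kernel-managed iova back to the allocator (lines 357-360), and then removes, unlinks and frees the gpuva. A hedged teardown sketch that follows the ordering the VM-close path uses at lines 881-882, unmap before close; the reason string looks purely diagnostic:

#include <drm/drm_gpuvm.h>

void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason);
void msm_gem_vma_close(struct drm_gpuva *vma);

/* Tear down one mapping: drop the page tables, then release iova + gpuva. */
static void example_teardown(struct drm_gpuva *vma)
{
        msm_gem_vma_unmap(vma, "example teardown");
        msm_gem_vma_close(vma);         /* frees 'vma'; do not touch it afterwards */
}
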
369 /* Create a new vma and allocate an iova for it */
376 struct msm_gem_vma *vma;
381 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
382 if (!vma)
388 ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
395 range_start = vma->node.start;
402 drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
403 vma->mapped = false;
405 ret = drm_gpuva_insert(&vm->base, &vma->base);
410 return &vma->base;
419 drm_gpuva_link(&vma->base, vm_bo);
422 return &vma->base;
425 drm_gpuva_remove(&vma->base);
428 drm_mm_remove_node(&vma->node);
430 kfree(vma);
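
The creation helper itself is only partly visible (its signature line does not contain the bare token "vma", so it is not among the matches; in recent trees it is msm_gem_vma_new()). A hedged reconstruction of the sequence the matched lines outline: allocate the wrapper, carve an iova out of the drm_mm allocator when the VM is kernel-managed, initialise and insert the gpuva, then link it to the object's drm_gpuvm_bo, unwinding in reverse on failure (lines 425-430). The size/alignment arguments and the drm_gpuvm_bo_obtain()/drm_gpuvm_bo_put() pairing are assumptions; the struct sketch from earlier is reused:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
#include <drm/drm_mm.h>

static struct drm_gpuva *example_vma_new(struct msm_gem_vm *vm,
                                         struct drm_gem_object *obj,
                                         u64 offset,
                                         u64 range_start, u64 range_end)
{
        struct drm_gpuvm_bo *vm_bo;
        struct msm_gem_vma *vma;
        int ret;

        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
                return ERR_PTR(-ENOMEM);

        if (vm->managed) {
                /* Kernel-managed VM: pick an iova from the drm_mm allocator;
                 * a user-managed VM maps at the caller-supplied range. */
                ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
                                                  obj->size, PAGE_SIZE, 0,
                                                  range_start, range_end,
                                                  DRM_MM_INSERT_BEST);
                if (ret)
                        goto err_free_vma;

                range_start = vma->node.start;
                range_end = range_start + obj->size;
        }

        drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
        vma->mapped = false;

        ret = drm_gpuva_insert(&vm->base, &vma->base);
        if (ret)
                goto err_free_range;

        /* Track the (vm, bo) pair and link the new gpuva to it. */
        vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
        if (IS_ERR(vm_bo)) {
                ret = PTR_ERR(vm_bo);
                goto err_va_remove;
        }

        drm_gpuva_link(&vma->base, vm_bo);
        drm_gpuvm_bo_put(vm_bo);

        return &vma->base;

err_va_remove:
        drm_gpuva_remove(&vma->base);
err_free_range:
        if (vm->managed)
                drm_mm_remove_node(&vma->node);
err_free_vma:
        kfree(vma);
        return ERR_PTR(ret);
}
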
438 struct drm_gpuva *vma;
445 drm_gpuvm_bo_for_each_va (vma, vm_bo) {
446 ret = msm_gem_pin_vma_locked(obj, vma);
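
Lines 445-446 sit inside a loop over every vma that the object's drm_gpuvm_bo tracks, pinning the object into each of them. A small hedged sketch of that iteration pattern; the surrounding function and its locking are not in the matches, and the int return type of msm_gem_pin_vma_locked() is inferred from the 'ret =' assignment:

#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma);

/* Pin the object's pages into every vma the vm_bo currently tracks. */
static int example_pin_all_vmas(struct drm_gem_object *obj,
                                struct drm_gpuvm_bo *vm_bo)
{
        struct drm_gpuva *vma;
        int ret;

        drm_gpuvm_bo_for_each_va(vma, vm_bo) {
                ret = msm_gem_pin_vma_locked(obj, vma);
                if (ret)
                        return ret;
        }

        return 0;
}
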
482 struct drm_gpuva *vma;
486 vma = vma_from_op(arg, &op->map);
487 if (WARN_ON(IS_ERR(vma)))
488 return PTR_ERR(vma);
490 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
491 vma->va.addr, vma->va.range);
493 vma->flags = ((struct op_arg *)arg)->flags;
507 .iova = vma->va.addr,
508 .range = vma->va.range,
509 .offset = vma->gem.offset,
513 .obj = vma->gem.obj,
516 to_msm_vma(vma)->mapped = true;
561 * creation of the new prev/next vma's, in case the vm_bo is tracked
568 * The prev_vma and/or next_vma are replacing the unmapped vma, and
608 struct drm_gpuva *vma = op->unmap.va;
609 struct msm_gem_vma *msm_vma = to_msm_vma(vma);
611 vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
612 vma->va.addr, vma->va.range);
620 .iova = vma->va.addr,
621 .range = vma->va.range,
624 .obj = vma->gem.obj,
630 msm_gem_vma_close(vma);
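
The two handlers above consume struct drm_gpuva_op entries (op->map at line 486, op->unmap.va at line 608), which is the shape of drm_gpuvm's split/merge step callbacks: the core walks an incoming map or unmap request against the existing vmas and invokes one step per resulting map, remap or unmap. A hedged sketch of how such handlers are wired up through struct drm_gpuvm_ops; the callback and op_arg names here are placeholders, only the op usage comes from the matches:

#include <drm/drm_gpuvm.h>

/* Per-request context handed through the 'priv'/'arg' pointer (cf. line 493). */
struct example_op_arg {
        unsigned int flags;
        /* ... */
};

static int example_sm_step_map(struct drm_gpuva_op *op, void *arg)
{
        /* Create and map the new vma described by op->map (lines 482-516). */
        return 0;
}

static int example_sm_step_remap(struct drm_gpuva_op *op, void *arg)
{
        /* Split an existing vma into prev/next around the unmapped hole
         * (the comments near lines 561-568). */
        return 0;
}

static int example_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
{
        /* Tear down the vma referenced by op->unmap.va (lines 608-630). */
        return 0;
}

static const struct drm_gpuvm_ops example_vm_ops = {
        .sm_step_map = example_sm_step_map,
        .sm_step_remap = example_sm_step_remap,
        .sm_step_unmap = example_sm_step_unmap,
};
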
843 struct drm_gpuva *vma, *tmp;
866 drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
867 struct drm_gem_object *obj = vma->gem.obj;
881 msm_gem_vma_unmap(vma, "close");
882 msm_gem_vma_close(vma);
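
Finally, lines 843-882 are the VM teardown walk. A hedged sketch of the pattern; the _safe iterator matters because msm_gem_vma_close() frees each vma as the loop goes, and the object locking that the elided lines presumably take is omitted here:

#include <drm/drm_gpuvm.h>

/* Unmap and free every vma still attached to a VM being destroyed. */
static void example_close_vm(struct drm_gpuvm *gpuvm)
{
        struct drm_gpuva *vma, *tmp;

        drm_gpuvm_for_each_va_safe(vma, tmp, gpuvm) {
                msm_gem_vma_unmap(vma, "close");
                msm_gem_vma_close(vma);         /* frees 'vma' */
        }
}
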