26 #include <linux/dma-fence-array.h>
57 if (kref_read(&vma->vm->ref))
58 assert_object_held_shared(vma->obj);
81 if (!vma->node.stack) {
82 drm_dbg(vma->obj->base.dev,
84 vma->node.start, vma->node.size, reason);
88 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
89 drm_dbg(vma->obj->base.dev,
91 vma->node.start, vma->node.size, reason, buf);
112 return -ENOENT;
126 intel_gt_pm_get_untracked(vma->vm->gt);
141 intel_gt_pm_put_async_untracked(vma->vm->gt);
152 struct i915_vma *pos = ERR_PTR(-E2BIG);
158 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
162 return ERR_PTR(-ENOMEM);
164 vma->ops = &vm->vma_ops;
165 vma->obj = obj;
166 vma->size = obj->base.size;
167 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
169 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
174 might_lock(&vma->active.mutex);
178 INIT_LIST_HEAD(&vma->closed_link);
179 INIT_LIST_HEAD(&vma->obj_link);
180 RB_CLEAR_NODE(&vma->obj_node);
182 if (view && view->type != I915_GTT_VIEW_NORMAL) {
183 vma->gtt_view = *view;
184 if (view->type == I915_GTT_VIEW_PARTIAL) {
186 view->partial.offset,
187 view->partial.size,
188 obj->base.size >> PAGE_SHIFT));
189 vma->size = view->partial.size;
190 vma->size <<= PAGE_SHIFT;
191 GEM_BUG_ON(vma->size > obj->base.size);
192 } else if (view->type == I915_GTT_VIEW_ROTATED) {
193 vma->size = intel_rotation_info_size(&view->rotated);
194 vma->size <<= PAGE_SHIFT;
195 } else if (view->type == I915_GTT_VIEW_REMAPPED) {
196 vma->size = intel_remapped_info_size(&view->remapped);
197 vma->size <<= PAGE_SHIFT;
201 if (unlikely(vma->size > vm->total))
204 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
206 err = mutex_lock_interruptible(&vm->mutex);
212 vma->vm = vm;
213 list_add_tail(&vma->vm_link, &vm->unbound_list);
215 spin_lock(&obj->vma.lock);
217 if (unlikely(overflows_type(vma->size, u32)))
220 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
223 if (unlikely(vma->fence_size < vma->size || /* overflow */
224 vma->fence_size > vm->total))
227 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
229 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
232 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
238 p = &obj->vma.tree.rb_node;
252 p = &rb->rb_right;
254 p = &rb->rb_left;
258 rb_link_node(&vma->obj_node, rb, p);
259 rb_insert_color(&vma->obj_node, &obj->vma.tree);
263 * We put the GGTT vma at the start of the vma-list, followed
268 list_add(&vma->obj_link, &obj->vma.list);
270 list_add_tail(&vma->obj_link, &obj->vma.list);
272 spin_unlock(&obj->vma.lock);
273 mutex_unlock(&vm->mutex);
278 spin_unlock(&obj->vma.lock);
279 list_del_init(&vma->vm_link);
280 mutex_unlock(&vm->mutex);
293 rb = obj->vma.tree.rb_node;
303 rb = rb->rb_right;
305 rb = rb->rb_left;
312 * i915_vma_instance - return the singleton instance of the VMA
332 GEM_BUG_ON(!kref_read(&vm->ref));
334 spin_lock(&obj->vma.lock);
336 spin_unlock(&obj->vma.lock);
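The kerneldoc above covers i915_vma_instance(), which hands back the singleton vma for an (obj, vm, view) triple. A minimal usage sketch follows, not taken from this file; it assumes the i915_vma_pin()/i915_vma_unpin() helpers, the PIN_GLOBAL flag and i915_ggtt_offset() from the surrounding driver, and the helper name itself is made up:

	static int pin_obj_into_ggtt(struct drm_i915_gem_object *obj,
				     struct i915_address_space *ggtt_vm)
	{
		struct i915_vma *vma;
		int err;

		/* Look up (or create) the singleton vma; may return an ERR_PTR. */
		vma = i915_vma_instance(obj, ggtt_vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/* Reserve a node in the global GTT and bind the PTEs. */
		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			return err;

		/* ... use i915_ggtt_offset(vma) while the pin is held ... */

		i915_vma_unpin(vma);
		return 0;
	}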
360 struct i915_vma_resource *vma_res = vw->vma_res;
368 if (i915_gem_object_has_unknown_state(vw->obj))
371 vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
372 vma_res, vw->pat_index, vw->flags);
379 if (vw->obj)
380 i915_gem_object_put(vw->obj);
382 i915_vm_free_pt_stash(vw->vm, &vw->stash);
383 if (vw->vma_res)
384 i915_vma_resource_put(vw->vma_res);
401 dma_fence_work_init(&vw->base, &bind_ops);
402 vw->base.dma.error = -EAGAIN; /* disable the worker by default */
411 if (rcu_access_pointer(vma->active.excl.fence)) {
415 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
429 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
436 err = fence->error;
438 err = -EBUSY;
452 struct drm_i915_gem_object *obj = vma->obj;
454 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
455 obj->mm.rsgt, i915_gem_object_is_readonly(obj),
456 i915_gem_object_is_lmem(obj), obj->mm.region,
457 vma->ops, vma->private, __i915_vma_offset(vma),
458 __i915_vma_size(vma), vma->size, vma->guard);
462 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
470 * DMA addresses are taken from the scatter-gather table of this object (or of
471 * this VMA in case of non-default GGTT views) and PTE entries set up.
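Per the kerneldoc fragment above, a successful bind ORs the requested bind flags into vma->flags (see the atomic_or() further down). A small sketch, with a made-up helper name, of how a caller can test for an existing global binding via i915_vma_is_bound() from i915_vma.h:

	/* Illustrative only: true if the vma still needs a GGTT binding. */
	static bool ggtt_bind_needed(const struct i915_vma *vma)
	{
		return !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND);
	}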
484 lockdep_assert_held(&vma->vm->mutex);
485 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
486 GEM_BUG_ON(vma->size > i915_vma_size(vma));
488 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
489 vma->node.size,
490 vma->vm->total))) {
492 return -ENODEV;
497 return -EINVAL;
503 vma_flags = atomic_read(&vma->flags);
512 GEM_BUG_ON(!atomic_read(&vma->pages_count));
515 if (work && bind_flags & vma->vm->bind_async_flags)
516 ret = i915_vma_resource_bind_dep_await(vma->vm,
517 &work->base.chain,
518 vma->node.start,
519 vma->node.size,
525 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
526 vma->node.size, true);
532 if (vma->resource || !vma_res) {
538 vma->resource = vma_res;
541 if (work && bind_flags & vma->vm->bind_async_flags) {
544 work->vma_res = i915_vma_resource_get(vma->resource);
545 work->pat_index = pat_index;
546 work->flags = bind_flags;
554 * part of the obj->resv->excl_fence as it only affects
557 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
559 __i915_sw_fence_await_dma_fence(&work->base.chain,
561 &work->cb);
565 work->base.dma.error = 0; /* enable the queue_work() */
566 work->obj = i915_gem_object_get(vma->obj);
568 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
570 i915_vma_resource_free(vma->resource);
571 vma->resource = NULL;
575 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
579 atomic_or(bind_flags, &vma->flags);
588 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
589 return IOMEM_ERR_PTR(-EINVAL);
595 ptr = READ_ONCE(vma->iomap);
599 * instead, which already supports mapping non-contiguous chunks
603 if (i915_gem_object_is_lmem(vma->obj)) {
604 ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
605 vma->obj->base.size);
607 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
612 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
621 err = -ENOMEM;
625 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
627 __i915_gem_object_release_map(vma->obj);
630 ptr = vma->iomap;
654 intel_gt_flush_ggtt_writes(vma->vm->gt);
659 GEM_BUG_ON(vma->iomap == NULL);
678 obj = vma->obj;
692 if (!drm_mm_node_allocated(&vma->node))
717 vma->guard < (flags & PIN_OFFSET_MASK))
728 GEM_BUG_ON(!vma->fence_size);
730 fenceable = (i915_vma_size(vma) >= vma->fence_size &&
731 IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
733 mappable = i915_ggtt_offset(vma) + vma->fence_size <=
734 i915_vm_to_ggtt(vma->vm)->mappable_end;
744 struct drm_mm_node *node = &vma->node;
754 if (!i915_vm_has_cache_coloring(vma->vm))
759 GEM_BUG_ON(list_empty(&node->node_list));
775 * i915_vma_insert - finds a slot for the vma in its address space
798 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
801 size = max(size, vma->size);
802 alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
804 size = max_t(typeof(size), size, vma->fence_size);
806 alignment, vma->fence_alignment);
813 guard = vma->guard; /* retain guard across rebinds */
828 end = vma->vm->total;
830 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
832 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
835 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
842 if (size > end - 2 * guard) {
843 drm_dbg(vma->obj->base.dev,
846 return -ENOSPC;
851 if (i915_vm_has_cache_coloring(vma->vm))
852 color = vma->obj->pat_index;
858 return -EINVAL;
863 * of the vma->node due to the guard pages.
865 if (offset < guard || offset + size > end - guard)
866 return -ENOSPC;
868 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
870 offset - guard,
884 if (upper_32_bits(end - 1) &&
885 vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
886 !HAS_64K_PAGES(vma->vm->i915)) {
888 * We can't mix 64K and 4K PTEs in the same page-table
894 rounddown_pow_of_two(vma->page_sizes.sg |
900 * also checks that we exclude the aliasing-ppgtt.
906 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
910 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
916 GEM_BUG_ON(vma->node.start < start);
917 GEM_BUG_ON(vma->node.start + vma->node.size > end);
919 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
922 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
923 vma->guard = guard;
931 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
936 * vma, we can drop its hold on the backing storage and allow
939 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
946 bound = atomic_read(&vma->flags);
964 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
975 unsigned int column, row;
981 src_idx = src_stride * (height - 1) + column + offset;
982 for (row = 0; row < height; row++) {
983 st->nents++;
994 src_idx -= src_stride;
997 left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
1002 st->nents++;
1023 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1026 int ret = -ENOMEM;
1038 st->nents = 0;
1039 sg = st->sgl;
1041 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1042 sg = rotate_pages(obj, rot_info->plane[i].offset,
1043 rot_info->plane[i].width, rot_info->plane[i].height,
1044 rot_info->plane[i].src_stride,
1045 rot_info->plane[i].dst_stride,
1054 drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1055 obj->base.size, rot_info->plane[0].width,
1056 rot_info->plane[0].height, size);
1065 st->nents++;
1088 unsigned int row;
1096 for (row = 0; row < height; row++) {
1113 st->nents++;
1121 left -= length;
1124 offset += src_stride - width;
1126 left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
1154 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
1161 st->nents++;
1162 count -= len >> PAGE_SHIFT;
1202 if (rem_info->plane_alignment)
1203 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
1205 if (rem_info->plane[color_plane].linear)
1207 rem_info->plane[color_plane].offset,
1209 rem_info->plane[color_plane].size,
1215 rem_info->plane[color_plane].offset,
1217 rem_info->plane[color_plane].width,
1218 rem_info->plane[color_plane].height,
1219 rem_info->plane[color_plane].src_stride,
1220 rem_info->plane[color_plane].dst_stride,
1232 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1236 int ret = -ENOMEM;
1248 st->nents = 0;
1249 sg = st->sgl;
1251 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1262 drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1263 obj->base.size, rem_info->plane[0].width,
1264 rem_info->plane[0].height, size);
1275 unsigned int count = view->partial.size;
1276 int ret = -ENOMEM;
1286 st->nents = 0;
1288 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
1307 * The vma->pages are only valid within the lifespan of the borrowed
1308 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
1309 * must be the vma->pages. A simple rule is that vma->pages must only
1310 * be accessed when the obj->mm.pages are pinned.
1312 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
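The comment above states the lifetime rule for vma->pages. A sketch of honouring it, with a hypothetical helper name; it assumes the i915_gem_object_pin_pages_unlocked() helper and for_each_sg(), and guards against a vma that has not acquired its pages yet:

	static int walk_vma_backing_store(struct i915_vma *vma)
	{
		struct scatterlist *sg;
		unsigned int i;
		int err;

		/* Pin obj->mm.pages first; vma->pages borrows from them. */
		err = i915_gem_object_pin_pages_unlocked(vma->obj);
		if (err)
			return err;

		if (vma->pages) {
			for_each_sg(vma->pages->sgl, sg, vma->pages->nents, i)
				; /* inspect each chunk of the borrowed table */
		}

		i915_gem_object_unpin_pages(vma->obj);
		return 0;
	}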
1314 switch (vma->gtt_view.type) {
1316 GEM_BUG_ON(vma->gtt_view.type);
1319 pages = vma->obj->mm.pages;
1324 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
1329 intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
1333 pages = intel_partial_pages(&vma->gtt_view, vma->obj);
1338 drm_err(&vma->vm->i915->drm,
1340 vma->gtt_view.type, PTR_ERR(pages));
1344 vma->pages = pages;
1353 if (atomic_add_unless(&vma->pages_count, 1, 0))
1356 err = i915_gem_object_pin_pages(vma->obj);
1364 vma->page_sizes = vma->obj->mm.page_sizes;
1365 atomic_inc(&vma->pages_count);
1370 __i915_gem_object_unpin_pages(vma->obj);
1391 for_each_gt(gt, vm->i915, id)
1399 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1401 if (atomic_sub_return(count, &vma->pages_count) == 0) {
1402 if (vma->pages != vma->obj->mm.pages) {
1403 sg_free_table(vma->pages);
1404 kfree(vma->pages);
1406 vma->pages = NULL;
1408 i915_gem_object_unpin_pages(vma->obj);
1414 if (atomic_add_unless(&vma->pages_count, -1, 1))
1424 lockdep_assert_held(&vma->vm->mutex);
1427 count = atomic_read(&vma->pages_count);
1461 * In case of a global GTT, we must hold a runtime-pm wakeref
1462 * while global PTEs are updated. In other cases, we hold
1465 * vm->mutex, get the first rpm wakeref outside of the mutex.
1467 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
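The comment above gives the ordering rule: the runtime-pm wakeref is taken before vm->mutex. A bare ordering sketch, with an illustrative function name and body:

	static void update_global_ptes(struct i915_address_space *vm)
	{
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
		mutex_lock(&vm->mutex);
		/* ... rewrite global GTT PTEs while the device is awake ... */
		mutex_unlock(&vm->mutex);
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	}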
1469 if (flags & vma->vm->bind_async_flags) {
1471 err = i915_vm_lock_objects(vma->vm, ww);
1477 err = -ENOMEM;
1481 work->vm = vma->vm;
1483 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1487 dma_fence_work_chain(&work->base, moving);
1490 if (vma->vm->allocate_va_range) {
1491 err = i915_vm_alloc_pt_stash(vma->vm,
1492 &work->stash,
1493 vma->size);
1497 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1510 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1513 * aliasing-ppgtt, but it is still vitally important to try and
1516 * inversions when we have to evict them the mmu_notifier callbacks -
1522 * NB this may cause us to mask real lock inversions -- while the
1526 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1531 /* No more allocations allowed now we hold vm->mutex */
1534 err = -ENOENT;
1538 bound = atomic_read(&vma->flags);
1540 err = -ENOMEM;
1545 err = -EAGAIN; /* pins are meant to be fairly temporary */
1555 err = i915_active_acquire(&vma->active);
1564 if (i915_is_ggtt(vma->vm))
1568 GEM_BUG_ON(!vma->pages);
1570 vma->obj->pat_index,
1578 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1579 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1591 drm_mm_remove_node(&vma->node);
1594 i915_active_release(&vma->active);
1596 mutex_unlock(&vma->vm->mutex);
1609 intel_vm_no_concurrent_access_wa(vma->vm->i915))
1610 dma_fence_work_commit(&work->base);
1612 dma_fence_work_commit_imm(&work->base);
1615 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1631 err = i915_gem_object_lock(vma->obj, &ww);
1634 if (err == -EDEADLK) {
1658 struct i915_address_space *vm = vma->vm;
1666 if (err != -ENOSPC) {
1676 list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
1678 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1685 mutex_unlock(&vm->mutex);
1701 lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1704 err = i915_gem_object_lock(vma->obj, &_ww);
1713 * i915_ggtt_clear_scanout - Clear scanout flag for all objects ggtt vmas
1724 spin_lock(&obj->vma.lock);
1727 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
1729 spin_unlock(&obj->vma.lock);
1747 list_add(&vma->closed_link, &gt->closed_vma);
1752 struct intel_gt *gt = vma->vm->gt;
1758 GEM_BUG_ON(!atomic_read(&vma->open_count));
1759 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1760 &gt->closed_lock,
1763 spin_unlock_irqrestore(&gt->closed_lock, flags);
1769 list_del_init(&vma->closed_link);
1774 struct intel_gt *gt = vma->vm->gt;
1776 spin_lock_irq(&gt->closed_lock);
1779 spin_unlock_irq(&gt->closed_lock);
1784 if (!drm_mm_node_allocated(&vma->node))
1787 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1789 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1795 struct drm_i915_gem_object *obj = vma->obj;
1799 spin_lock(&obj->vma.lock);
1800 list_del(&vma->obj_link);
1801 if (!RB_EMPTY_NODE(&vma->obj_node))
1802 rb_erase(&vma->obj_node, &obj->vma.tree);
1804 spin_unlock(&obj->vma.lock);
1806 spin_lock_irq(&gt->closed_lock);
1808 spin_unlock_irq(&gt->closed_lock);
1811 i915_vm_resv_put(vma->vm);
1813 i915_active_fini(&vma->active);
1814 GEM_WARN_ON(vma->resource);
1819 * i915_vma_destroy_locked - Remove all weak references to the vma and put
1827 * - __i915_gem_object_pages_fini()
1828 * - __i915_vm_close() - Blocks the above function by taking a reference on
1830 * - __i915_vma_parked() - Blocks the above functions by taking a reference
1840 * - vm->mutex
1841 * - obj->vma.lock
1842 * - gt->closed_lock
1846 lockdep_assert_held(&vma->vm->mutex);
1849 list_del_init(&vma->vm_link);
1850 release_references(vma, vma->vm->gt, false);
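The kerneldoc above lists the locks that may pin the vma; the locked destroy variant additionally requires the caller to hold vm->mutex (see the lockdep assert). A sketch of such a caller, assuming it also holds a reference that keeps the vm alive across the unlock:

	static void drop_vma_under_vm_mutex(struct i915_vma *vma)
	{
		struct i915_address_space *vm = vma->vm;

		mutex_lock(&vm->mutex);
		i915_vma_destroy_locked(vma);	/* strips the weak references */
		mutex_unlock(&vm->mutex);
	}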
1858 mutex_lock(&vma->vm->mutex);
1860 list_del_init(&vma->vm_link);
1861 vm_ddestroy = vma->vm_ddestroy;
1862 vma->vm_ddestroy = false;
1864 /* vma->vm may be freed when releasing vma->vm->mutex. */
1865 gt = vma->vm->gt;
1866 mutex_unlock(&vma->vm->mutex);
1875 spin_lock_irq(&gt->closed_lock);
1876 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1877 struct drm_i915_gem_object *obj = vma->obj;
1878 struct i915_address_space *vm = vma->vm;
1882 if (!kref_get_unless_zero(&obj->base.refcount))
1890 list_move(&vma->closed_link, &closed);
1892 spin_unlock_irq(&gt->closed_lock);
1896 struct drm_i915_gem_object *obj = vma->obj;
1897 struct i915_address_space *vm = vma->vm;
1900 INIT_LIST_HEAD(&vma->closed_link);
1905 spin_lock_irq(&gt->closed_lock);
1906 list_add(&vma->closed_link, &gt->closed_vma);
1907 spin_unlock_irq(&gt->closed_lock);
1919 if (vma->iomap == NULL)
1922 if (page_unmask_bits(vma->iomap))
1923 __i915_gem_object_release_map(vma->obj);
1925 io_mapping_unmap(vma->iomap);
1926 vma->iomap = NULL;
1938 GEM_BUG_ON(!vma->obj->userfault_count);
1940 node = &vma->mmo->vma_node;
1941 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1942 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1944 vma->size,
1948 if (!--vma->obj->userfault_count)
1949 list_del(&vma->obj->userfault_link);
1955 return __i915_request_await_exclusive(rq, &vma->active);
1967 return i915_active_add_request(&vma->active, rq);
1975 struct drm_i915_gem_object *obj = vma->obj;
1980 GEM_BUG_ON(!vma->pages);
1983 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
2001 err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
2011 if (intel_frontbuffer_invalidate(&front->base, ORIGIN_CS))
2012 i915_active_add_request(&front->write, rq);
2024 obj->write_domain = I915_GEM_DOMAIN_RENDER;
2025 obj->read_domains = 0;
2028 obj->write_domain = 0;
2032 dma_resv_add_fence(vma->obj->base.resv, curr, usage);
2035 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
2036 i915_active_add_request(&vma->fence->active, rq);
2038 obj->read_domains |= I915_GEM_GPU_DOMAINS;
2039 obj->mm.dirty = true;
2047 struct i915_vma_resource *vma_res = vma->resource;
2059 * before the unbind, other due to non-strict nature of those
2064 * bit from set-domain, as we mark all GGTT vma associated
2066 * are currently unbinding this one -- so if this vma will be
2080 GEM_BUG_ON(vma->fence);
2084 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
2087 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
2088 kref_read(&vma->vm->ref);
2089 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
2090 vma->vm->skip_pte_rewrite;
2095 vma->obj->mm.tlb);
2099 vma->resource = NULL;
2102 &vma->flags);
2112 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
2129 lockdep_assert_held(&vma->vm->mutex);
2132 if (!drm_mm_node_allocated(&vma->node))
2137 return -EAGAIN;
2143 * a residual pin skipping the vm->mutex) to complete.
2152 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2160 lockdep_assert_held(&vma->vm->mutex);
2162 if (!drm_mm_node_allocated(&vma->node))
2166 &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
2167 return ERR_PTR(-EAGAIN);
2178 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
2181 return ERR_PTR(-EBUSY);
2186 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2193 struct i915_address_space *vm = vma->vm;
2197 assert_object_held_shared(vma->obj);
2204 if (!drm_mm_node_allocated(&vma->node))
2209 return -EAGAIN;
2214 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2216 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2221 mutex_unlock(&vm->mutex);
2225 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2231 struct drm_i915_gem_object *obj = vma->obj;
2232 struct i915_address_space *vm = vma->vm;
2238 * We need the dma-resv lock since we add the
2239 * unbind fence to the dma-resv object.
2243 if (!drm_mm_node_allocated(&vma->node))
2248 return -EAGAIN;
2251 if (!obj->mm.rsgt)
2252 return -EBUSY;
2254 err = dma_resv_reserve_fences(obj->base.resv, 2);
2256 return -EBUSY;
2261 * kmalloc and it's in the dma-fence signalling critical path.
2264 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2266 if (trylock_vm && !mutex_trylock(&vm->mutex)) {
2267 err = -EBUSY;
2270 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
2276 mutex_unlock(&vm->mutex);
2282 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
2287 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2295 i915_gem_object_lock(vma->obj, NULL);
2297 i915_gem_object_unlock(vma->obj);
2304 i915_gem_object_make_unshrinkable(vma->obj);
2310 i915_gem_object_make_shrinkable(vma->obj);
2315 i915_gem_object_make_purgeable(vma->obj);
2331 return -ENOMEM;