Lines matching defs:obj (definitions and uses of the GEM object pointer obj) in drivers/gpu/drm/i915/gem/i915_gem_ttm.c; each entry is the source line number followed by the matched line.

112 i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
119 if (obj->mm.n_placements <= 1)
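
Lines 112 and 119 are the whole decision in i915_ttm_select_tt_caching(): a sketch of the function as those fragments suggest, with the two return values recalled from the driver rather than shown in the listing.

        static enum ttm_caching
        i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
        {
                /*
                 * Objects that can only ever live in system memory get cached
                 * CPU mappings; anything that may also be placed in lmem gets
                 * write-combined mappings.
                 */
                if (obj->mm.n_placements <= 1)
                        return ttm_cached;

                return ttm_write_combined;
        }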
157 i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
161 unsigned int num_allowed = obj->mm.n_placements;
162 unsigned int flags = obj->flags;
165 i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
166 obj->mm.region, &places[0], obj->bo_offset,
167 obj->base.size, flags);
171 i915_ttm_place_from_region(obj->mm.placements[i],
172 &places[i + 1], obj->bo_offset,
173 obj->base.size, flags);
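
Lines 157-173 are the body of i915_ttm_placement_from_obj(): slot 0 of the ttm_place array is the preferred region (falling back to the object's current region when no placement list was given), and the loop fills one slot per allowed placement. A sketch of how those fragments assemble; the final placement count/pointer bookkeeping and the TTM placement flags vary across kernel versions and are only summarized in the closing comment.

        static void
        i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
                                    struct ttm_place *places,
                                    struct ttm_placement *placement)
        {
                unsigned int num_allowed = obj->mm.n_placements;
                unsigned int flags = obj->flags;
                unsigned int i;

                /* Slot 0: preferred placement, or the current region if none given. */
                i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
                                           obj->mm.region, &places[0], obj->bo_offset,
                                           obj->base.size, flags);

                /* Slots 1..num_allowed: every allowed placement, in priority order. */
                for (i = 0; i < num_allowed; ++i)
                        i915_ttm_place_from_region(obj->mm.placements[i],
                                                   &places[i + 1], obj->bo_offset,
                                                   obj->base.size, flags);

                /* placement->placement / num_placement setup elided (version dependent). */
        }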
270 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
283 if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
287 caching = i915_ttm_select_tt_caching(obj);
288 if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
294 if (i915_gem_object_needs_ccs_pages(obj))
306 i915_tt->dev = obj->base.dev->dev;
357 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
371 if (!i915_gem_object_evictable(obj))
385 * @obj: The GEM object
390 void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
395 if (!obj->ttm.cached_io_rsgt)
399 radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
400 radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
403 i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
404 obj->ttm.cached_io_rsgt = NULL;
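
Lines 390-404 together form i915_ttm_free_cached_io_rsgt(); a sketch of the assembled function, with the RCU bracketing around the radix-tree walk assumed from the usual radix_tree_for_each_slot() usage.

        void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
        {
                struct radix_tree_iter iter;
                void __rcu **slot;

                if (!obj->ttm.cached_io_rsgt)
                        return;

                /* Drop every page-offset -> scatterlist entry cached for io faults. */
                rcu_read_lock();
                radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
                        radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
                rcu_read_unlock();

                /* Then release the cached refcounted sg-table itself. */
                i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
                obj->ttm.cached_io_rsgt = NULL;
        }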
409 * @obj: The object
416 int i915_ttm_purge(struct drm_i915_gem_object *obj)
418 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
428 if (obj->mm.madv == __I915_MADV_PURGED)
447 obj->write_domain = 0;
448 obj->read_domains = 0;
449 i915_ttm_adjust_gem_after_move(obj);
450 i915_ttm_free_cached_io_rsgt(obj);
451 obj->mm.madv = __I915_MADV_PURGED;
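
Lines 416-451 are the skeleton of i915_ttm_purge(): bail out if already purged, drop the backing store, then reset the object state. The middle step never mentions obj and so is absent from the listing; it is only summarized in a comment in this sketch.

        int i915_ttm_purge(struct drm_i915_gem_object *obj)
        {
                if (obj->mm.madv == __I915_MADV_PURGED)
                        return 0;

                /*
                 * In the source, the bo local from line 418 is validated against
                 * an empty placement here (and any shmem backing file truncated),
                 * which is what actually releases the pages.
                 */

                /* Forget the CPU/GPU domains and the cached io sg-table, mark purged. */
                obj->write_domain = 0;
                obj->read_domains = 0;
                i915_ttm_adjust_gem_after_move(obj);
                i915_ttm_free_cached_io_rsgt(obj);
                obj->mm.madv = __I915_MADV_PURGED;

                return 0;
        }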
456 static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
458 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
480 switch (obj->mm.madv) {
482 return i915_ttm_purge(obj);
498 __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
505 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
514 __i915_gem_object_pages_fini(obj);
515 i915_ttm_free_cached_io_rsgt(obj);
550 * @obj: The GEM object used for sg-table caching
562 i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
565 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
573 page_alignment = obj->mm.region->min_page_size;
581 if (!obj->ttm.cached_io_rsgt) {
584 rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
590 obj->ttm.cached_io_rsgt = rsgt;
592 return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
595 return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
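
Lines 562-595 show the caching strategy of i915_ttm_resource_get_st(): build the refcounted sg-table once for the BO's current resource and cache it in obj->ttm.cached_io_rsgt, but hand out a one-off table for any other resource. A sketch of that split; the system-memory guard, the bo->resource == res test and the bo->page_alignment preference are recalled from the driver rather than visible in the listing.

        static struct i915_refct_sgt *
        i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                                 struct ttm_resource *res)
        {
                struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
                u32 page_alignment;

                /* (System-memory resources take a separate ttm_tt path, elided.) */

                page_alignment = bo->page_alignment << PAGE_SHIFT;
                if (!page_alignment)
                        page_alignment = obj->mm.region->min_page_size;

                if (bo->resource == res) {
                        /* The BO's current resource: build once, cache, hand out refs. */
                        if (!obj->ttm.cached_io_rsgt) {
                                struct i915_refct_sgt *rsgt;

                                rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
                                                                         page_alignment);
                                if (IS_ERR(rsgt))
                                        return rsgt;

                                obj->ttm.cached_io_rsgt = rsgt;
                        }
                        return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
                }

                /* Any other resource (e.g. during a move): one-off table, not cached. */
                return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
                                                         page_alignment);
        }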
599 static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
601 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
604 WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
617 return i915_ttm_purge(obj);
622 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
630 GEM_WARN_ON(obj->ttm.cached_io_rsgt);
631 if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
632 i915_ttm_purge(obj);
655 struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
661 if (!kref_get_unless_zero(&obj->base.refcount))
664 assert_object_held(obj);
666 unknown_state = i915_gem_object_has_unknown_state(obj);
667 i915_gem_object_put(obj);
686 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
694 base = obj->mm.region->iomap.base - obj->mm.region->region.start;
695 sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
704 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
705 resource_size_t iomap = obj->mm.region->iomap.base -
706 obj->mm.region->region.start;
724 daddr = i915_gem_object_get_dma_address(obj, page);
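
Lines 686-695 compute the user-visible PFN for a fault on an iomem-backed object: the region's CPU aperture offset plus the DMA address from the cached sg lookup. A sketch of i915_ttm_io_mem_pfn() as those fragments suggest (the final shift-and-add is reconstructed and worth checking against the source); lines 704-724 perform the equivalent translation page by page for the access-memory path.

        static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                                 unsigned long page_offset)
        {
                struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
                struct scatterlist *sg;
                unsigned long base;
                unsigned int ofs;

                /* Offset of the region's CPU aperture from the region's start. */
                base = obj->mm.region->iomap.base - obj->mm.region->region.start;

                /* Cached radix-tree lookup: page offset -> scatterlist entry. */
                sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page,
                                                      page_offset, &ofs);

                return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
        }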
773 static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
776 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
815 i915_ttm_adjust_domains_after_move(obj);
816 i915_ttm_adjust_gem_after_move(obj);
819 if (!i915_gem_object_has_pages(obj)) {
821 i915_ttm_resource_get_st(obj, bo->resource);
826 GEM_BUG_ON(obj->mm.rsgt);
827 obj->mm.rsgt = rsgt;
828 __i915_gem_object_set_pages(obj, &rsgt->table);
831 GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
832 i915_ttm_adjust_lru(obj);
836 static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
842 if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
845 GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
848 i915_ttm_placement_from_obj(obj, places, &placement);
850 return __i915_ttm_get_pages(obj, &placement);
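
Lines 836-850 are essentially all of i915_ttm_get_pages(): reject objects whose page count would overflow the sg machinery, build the placement list from the object, and hand off to __i915_ttm_get_pages(), whose tail (lines 815-832) installs the resulting sg-table and fixes up domains and LRU position. A sketch; the errno for the overflow case is an assumption.

        static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
        {
                struct ttm_place places[I915_TTM_MAX_PLACEMENTS + 1];
                struct ttm_placement placement;

                /* sg_alloc_table() can only address an unsigned int worth of pages. */
                if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
                        return -E2BIG; /* errno assumed */

                GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

                /* Translate the GEM placement list into TTM places. */
                i915_ttm_placement_from_obj(obj, places, &placement);

                return __i915_ttm_get_pages(obj, &placement);
        }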
867 static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
875 i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
876 obj->base.size, flags);
880 ret = __i915_ttm_get_pages(obj, &placement);
889 if (obj->mm.region != mr) {
890 i915_gem_object_release_memory_region(obj);
891 i915_gem_object_init_memory_region(obj, mr);
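
Lines 867-891 outline __i915_ttm_migrate(): force a single-placement validation into the target region, then, if the object really moved, swap its memory-region bookkeeping. A sketch; the ttm_placement field setup is version dependent (older kernels also fill a busy-placement list) and is simplified here.

        static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
                                      struct intel_memory_region *mr,
                                      unsigned int flags)
        {
                struct ttm_place requested;
                struct ttm_placement placement;
                int ret;

                /* A single allowed placement: the requested region. */
                i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
                                           obj->base.size, flags);
                placement.num_placement = 1;
                placement.placement = &requested;

                ret = __i915_ttm_get_pages(obj, &placement);
                if (ret)
                        return ret;

                /* Re-home the GEM bookkeeping if the backing region changed. */
                if (obj->mm.region != mr) {
                        i915_gem_object_release_memory_region(obj);
                        i915_gem_object_init_memory_region(obj, mr);
                }

                return 0;
        }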
897 static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
901 return __i915_ttm_migrate(obj, mr, flags);
904 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
915 if (obj->mm.rsgt)
916 i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
921 * @obj: The object
923 void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
925 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
962 if (kref_get_unless_zero(&obj->base.refcount)) {
963 if (shrinkable != obj->mm.ttm_shrinkable) {
965 if (obj->mm.madv == I915_MADV_WILLNEED)
966 __i915_gem_object_make_shrinkable(obj);
968 __i915_gem_object_make_purgeable(obj);
970 i915_gem_object_make_unshrinkable(obj);
973 obj->mm.ttm_shrinkable = shrinkable;
975 i915_gem_object_put(obj);
985 } else if (obj->mm.madv != I915_MADV_WILLNEED) {
987 } else if (!i915_gem_object_has_pages(obj)) {
1003 !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
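
Lines 962-975 are the heart of i915_ttm_adjust_lru(): flip the object between the shrinker-visible and hidden lists only while holding a temporary reference, so the transition cannot race with the final free. The shrinkable decision itself is computed earlier in the function from the state of the BO and its flags, and is not part of this listing; the snippet below assumes a local bool shrinkable.

        if (kref_get_unless_zero(&obj->base.refcount)) {
                if (shrinkable != obj->mm.ttm_shrinkable) {
                        if (shrinkable) {
                                if (obj->mm.madv == I915_MADV_WILLNEED)
                                        __i915_gem_object_make_shrinkable(obj);
                                else
                                        __i915_gem_object_make_purgeable(obj);
                        } else {
                                i915_gem_object_make_unshrinkable(obj);
                        }

                        obj->mm.ttm_shrinkable = shrinkable;
                }

                i915_gem_object_put(obj);
        }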
1028 static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
1030 GEM_BUG_ON(!obj->ttm.created);
1032 ttm_bo_put(i915_gem_to_ttm(obj));
1040 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
1046 if (unlikely(i915_gem_object_is_readonly(obj) &&
1054 if (obj->mm.madv != I915_MADV_WILLNEED) {
1083 for (i = 0; i < obj->mm.n_placements; i++) {
1084 struct intel_memory_region *mr = obj->mm.placements[i];
1090 flags = obj->flags;
1092 err = __i915_ttm_migrate(obj, mr, flags);
1108 wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
1125 if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
1126 obj->userfault_count = 1;
1127 spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
1128 list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
1129 spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
1135 intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
1138 i915_ttm_adjust_lru(obj);
1144 intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
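
Lines 1108-1144 are the runtime-pm side of the fault handler: take a wakeref before touching lmem, and on a successful fault record the object on the per-device lmem_userfault_list so a later runtime suspend can revoke the mapping. A snippet sketching that bookkeeping as the fragments suggest; whether the intel_wakeref_auto() arming (line 1135) sits inside the same branch, and its timeout argument, are assumptions here.

        if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
                /* First userspace fault on an lmem mapping: track it for rpm. */
                obj->userfault_count = 1;
                spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
                list_add(&obj->userfault_link,
                         &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
                spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);

                /* Arm the auto wakeref so the mapping is revoked on rpm suspend. */
                intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
                                   msecs_to_jiffies(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
        }

        i915_ttm_adjust_lru(obj);

        /* dma_resv unlock etc. elided */

        if (wakeref)
                intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);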
1153 struct drm_i915_gem_object *obj =
1156 if (i915_gem_object_is_readonly(obj) && write)
1164 struct drm_i915_gem_object *obj =
1168 i915_gem_object_get(obj);
1173 struct drm_i915_gem_object *obj =
1177 i915_gem_object_put(obj);
1187 static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
1190 GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
1192 return drm_vma_node_offset_addr(&obj->base.vma_node);
1195 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
1197 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
1200 assert_object_held_shared(obj);
1203 wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
1205 /* userfault_count is protected by obj lock and rpm wakeref. */
1206 if (obj->userfault_count) {
1207 spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
1208 list_del(&obj->userfault_link);
1209 spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
1210 obj->userfault_count = 0;
1214 GEM_WARN_ON(obj->userfault_count);
1216 ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
1219 intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
1243 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
1245 i915_gem_object_release_memory_region(obj);
1246 mutex_destroy(&obj->ttm.get_io_page.lock);
1248 if (obj->ttm.created) {
1258 if (obj->mm.ttm_shrinkable)
1259 i915_gem_object_make_unshrinkable(obj);
1261 i915_ttm_backup_free(obj);
1264 __i915_gem_free_object(obj);
1266 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
1268 __i915_gem_object_fini(obj);
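
Lines 1243-1268 describe the teardown in i915_ttm_bo_destroy(), and the ordering matters: pull the object off any shrinker list before freeing, release the backup and the GEM bindings, and defer the final free to RCU, but only when TTM initialization actually completed (obj->ttm.created); otherwise only the bare GEM init is undone. A sketch assembled from those fragments.

        static void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
        {
                struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

                i915_gem_object_release_memory_region(obj);
                mutex_destroy(&obj->ttm.get_io_page.lock);

                if (obj->ttm.created) {
                        /* Make sure the object is off the shrinker lists before freeing. */
                        if (obj->mm.ttm_shrinkable)
                                i915_gem_object_make_unshrinkable(obj);

                        i915_ttm_backup_free(obj);

                        /* Releases all GEM object bindings to the backend. */
                        __i915_gem_free_object(obj);

                        call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
                } else {
                        /* TTM init never completed: undo only the bare GEM init. */
                        __i915_gem_object_fini(obj);
                }
        }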
1275 * @obj: The gem object.
1282 struct drm_i915_gem_object *obj,
1297 drm_gem_private_object_init(&i915->drm, &obj->base, size);
1298 i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
1300 obj->bo_offset = offset;
1303 obj->mm.region = mem;
1304 INIT_LIST_HEAD(&obj->mm.region_link);
1306 INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
1307 mutex_init(&obj->ttm.get_io_page.lock);
1308 bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
1311 obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
1314 GEM_BUG_ON(page_size && obj->mm.n_placements);
1322 i915_gem_object_make_unshrinkable(obj);
1327 * destructor until obj->ttm.created is true.
1331 ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
1348 obj->ttm.created = true;
1349 i915_gem_object_release_memory_region(obj);
1350 i915_gem_object_init_memory_region(obj, mem);
1351 i915_ttm_adjust_domains_after_move(obj);
1352 i915_ttm_adjust_gem_after_move(obj);
1353 i915_gem_object_unlock(obj);
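
Lines 1282-1353 are from __i915_gem_ttm_object_init(). The pre-TTM setup (lines 1297-1322) wires up the GEM object, the region pointer, the io-page radix tree and the VMA node, and hides the object from the shrinker; then ttm_bo_init_reserved() (line 1331) hands ownership of error handling to TTM, which is why the destructor checks obj->ttm.created (line 1327). A sketch of the tail that runs once that call has succeeded.

        obj->ttm.created = true;

        /* Now that the BO exists, move the object onto its region's list. */
        i915_gem_object_release_memory_region(obj);
        i915_gem_object_init_memory_region(obj, mem);

        /* Sync GEM domains/flags with the freshly validated TTM state. */
        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_adjust_gem_after_move(obj);
        i915_gem_object_unlock(obj);

        return 0;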