Lines Matching defs:obj

59 bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
67 if (obj->pat_set_by_user)
74 return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl);
79 struct drm_i915_gem_object *obj;
81 obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
82 if (!obj)
84 obj->base.funcs = &i915_gem_object_funcs;
86 return obj;
89 void i915_gem_object_free(struct drm_i915_gem_object *obj)
91 return kmem_cache_free(slab_objects, obj);
94 void i915_gem_object_init(struct drm_i915_gem_object *obj,
102 BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
103 offsetof(typeof(*obj), __do_not_access.base));
105 spin_lock_init(&obj->vma.lock);
106 INIT_LIST_HEAD(&obj->vma.list);
108 INIT_LIST_HEAD(&obj->mm.link);
111 INIT_LIST_HEAD(&obj->client_link);
114 INIT_LIST_HEAD(&obj->lut_list);
115 spin_lock_init(&obj->lut_lock);
117 spin_lock_init(&obj->mmo.lock);
118 obj->mmo.offsets = RB_ROOT;
120 init_rcu_head(&obj->rcu);
122 obj->ops = ops;
124 obj->flags = flags;
126 obj->mm.madv = I915_MADV_WILLNEED;
127 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
128 mutex_init(&obj->mm.get_page.lock);
129 INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
130 mutex_init(&obj->mm.get_dma_page.lock);
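The allocation, init and free helpers above are the object lifecycle entry points. As a purely illustrative, hedged sketch (the function name and the setup_ok flag are hypothetical; only i915_gem_object_alloc()/i915_gem_object_free() come from the lines above, and the usual i915 driver headers are assumed), a backend pairs the allocation with a free on its error path:

/*
 * Illustrative sketch only. "setup_ok" stands in for whatever
 * backend-specific setup a real object type would perform after
 * i915_gem_object_init().
 */
static struct drm_i915_gem_object *example_create_object(bool setup_ok)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/*
	 * A real backend would call i915_gem_object_init() here with its
	 * object ops and allocation flags before creating the backing store.
	 */
	if (!setup_ok) {
		i915_gem_object_free(obj);
		return ERR_PTR(-EINVAL);
	}

	return obj;
}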
135 * @obj: The gem object to cleanup
142 void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
144 mutex_destroy(&obj->mm.get_page.lock);
145 mutex_destroy(&obj->mm.get_dma_page.lock);
146 dma_resv_fini(&obj->base._resv);
152 * @obj: #drm_i915_gem_object
155 void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
158 struct drm_i915_private *i915 = to_i915(obj->base.dev);
160 obj->pat_index = i915_gem_get_pat_index(i915, cache_level);
163 obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
166 obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
168 obj->cache_coherent = 0;
170 obj->cache_dirty =
171 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
177 * @obj: #drm_i915_gem_object
183 void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
186 struct drm_i915_private *i915 = to_i915(obj->base.dev);
188 if (obj->pat_index == pat_index)
191 obj->pat_index = pat_index;
194 obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
197 obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
199 obj->cache_coherent = 0;
201 obj->cache_dirty =
202 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
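The two setters above are alternative ways to fix an object's caching mode: one translates a cache level, the other applies a PAT index directly. A minimal sketch of choosing between them (example_set_caching and have_pat_index are made up for illustration; the helpers and I915_CACHE_LLC are taken from the i915 tree):

/*
 * Illustrative sketch only. Either let i915_gem_object_set_cache_coherency()
 * translate a cache level into a PAT index, or apply a PAT index directly
 * via i915_gem_object_set_pat_index().
 */
static void example_set_caching(struct drm_i915_gem_object *obj,
				struct drm_i915_private *i915,
				bool have_pat_index)
{
	if (!have_pat_index) {
		/* Derive pat_index, cache_coherent and cache_dirty from a level. */
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	} else {
		/* Apply an already looked-up PAT index directly. */
		i915_gem_object_set_pat_index(obj,
					      i915_gem_get_pat_index(i915, I915_CACHE_LLC));
	}
}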
206 bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
208 struct drm_i915_private *i915 = to_i915(obj->base.dev);
214 if (!(obj->flags & I915_BO_ALLOC_USER))
220 if (obj->pat_set_by_user)
240 struct drm_i915_gem_object *obj = to_intel_bo(gem);
247 spin_lock(&obj->lut_lock);
248 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
257 if (&ln->obj_link != &obj->lut_list) {
259 if (cond_resched_lock(&obj->lut_lock))
264 spin_unlock(&obj->lut_lock);
266 spin_lock(&obj->mmo.lock);
267 rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
269 spin_unlock(&obj->mmo.lock);
283 GEM_BUG_ON(vma->obj != obj);
291 i915_gem_object_put(obj);
297 struct drm_i915_gem_object *obj =
298 container_of(head, typeof(*obj), rcu);
299 struct drm_i915_private *i915 = to_i915(obj->base.dev);
302 if (obj->mm.n_placements > 1)
303 kfree(obj->mm.placements);
305 i915_gem_object_free(obj);
311 static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
315 if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
316 i915_gem_object_release_mmap_gtt(obj);
318 if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
321 i915_gem_object_release_mmap_offset(obj);
324 &obj->mmo.offsets,
326 drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
330 obj->mmo.offsets = RB_ROOT;
336 * @obj: The gem object to clean up
343 void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
345 assert_object_held_shared(obj);
347 if (!list_empty(&obj->vma.list)) {
350 spin_lock(&obj->vma.lock);
351 while ((vma = list_first_entry_or_null(&obj->vma.list,
354 GEM_BUG_ON(vma->obj != obj);
355 spin_unlock(&obj->vma.lock);
359 spin_lock(&obj->vma.lock);
361 spin_unlock(&obj->vma.lock);
364 __i915_gem_object_free_mmaps(obj);
366 atomic_set(&obj->mm.pages_pin_count, 0);
374 if (obj->base.import_attach)
375 i915_gem_object_lock(obj, NULL);
377 __i915_gem_object_put_pages(obj);
379 if (obj->base.import_attach)
380 i915_gem_object_unlock(obj);
382 GEM_BUG_ON(i915_gem_object_has_pages(obj));
385 void __i915_gem_free_object(struct drm_i915_gem_object *obj)
387 trace_i915_gem_object_destroy(obj);
389 GEM_BUG_ON(!list_empty(&obj->lut_list));
391 bitmap_free(obj->bit_17);
393 if (obj->base.import_attach)
394 drm_prime_gem_destroy(&obj->base, NULL);
396 drm_gem_free_mmap_offset(&obj->base);
398 if (obj->ops->release)
399 obj->ops->release(obj);
401 if (obj->shares_resv_from)
402 i915_vm_resv_put(obj->shares_resv_from);
404 __i915_gem_object_fini(obj);
410 struct drm_i915_gem_object *obj, *on;
412 llist_for_each_entry_safe(obj, on, freed, freed) {
414 if (obj->ops->delayed_free) {
415 obj->ops->delayed_free(obj);
419 __i915_gem_object_pages_fini(obj);
420 __i915_gem_free_object(obj);
423 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
446 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
447 struct drm_i915_private *i915 = to_i915(obj->base.dev);
449 GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
451 i915_drm_client_remove_object(obj);
472 if (llist_add(&obj->freed, &i915->mm.free_list))
476 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
481 front = i915_gem_object_get_frontbuffer(obj);
488 void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
493 front = i915_gem_object_get_frontbuffer(obj);
501 i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
506 src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
508 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
516 i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
519 dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
523 src_map = io_mapping_map_wc(&obj->mm.region->iomap,
524 dma - obj->mm.region->region.start,
534 static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
536 GEM_BUG_ON(!i915_gem_object_has_iomem(obj));
538 if (IS_DGFX(to_i915(obj->base.dev)))
539 return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource);
546 * @obj: GEM object to read from
551 * Reads data from @obj at the specified offset. The requested region to read
552 * from can't cross a page boundary. The caller must ensure that @obj pages
553 * are pinned and that @obj is synced wrt. any related writes.
555 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
558 int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
561 GEM_BUG_ON(offset >= obj->base.size);
563 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
565 if (i915_gem_object_has_struct_page(obj))
566 i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
567 else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj))
568 i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
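Given the constraints documented above (pages pinned by the caller, the read must not cross a page boundary, -ENODEV for unsupported backing stores), a minimal usage sketch looks like the following (example_peek_u32 is a hypothetical name):

/* Sketch only: read the first u32 of a pinned object. */
static int example_peek_u32(struct drm_i915_gem_object *obj, u32 *out)
{
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* Offset 0, size 4: safely within a single page. */
	return i915_gem_object_read_from_page(obj, 0, out, sizeof(*out));
}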
577 * @obj: The object to check
588 bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
591 int pin_count = atomic_read(&obj->mm.pages_pin_count);
596 spin_lock(&obj->vma.lock);
597 list_for_each_entry(vma, &obj->vma.list, obj_link) {
599 spin_unlock(&obj->vma.lock);
605 spin_unlock(&obj->vma.lock);
614 * @obj: Pointer to the object.
619 bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
621 struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
626 return obj->mm.n_placements > 1;
631 * @obj: The object to query.
638 bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
641 if (IS_DGFX(to_i915(obj->base.dev)) &&
642 i915_gem_object_evictable((void __force *)obj))
643 assert_object_held_shared(obj);
645 return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
650 * @obj: The object to query.
657 bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
660 if (IS_DGFX(to_i915(obj->base.dev)) &&
661 i915_gem_object_evictable((void __force *)obj))
662 assert_object_held_shared(obj);
664 return obj->mem_flags & I915_BO_FLAG_IOMEM;
670 * @obj: The object to migrate
684 bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
687 struct drm_i915_private *i915 = to_i915(obj->base.dev);
688 unsigned int num_allowed = obj->mm.n_placements;
693 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
699 if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
702 if (obj->mm.region == mr)
705 if (!i915_gem_object_evictable(obj))
708 if (!obj->ops->migrate)
711 if (!(obj->flags & I915_BO_ALLOC_USER))
718 if (mr == obj->mm.placements[i])
727 * @obj: The object to migrate.
750 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
754 return __i915_gem_object_migrate(obj, ww, id, obj->flags);
760 * @obj: The object to migrate.
764 * @flags: The object flags. Normally just obj->flags.
784 int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
789 struct drm_i915_private *i915 = to_i915(obj->base.dev);
793 GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
794 assert_object_held(obj);
799 if (!i915_gem_object_can_migrate(obj, id))
802 if (!obj->ops->migrate) {
803 if (GEM_WARN_ON(obj->mm.region != mr))
808 return obj->ops->migrate(obj, mr, flags);
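As a hedged usage sketch of the migration helpers above (example_migrate_to_smem is hypothetical; the region id and ww-context types are assumed from the i915 tree), a caller holding the object lock can probe migratability and then request the move:

/*
 * Sketch only. The caller must hold the object lock via a ww context, as
 * the assert in __i915_gem_object_migrate() above requires.
 */
static int example_migrate_to_smem(struct drm_i915_gem_object *obj,
				   struct i915_gem_ww_ctx *ww)
{
	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EINVAL;

	return i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
}

Note that, per the lines above, __i915_gem_object_migrate() already calls i915_gem_object_can_migrate() itself, so the explicit check is only useful when the caller wants the answer before committing to the move.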
814 * @obj: Pointer to the object
819 bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
824 if (!obj->mm.n_placements) {
827 return i915_gem_object_has_iomem(obj);
829 return i915_gem_object_has_pages(obj);
837 for (i = 0; i < obj->mm.n_placements; i++) {
838 if (obj->mm.placements[i]->type == type)
850 * @obj: Pointer to the object
854 bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
859 if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
862 if (obj->flags & I915_BO_ALLOC_CCS_AUX)
865 for (i = 0; i < obj->mm.n_placements; i++) {
867 if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
870 obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
880 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
883 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
895 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
897 i915_gem_object_flush_map(obj);
898 i915_gem_object_unpin_map(obj);
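The vmap/vunmap pairing above rests on the pin_map/flush_map/unpin_map helpers. A small illustrative sketch of CPU access through them (example_fill_object is hypothetical and assumes the caller already holds the object lock that i915_gem_object_pin_map() expects):

/* Sketch only: CPU-fill an object through a WB mapping and flush it. */
static int example_fill_object(struct drm_i915_gem_object *obj, u8 value)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, value, obj->base.size);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
	return 0;
}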
930 * @obj: The object whose moving fence to get.
939 int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
942 return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
948 * @obj: The object whose moving fence to wait for.
958 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
963 assert_object_held(obj);
965 ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
969 else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
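A hedged sketch of waiting on the moving fence before touching the object's pages (example_sync_before_cpu_access is hypothetical; the interruptible-wait flag is assumed from the helper's in-tree signature):

/* Sketch only: the object lock must already be held by the caller. */
static int example_sync_before_cpu_access(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	/* Interruptible wait for any kernel-internal move to complete. */
	return i915_gem_object_wait_moving_fence(obj, true);
}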
983 bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
991 return obj->mm.unknown_state;