Lines Matching defs:obj

64 struct drm_i915_gem_object *obj;
80 obj = i915_gem_object_lookup(file, args->handle);
81 if (!obj)
87 if (!obj->base.filp) {
92 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
97 addr = vm_mmap(obj->base.filp, 0, args->size,
112 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
121 i915_gem_object_put(obj);
127 i915_gem_object_put(obj);
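Lines 64-127 are the legacy DRM_IOCTL_I915_GEM_MMAP path: look up the object by handle, reject objects with no shmem backing store (!obj->base.filp), bounds-check offset/size with range_overflows(), vm_mmap() the backing file directly, and drop the lookup reference on both exit paths. A minimal userspace sketch of driving that ioctl, assuming an open DRM fd and a valid GEM handle (error handling trimmed):

/* Hedged sketch: legacy CPU mmap of a shmem-backed object. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *gem_mmap_cpu(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.offset = 0,	/* must stay within the object, see line 92 */
		.size   = size,
	};

	/* The kernel returns the vm_mmap() result in addr_ptr. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}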
131 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
133 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
199 compute_partial_view(const struct drm_i915_gem_object *obj,
205 if (i915_gem_object_is_tiled(obj))
206 chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
212 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
215 if (chunk >= obj->base.size >> PAGE_SHIFT)
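tile_row_pages() and compute_partial_view() (lines 131-215) size the window used when a whole object cannot be pinned into the mappable aperture: round the chunk up to whole tile rows for tiled objects, round the faulting page down to a chunk boundary, clamp the size to the pages that remain, and fall back to a normal (full) view once the chunk covers the entire object. The arithmetic, as a standalone sketch with plain integers standing in for the kernel types:

/* Hedged sketch of the partial-view arithmetic at lines 199-215. */
struct partial_sketch { unsigned long offset, size; };

static struct partial_sketch
compute_partial_sketch(unsigned long nr_pages,	   /* obj size in pages */
		       unsigned long tile_row,	   /* 0 if untiled */
		       unsigned long page_offset,  /* faulting page */
		       unsigned long chunk)	   /* MIN_CHUNK_PAGES */
{
	struct partial_sketch view;

	if (tile_row)	/* tiled: cover whole tile rows (roundup) */
		chunk = ((chunk + tile_row - 1) / tile_row) * tile_row;

	view.offset = page_offset - (page_offset % chunk);  /* rounddown */
	view.size = nr_pages - view.offset;
	if (view.size > chunk)				    /* min_t */
		view.size = chunk;

	/* chunk >= nr_pages: caller switches to a normal (full) view. */
	return view;
}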
255 struct drm_i915_gem_object *obj = mmo->obj;
261 if (unlikely(i915_gem_object_is_readonly(obj) &&
265 if (i915_gem_object_lock_interruptible(obj, NULL))
268 err = i915_gem_object_pin_pages(obj);
273 if (!i915_gem_object_has_struct_page(obj)) {
274 iomap = obj->mm.region->iomap.base;
275 iomap -= obj->mm.region->region.start;
279 /* PTEs are revoked in obj->ops->put_pages() */
282 obj->mm.pages->sgl, obj_offset, iomap);
285 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
286 obj->mm.dirty = true;
289 i915_gem_object_unpin_pages(obj);
292 i915_gem_object_unlock(obj);
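vm_fault_cpu() (lines 255-292) faults the whole mapping in one go: refuse writes to read-only objects, pin the pages under the object lock, and remap the scatterlist into the VMA. The bias at lines 274-275 subtracts the region start from the iomap base so that adding a device address from the sg list later yields a real BAR offset; struct-page objects skip it and pass iomap == 0. A hedged reconstruction of the remap call the fragments at lines 279-282 belong to (remap_io_sg() is i915's own helper; the exact parameter order here is an assumption read off those fragments):

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, obj_offset, iomap);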
342 struct drm_i915_gem_object *obj = mmo->obj;
343 struct drm_device *dev = obj->base.dev;
362 trace_i915_gem_object_fault(obj, page_offset, true, write);
368 ret = i915_gem_object_lock(obj, &ww);
373 if (i915_gem_object_is_readonly(obj) && write) {
378 ret = i915_gem_object_pin_pages(obj);
387 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
394 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
406 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
410 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
426 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
443 if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
470 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
471 list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
482 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
484 obj->mm.dirty = true;
494 i915_gem_object_unpin_pages(obj);
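vm_fault_gtt() (lines 342-494) is the aperture fault path: trace the fault, take the ww lock, reject writes to read-only objects, pin the pages, then pin the object into the mappable GGTT, falling back from a whole-object binding to progressively less picky partial views (the three ggtt_pin calls at lines 387, 406/410 and 426). On success it inserts the PTEs, marks the vma and object as having live userspace faults (lines 470-471) so runtime PM can revoke them, and dirties the object. A hedged sketch of just the fallback ladder, with the flag choices and ww-backoff handling simplified:

/* Hedged sketch of the pin ladder: whole object, no eviction, first;
 * then a chunked partial view; then the partial view with eviction. */
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
				  PIN_MAPPABLE | PIN_NONBLOCK | PIN_NOEVICT);
if (IS_ERR(vma)) {
	struct i915_gtt_view view =
		compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0,
					  PIN_MAPPABLE | PIN_NOSEARCH);
	if (IS_ERR(vma)) {
		view.type = I915_GTT_VIEW_PARTIAL;
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0,
						  PIN_MAPPABLE);
	}
}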
511 struct drm_i915_gem_object *obj = mmo->obj;
516 if (i915_gem_object_is_readonly(obj) && write)
520 if (range_overflows_t(u64, addr, len, obj->base.size))
525 err = i915_gem_object_lock(obj, &ww);
530 vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
538 __i915_gem_object_flush_map(obj, addr, len);
543 i915_gem_object_unpin_map(obj);
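vm_access() (lines 511-543) backs ptrace()/GDB pokes into the mapping: refuse writes to read-only objects, bounds-check, pin a forced write-combined CPU map, copy, and flush the touched range on writes. range_overflows_t() at line 520 is the same overflow-safe bounds check as range_overflows() at line 92; its logic reduces to:

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch of the check: true if [start, start + size) does not
 * fit inside [0, max), with the comparison itself overflow-safe. */
static bool range_overflows_sketch(uint64_t start, uint64_t size,
				   uint64_t max)
{
	return start >= max || size > max - start;
}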
558 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
562 GEM_BUG_ON(!obj->userfault_count);
564 for_each_ggtt_vma(vma, obj)
567 GEM_BUG_ON(obj->userfault_count);
578 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
580 struct drm_i915_private *i915 = to_i915(obj->base.dev);
595 if (!obj->userfault_count)
598 __i915_gem_object_release_mmap_gtt(obj);
615 void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
617 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
626 GEM_BUG_ON(!obj->userfault_count);
627 list_del(&obj->userfault_link);
628 obj->userfault_count = 0;
631 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
635 if (obj->ops->unmap_virtual)
636 obj->ops->unmap_virtual(obj);
638 spin_lock(&obj->mmo.lock);
640 &obj->mmo.offsets, offset) {
648 spin_unlock(&obj->mmo.lock);
650 obj->base.dev->anon_inode->i_mapping);
651 spin_lock(&obj->mmo.lock);
653 spin_unlock(&obj->mmo.lock);
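i915_gem_object_release_mmap_offset() (lines 631-653) tears down the CPU mmaps: let the backend unmap its virtual mappings via obj->ops->unmap_virtual, then walk the per-object offset tree and drm_vma_node_unmap() each node against the device's anonymous inode mapping. Because the unmap can sleep, the walk drops obj->mmo.lock around each unmap and retakes it, which is the lock/unlock pairing visible at lines 648-651. The shape of that dance, sketched:

/* Hedged sketch: drop the spinlock around the (sleeping) unmap. */
spin_lock(&obj->mmo.lock);
/* ... for each non-GTT mmo in obj->mmo.offsets ... */
spin_unlock(&obj->mmo.lock);
drm_vma_node_unmap(&mmo->vma_node,
		   obj->base.dev->anon_inode->i_mapping);
spin_lock(&obj->mmo.lock);
/* ... continue the walk ... */
spin_unlock(&obj->mmo.lock);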
657 lookup_mmo(struct drm_i915_gem_object *obj,
662 spin_lock(&obj->mmo.lock);
663 rb = obj->mmo.offsets.rb_node;
669 spin_unlock(&obj->mmo.lock);
678 spin_unlock(&obj->mmo.lock);
684 insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
688 spin_lock(&obj->mmo.lock);
690 p = &obj->mmo.offsets.rb_node;
698 spin_unlock(&obj->mmo.lock);
699 drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
711 rb_insert_color(&mmo->offset, &obj->mmo.offsets);
712 spin_unlock(&obj->mmo.lock);
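lookup_mmo() and insert_mmo() (lines 657-712) keep at most one i915_mmap_offset per mmap type in a per-object rb-tree keyed by mmap_type, under obj->mmo.lock. insert_mmo() re-walks the tree before linking so that if another thread raced the same type in first, the loser's vma_node is removed again (lines 699-700) and the winner is returned. The lookup walk, sketched from the fragments above:

/* Hedged sketch of lookup_mmo(): ordered walk keyed on mmap_type. */
static struct i915_mmap_offset *
lookup_mmo_sketch(struct drm_i915_gem_object *obj,
		  enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;	/* reuse the existing offset */
		}
		rb = mmo->mmap_type < mmap_type ? rb->rb_right : rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;	/* caller allocates and tries insert_mmo() */
}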
718 mmap_offset_attach(struct drm_i915_gem_object *obj,
722 struct drm_i915_private *i915 = to_i915(obj->base.dev);
726 GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
728 mmo = lookup_mmo(obj, mmap_type);
736 mmo->obj = obj;
740 err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
741 &mmo->vma_node, obj->base.size / PAGE_SIZE);
752 err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
753 &mmo->vma_node, obj->base.size / PAGE_SIZE);
758 mmo = insert_mmo(obj, mmo);
759 GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
771 __assign_mmap_offset(struct drm_i915_gem_object *obj,
777 if (i915_gem_object_never_mmap(obj))
780 if (obj->ops->mmap_offset) {
784 *offset = obj->ops->mmap_offset(obj);
792 !i915_gem_object_has_struct_page(obj) &&
793 !i915_gem_object_has_iomem(obj))
796 mmo = mmap_offset_attach(obj, mmap_type, file);
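__assign_mmap_offset() (lines 771-796) gates the whole thing: objects marked never-mmap are rejected, backends with their own mmap_offset hook (TTM on discrete parts) are delegated to, and everything else must expose either struct pages or an iomem region before mmap_offset_attach() is called. The capability check behind lines 792-793, reconstructed as a hedged sketch:

/* Hedged sketch: non-GTT (CPU) mmap types need something the CPU can
 * actually map, either struct pages or an iomem region. */
if (mmap_type != I915_MMAP_TYPE_GTT &&
    !i915_gem_object_has_struct_page(obj) &&
    !i915_gem_object_has_iomem(obj))
	return -ENODEV;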
810 struct drm_i915_gem_object *obj;
813 obj = i915_gem_object_lookup(file, handle);
814 if (!obj)
817 err = i915_gem_object_lock_interruptible(obj, NULL);
820 err = __assign_mmap_offset(obj, mmap_type, offset, file);
821 i915_gem_object_unlock(obj);
823 i915_gem_object_put(obj);
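Lines 810-823 are the DRM_IOCTL_I915_GEM_MMAP_OFFSET service routine: look up the object, take its lock interruptibly, assign (or reuse) the fake offset, and drop the lookup reference. Userspace then mmap()s the DRM fd itself at that offset, which arrives at i915_gem_mmap() below. A hedged usage sketch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Hedged sketch of the modern mmap path: fake offset, then mmap(fd). */
static void *gem_mmap_offset_wc(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags  = I915_MMAP_OFFSET_WC,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}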
923 struct drm_i915_gem_object *obj = mmo->obj;
925 GEM_BUG_ON(!obj);
926 i915_gem_object_get(obj);
932 struct drm_i915_gem_object *obj = mmo->obj;
934 GEM_BUG_ON(!obj);
935 i915_gem_object_put(obj);
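vm_open()/vm_close() (lines 923-935) tie object lifetime to the VMA rather than to a file: every VMA created over the mapping, including copies made at fork(), takes a reference on mmo->obj in open and drops it in close, so the object cannot die while PTEs might still point at it (the comment at line 1014 makes the same point). The pattern, sketched:

/* Hedged sketch of the refcounting above: one object reference per VMA. */
static void vm_open_sketch(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;

	i915_gem_object_get(mmo->obj);	/* this VMA now pins the object */
}

static void vm_close_sketch(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;

	i915_gem_object_put(mmo->obj);	/* last munmap drops the pin */
}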
989 i915_gem_object_mmap(struct drm_i915_gem_object *obj,
993 struct drm_i915_private *i915 = to_i915(obj->base.dev);
997 if (i915_gem_object_is_readonly(obj)) {
999 i915_gem_object_put(obj);
1007 i915_gem_object_put(obj);
1014 * We keep the ref on mmo->obj, not vm_file, but we require
1025 if (obj->ops->mmap_ops) {
1027 vma->vm_ops = obj->ops->mmap_ops;
1028 vma->vm_private_data = obj->base.vma_node.driver_private;
1077 struct drm_i915_gem_object *obj = NULL;
1096 obj = i915_gem_object_get_rcu(mmo->obj);
1098 GEM_BUG_ON(obj && obj->ops->mmap_ops);
1100 obj = i915_gem_object_get_rcu
1104 GEM_BUG_ON(obj && !obj->ops->mmap_ops);
1109 if (!obj)
1112 return i915_gem_object_mmap(obj, mmo, vma);
1115 int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
1117 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1127 if (obj->ops->mmap_ops) {
1132 vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
1136 mmo = mmap_offset_attach(obj, mmap_type, NULL);
1146 * this obj and then it gets decreased by the vm_ops->close().
1147 * To balance this increase the obj ref_count here.
1149 obj = i915_gem_object_get(obj);
1150 return i915_gem_object_mmap(obj, mmo, vma);