Lines matching defs:obj — the matches below come from drivers/gpu/drm/i915/gem/i915_gem_shmem.c, the shmem backing-store backend of the Linux i915 driver; the number on each line is the file line number.

205 static int shmem_get_pages(struct drm_i915_gem_object *obj)
207 struct drm_i915_private *i915 = to_i915(obj->base.dev);
208 struct intel_memory_region *mem = obj->mm.region;
209 struct address_space *mapping = obj->base.filp->f_mapping;
219 GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
220 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
227 ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
232 ret = i915_gem_gtt_prepare_pages(obj, st);
248 obj->base.size >> PAGE_SHIFT);
253 if (i915_gem_object_needs_bit17_swizzle(obj))
254 i915_gem_object_do_bit_17_swizzle(obj, st);
256 if (i915_gem_object_can_bypass_llc(obj))
257 obj->cache_dirty = true;
259 __i915_gem_object_set_pages(obj, st);
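
Read in order, these matches give the whole acquisition path of shmem_get_pages(): build an sg_table out of the object's shmemfs mapping, DMA-map it for the GTT, fix up bit-17 swizzled data where the platform needs it, mark the cache dirty when the object can bypass the LLC, and only then publish the pages on the object. A minimal control-flow sketch, with stub types and helper names of my own standing in for the i915 internals:

    /* Illustration only: stubbed types and helpers stand in for the
     * i915 ones; the control flow mirrors shmem_get_pages(). */
    struct sg_table { int unused; };
    struct obj {
        int needs_bit17_swizzle;  /* old platforms with bit-17 swizzling */
        int can_bypass_llc;       /* CPU writes may skip the shared LLC  */
        int cache_dirty;
        struct sg_table *pages;
    };

    static int alloc_table_from_shmem(struct obj *o, struct sg_table *st)
    { (void)o; (void)st; return 0; }   /* shmem_sg_alloc_table()         */
    static int dma_map_for_gtt(struct obj *o, struct sg_table *st)
    { (void)o; (void)st; return 0; }   /* i915_gem_gtt_prepare_pages()   */
    static void fixup_bit17_swizzle(struct obj *o, struct sg_table *st)
    { (void)o; (void)st; }             /* i915_gem_object_do_bit_17_swizzle() */

    static int get_pages_sketch(struct obj *o, struct sg_table *st)
    {
        int ret;

        ret = alloc_table_from_shmem(o, st);  /* pull pages from shmemfs */
        if (ret)
            return ret;

        ret = dma_map_for_gtt(o, st);         /* make GPU-visible        */
        if (ret)
            return ret;

        if (o->needs_bit17_swizzle)
            fixup_bit17_swizzle(o, st);       /* repair tiled data whose
                                               * physical bit 17 changed */

        if (o->can_bypass_llc)
            o->cache_dirty = 1;               /* force a flush before use */

        o->pages = st;                        /* __i915_gem_object_set_pages() */
        return 0;
    }

    int main(void)
    {
        struct obj o = { 0 };
        struct sg_table st = { 0 };
        return get_pages_sketch(&o, &st);
    }

Note that publishing the pages is the last step, matching the order of the matched lines.
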
284 shmem_truncate(struct drm_i915_gem_object *obj)
292 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
293 obj->mm.madv = __I915_MADV_PURGED;
294 obj->mm.pages = ERR_PTR(-EFAULT);
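
Line 294 parks a poisoned pointer in obj->mm.pages after a purge: ERR_PTR(-EFAULT) encodes the errno in the pointer value itself, so later users can detect the purged state with IS_ERR() instead of a separate flag. A self-contained userspace copy of the kernel's include/linux/err.h trick, for illustration:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The last page of the address space never holds valid objects, so
     * small negative errnos can be smuggled through a pointer. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)      { return (void *)(intptr_t)error; }
    static long  PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
    static int   IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
        /* What shmem_truncate() leaves behind in obj->mm.pages. */
        void *pages = ERR_PTR(-EFAULT);

        if (IS_ERR(pages))
            printf("pages poisoned with errno %ld\n", -PTR_ERR(pages));
        return 0;
    }
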
325 shmem_writeback(struct drm_i915_gem_object *obj)
327 __shmem_writeback(obj->base.size, obj->base.filp->f_mapping);
330 static int shmem_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
332 switch (obj->mm.madv) {
334 return i915_gem_object_truncate(obj);
340 shmem_writeback(obj);
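
shmem_shrink() dispatches on the madvise state: a DONTNEED object is truncated outright (its backing store is discarded and it becomes __I915_MADV_PURGED, per shmem_truncate() above), while a still-wanted object is only written back so its RAM can be reclaimed without losing contents. A hedged sketch of that state machine; the enum values follow the i915 names, the helper bodies and the SHRINK_WRITEBACK flag bit are stand-ins:

    /* Illustration: shape follows the switch in shmem_shrink(). */
    enum i915_madv {
        I915_MADV_WILLNEED,   /* user still wants the contents kept     */
        I915_MADV_DONTNEED,   /* contents may be dropped under pressure */
        __I915_MADV_PURGED,   /* backing store already discarded        */
    };

    #define SHRINK_WRITEBACK 0x1   /* placeholder for the i915 flag */

    struct obj { enum i915_madv madv; };

    static int truncate_sketch(struct obj *o)
    {
        o->madv = __I915_MADV_PURGED;   /* shmem_truncate(): no way back */
        return 0;
    }

    static void writeback_sketch(struct obj *o) { (void)o; /* push to swap */ }

    static int shrink_sketch(struct obj *o, unsigned int flags)
    {
        switch (o->madv) {
        case I915_MADV_DONTNEED:
            return truncate_sketch(o);  /* cheapest reclaim: discard   */
        case __I915_MADV_PURGED:
            return 0;                   /* nothing left to give back   */
        default:
            break;
        }

        if (flags & SHRINK_WRITEBACK)
            writeback_sketch(o);        /* keep contents, free the RAM */
        return 0;
    }

    int main(void)
    {
        struct obj o = { I915_MADV_DONTNEED };
        return shrink_sketch(&o, SHRINK_WRITEBACK);
    }
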
346 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
350 struct drm_i915_private *i915 = to_i915(obj->base.dev);
352 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
354 if (obj->mm.madv == I915_MADV_DONTNEED)
355 obj->mm.dirty = false;
358 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
359 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
362 __start_cpu_write(obj);
372 obj->cache_dirty = true;
375 void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
377 __i915_gem_object_release_shmem(obj, pages, true);
379 i915_gem_gtt_finish_pages(obj, pages);
381 if (i915_gem_object_needs_bit17_swizzle(obj))
382 i915_gem_object_save_bit_17_swizzle(obj, pages);
384 shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping,
385 obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
387 obj->mm.dirty = false;
391 shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
393 if (likely(i915_gem_object_has_struct_page(obj)))
394 i915_gem_object_put_pages_shmem(obj, pages);
396 i915_gem_object_put_pages_phys(obj, pages);
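
The put-pages path is the mirror image of acquisition. shmem_put_pages() first dispatches on whether the object is really struct-page backed (phys objects take their own path), then i915_gem_object_put_pages_shmem() unwinds in order: release the shmem bookkeeping (__i915_gem_object_release_shmem() above clears mm.dirty for DONTNEED objects and moves non-coherent objects back to the CPU write domain), undo the GTT mapping, save the bit-17 swizzle state for the next acquire, and finally hand the sg_table back to shmem. A sketch of that ordering with stub helpers of my own:

    /* Illustration only: ordering mirrors shmem_put_pages() and
     * i915_gem_object_put_pages_shmem(); all helpers are stubs. */
    struct sg_table { int unused; };
    struct obj {
        int has_struct_page;   /* false for phys-backed objects */
        int dirty;             /* CPU wrote to the pages        */
        int willneed;          /* madv == I915_MADV_WILLNEED    */
    };

    static void release_shmem_state(struct obj *o) { (void)o; }
    static void gtt_finish_pages(struct obj *o, struct sg_table *p)
    { (void)o; (void)p; }
    static void save_bit17_swizzle(struct obj *o, struct sg_table *p)
    { (void)o; (void)p; }
    static void free_table_to_shmem(struct sg_table *p, int dirty, int backup)
    { (void)p; (void)dirty; (void)backup; }
    static void put_pages_phys(struct obj *o, struct sg_table *p)
    { (void)o; (void)p; }

    static void put_pages_sketch(struct obj *o, struct sg_table *pages)
    {
        if (!o->has_struct_page) {      /* phys objects: dedicated path */
            put_pages_phys(o, pages);
            return;
        }

        release_shmem_state(o);         /* domain/dirty bookkeeping     */
        gtt_finish_pages(o, pages);     /* undo the DMA mapping         */
        save_bit17_swizzle(o, pages);   /* record layout for re-acquire */

        /* dirty: write contents back; backup: keep them swappable
         * because madv still says WILLNEED (line 385). */
        free_table_to_shmem(pages, o->dirty, o->willneed);
        o->dirty = 0;
    }

    int main(void)
    {
        struct obj o = { 1, 1, 1 };
        struct sg_table st = { 0 };
        put_pages_sketch(&o, &st);
        return 0;
    }
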
400 shmem_pwrite(struct drm_i915_gem_object *obj,
404 struct file *file = obj->base.filp;
413 if (!i915_gem_object_has_struct_page(obj))
414 return i915_gem_object_pwrite_phys(obj, arg);
425 if (i915_gem_object_has_pages(obj))
428 if (obj->mm.madv != I915_MADV_WILLNEED)
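
The pwrite matches show the fast path's preconditions rather than the copy loop itself: objects without struct pages divert to the phys path (line 414), an object whose pages are already instantiated bails out so the caller can use the normal domain-managed write path (line 425), and an object no longer marked WILLNEED is refused. A compact sketch of just those guards; the errno choices reflect my reading of the kernel source and should be treated as illustrative:

    #include <errno.h>

    /* The three guards visible in shmem_pwrite(), with stub types; the
     * copy loop is elided in the matches and in this sketch. */
    enum i915_madv { I915_MADV_WILLNEED, I915_MADV_DONTNEED };
    struct obj { int has_struct_page; int has_pages; enum i915_madv madv; };

    static int pwrite_phys(struct obj *o) { (void)o; return 0; }  /* stub */
    static int pwrite_fill(struct obj *o) { (void)o; return 0; }  /* stub */

    static int pwrite_sketch(struct obj *o)
    {
        if (!o->has_struct_page)
            return pwrite_phys(o);  /* phys objects: dedicated path     */

        if (o->has_pages)
            return -ENODEV;         /* already populated: caller falls
                                     * back to the domain-managed path  */

        if (o->madv != I915_MADV_WILLNEED)
            return -EFAULT;         /* purged/discardable: refuse       */

        return pwrite_fill(o);      /* populate the page cache directly */
    }

    int main(void)
    {
        struct obj o = { 1, 0, I915_MADV_WILLNEED };
        return pwrite_sketch(&o);
    }

shmem_pread (next match) applies the same struct-page dispatch on the read side.
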
454 shmem_pread(struct drm_i915_gem_object *obj,
457 if (!i915_gem_object_has_struct_page(obj))
458 return i915_gem_object_pread_phys(obj, arg);
463 static void shmem_release(struct drm_i915_gem_object *obj)
465 if (i915_gem_object_has_struct_page(obj))
466 i915_gem_object_release_memory_region(obj);
468 fput(obj->base.filp);
487 struct drm_gem_object *obj,
493 drm_gem_private_object_init(&i915->drm, obj, size);
517 obj->filp = filp;
522 struct drm_i915_gem_object *obj,
535 ret = __create_shmem(i915, &obj->base, size);
546 mapping = obj->base.filp->f_mapping;
550 i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
551 obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
552 obj->write_domain = I915_GEM_DOMAIN_CPU;
553 obj->read_domains = I915_GEM_DOMAIN_CPU;
579 i915_gem_object_set_cache_coherency(obj, cache_level);
581 i915_gem_object_init_memory_region(obj, mem);
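
Initialisation stitches the pieces together across two functions: __create_shmem() backs the GEM object with a shmemfs file (drm_gem_private_object_init() at line 493, then obj->filp = filp at line 517), and the shmem object init wires up i915_gem_shmem_ops, marks the object struct-page backed, starts both domains at the CPU, sets a cache level, and registers the object with its memory region. A hedged sketch of that sequence; the helper names and flag bits below are mine, not the kernel's:

    /* Stand-ins for the sequence split across __create_shmem() and the
     * shmem object-init path; illustration only. */
    struct file { int unused; };
    struct obj {
        struct file *filp;
        unsigned int mem_flags;
        unsigned int read_domains, write_domain;
    };

    #define BO_FLAG_STRUCT_PAGE (1u << 0)  /* placeholder flag bit   */
    #define DOMAIN_CPU          (1u << 0)  /* placeholder domain bit */

    static struct file backing_file;
    static struct file *shmem_file_for(unsigned long size)
    { (void)size; return &backing_file; }  /* shmemfs file creation, stubbed */
    static void set_cache_coherency(struct obj *o) { (void)o; }
    static void add_to_memory_region(struct obj *o) { (void)o; }

    static int object_init_sketch(struct obj *o, unsigned long size)
    {
        o->filp = shmem_file_for(size);      /* shmemfs file backs the BO  */

        o->mem_flags |= BO_FLAG_STRUCT_PAGE; /* pages are real struct pages */
        o->write_domain = DOMAIN_CPU;        /* new objects begin life      */
        o->read_domains = DOMAIN_CPU;        /* CPU-coherent, GPU-invisible */

        set_cache_coherency(o);              /* platform cache level        */
        add_to_memory_region(o);             /* account against the region  */
        return 0;
    }

    int main(void)
    {
        struct obj o = { 0 };
        return object_init_sketch(&o, 4096);
    }
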
599 struct drm_i915_gem_object *obj;
605 obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
606 if (IS_ERR(obj))
607 return obj;
609 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
611 file = obj->base.filp;
622 return obj;
625 i915_gem_object_put(obj);
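
i915_gem_object_create_shmem_from_data() is the convenience constructor for blob-style payloads: create a shmem object rounded up to whole pages (line 605), assert it is still CPU-writable (line 609), copy the data in through the backing file, and drop the object reference on any failure (line 625). A sketch of the create-then-populate pattern with userspace stand-ins; the kernel copies through the file's address space rather than a plain vaddr, and signals creation failure with ERR_PTR rather than NULL:

    #include <string.h>

    #define PAGE_SIZE 4096ul
    #define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

    struct obj { char *vaddr; unsigned long size; };

    static struct obj the_obj;
    static char the_store[2 * PAGE_SIZE];

    static struct obj *create_shmem(unsigned long size)
    {
        if (size > sizeof(the_store))
            return 0;                  /* creation failure, stand-in   */
        the_obj.vaddr = the_store;
        the_obj.size = size;
        return &the_obj;
    }

    static void obj_put(struct obj *o) { (void)o; /* drop the reference */ }

    static struct obj *create_from_data(const void *data, unsigned long size)
    {
        struct obj *o = create_shmem(round_up(size, PAGE_SIZE));
        if (!o)                        /* kernel tests IS_ERR(obj)     */
            return 0;

        if (!data) {                   /* any copy failure: unwind     */
            obj_put(o);                /* i915_gem_object_put(), as on
                                        * the real error path          */
            return 0;
        }
        memcpy(o->vaddr, data, size);  /* kernel copies via the file   */
        return o;
    }

    int main(void)
    {
        return create_from_data("blob", 5) ? 0 : 1;
    }
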
659 bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
661 return obj->ops == &i915_gem_shmem_ops;
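
The final match is the backend test: an object is shmem-backed exactly when its ops pointer is &i915_gem_shmem_ops, so the identity of the (static, per-backend) function table doubles as a type tag and no extra flag is needed. The same idiom in miniature, with made-up ops tables:

    #include <stdio.h>

    struct obj_ops { const char *name; };
    struct obj { const struct obj_ops *ops; };

    /* One static ops table per backend; its address is the type tag. */
    static const struct obj_ops shmem_ops  = { "shmem"  };
    static const struct obj_ops stolen_ops = { "stolen" };

    static int obj_is_shmem(const struct obj *o)
    {
        return o->ops == &shmem_ops;   /* pointer identity, nothing more */
    }

    int main(void)
    {
        struct obj a = { &shmem_ops }, b = { &stolen_ops };
        printf("%d %d\n", obj_is_shmem(&a), obj_is_shmem(&b));
        return 0;
    }
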