Lines matching defs:obj in drivers/gpu/drm/i915/gem/i915_gem_domain.c (drm/i915 GEM domain management). Only lines containing "obj" are shown, each prefixed with its line number in the source file; intervening lines are elided.
19 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
21 struct drm_i915_private *i915 = to_i915(obj->base.dev);
33 return !(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
34 i915_gem_object_has_cache_level(obj, I915_CACHE_WT));
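
Only the matched lines of gpu_write_needs_clflush() appear above, so its early return is elided. A minimal reconstruction in the file's own context; the discrete-GPU (IS_DGFX) early return is an assumption, the rest follows from the matches:

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        /* Assumption: device-local memory on discrete parts is never
         * CPU-cached, so no clflush is required. */
        if (IS_DGFX(i915))
                return false;

        /* A GPU write must be flushed out of the CPU cache unless the
         * object is mapped uncached (NONE) or write-through (WT). */
        return !(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
                 i915_gem_object_has_cache_level(obj, I915_CACHE_WT));
}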
37 bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
39 struct drm_i915_private *i915 = to_i915(obj->base.dev);
41 if (obj->cache_dirty)
47 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
51 return i915_gem_object_is_framebuffer(obj);
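
The matches give i915_gem_cpu_write_needs_clflush()'s conditions but not the bodies between them. A sketch filling in the return values implied by the control flow (the early-return bodies and the IS_DGFX check are assumptions):

bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        /* Already marked dirty: a flush is pending anyway. */
        if (obj->cache_dirty)
                return false;

        /* Assumption: discrete parts skip clflush entirely. */
        if (IS_DGFX(i915))
                return false;

        /* CPU writes are not coherent with the GPU: flush needed. */
        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        /* Coherent for normal GPU use, but scanout bypasses the cache. */
        return i915_gem_object_is_framebuffer(obj);
}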
55 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
59 assert_object_held(obj);
61 if (!(obj->write_domain & flush_domains))
64 switch (obj->write_domain) {
66 spin_lock(&obj->vma.lock);
67 for_each_ggtt_vma(vma, obj)
69 spin_unlock(&obj->vma.lock);
71 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
79 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
83 if (gpu_write_needs_clflush(obj))
84 obj->cache_dirty = true;
88 obj->write_domain = 0;
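
flush_write_domain() is listed with its switch cases collapsed. A sketch of the full shape, assuming the usual per-domain actions (GGTT write flushing via i915_vma_unset_ggtt_write()/intel_gt_flush_ggtt_writes(), a write barrier for WC, a clflush for CPU); the case labels are inferred from the matched bodies:

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
        struct i915_vma *vma;

        assert_object_held(obj);

        if (!(obj->write_domain & flush_domains))
                return;

        switch (obj->write_domain) {
        case I915_GEM_DOMAIN_GTT:
                /* Assumption: clear each vma's GGTT-write mark and flush
                 * the GGTT for any vma that had it set. */
                spin_lock(&obj->vma.lock);
                for_each_ggtt_vma(vma, obj)
                        if (i915_vma_unset_ggtt_write(vma))
                                intel_gt_flush_ggtt_writes(vma->vm->gt);
                spin_unlock(&obj->vma.lock);

                i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
                break;

        case I915_GEM_DOMAIN_WC:
                wmb();  /* assumption: WC writes only need a barrier */
                break;

        case I915_GEM_DOMAIN_CPU:
                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
                break;

        case I915_GEM_DOMAIN_RENDER:
                if (gpu_write_needs_clflush(obj))
                        obj->cache_dirty = true;
                break;
        }

        obj->write_domain = 0;
}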
91 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
97 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
98 if (obj->cache_dirty)
99 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
100 obj->write_domain = 0;
103 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
105 if (!i915_gem_object_is_framebuffer(obj))
108 i915_gem_object_lock(obj, NULL);
109 __i915_gem_object_flush_for_display(obj);
110 i915_gem_object_unlock(obj);
113 void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
115 if (i915_gem_object_is_framebuffer(obj))
116 __i915_gem_object_flush_for_display(obj);
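
The three display-flush helpers fit together as below. Only the early return on the framebuffer check is filled in; the locked variant differs solely in assuming the caller already holds the object lock:

static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
        /* Push dirty data out of every domain except CPU, then force a
         * clflush so the scanout engine reads coherent memory. */
        flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
        if (obj->cache_dirty)
                i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
        obj->write_domain = 0;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
        if (!i915_gem_object_is_framebuffer(obj))
                return;

        i915_gem_object_lock(obj, NULL);
        __i915_gem_object_flush_for_display(obj);
        i915_gem_object_unlock(obj);
}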
122 * @obj: object to act on
129 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
133 assert_object_held(obj);
135 ret = i915_gem_object_wait(obj,
142 if (obj->write_domain == I915_GEM_DOMAIN_WC)
145 /* Flush and acquire obj->pages so that we are coherent through
148 * For example, if the obj->filp was moved to swap without us
150 * continue to assume that the obj remained out of the CPU cached
153 ret = i915_gem_object_pin_pages(obj);
157 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
163 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
169 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
170 obj->read_domains |= I915_GEM_DOMAIN_WC;
172 obj->read_domains = I915_GEM_DOMAIN_WC;
173 obj->write_domain = I915_GEM_DOMAIN_WC;
174 obj->mm.dirty = true;
177 i915_gem_object_unpin_pages(obj);
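
Joining the matched lines of i915_gem_object_set_to_wc_domain() gives the canonical domain-transition pattern used throughout this file: wait, pin pages, flush the old write domain, update the domain masks. The wait flags and the memory barrier are filled in as assumptions:

int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
        int ret;

        assert_object_held(obj);

        /* Assumption: wait interruptibly, and for all fences on a write. */
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   (write ? I915_WAIT_ALL : 0),
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;

        if (obj->write_domain == I915_GEM_DOMAIN_WC)
                return 0;

        /* Pin the pages so swap-out cannot invalidate our view. */
        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);

        /* Assumption: full barrier when first entering the WC domain. */
        if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
                mb();

        GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_WC;
        if (write) {
                obj->read_domains = I915_GEM_DOMAIN_WC;
                obj->write_domain = I915_GEM_DOMAIN_WC;
                obj->mm.dirty = true;
        }

        i915_gem_object_unpin_pages(obj);
        return 0;
}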
184 * @obj: object to act on
191 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
195 assert_object_held(obj);
197 ret = i915_gem_object_wait(obj,
204 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
207 /* Flush and acquire obj->pages so that we are coherent through
210 * For example, if the obj->filp was moved to swap without us
212 * continue to assume that the obj remained out of the CPU cached
215 ret = i915_gem_object_pin_pages(obj);
219 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
225 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
231 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
232 obj->read_domains |= I915_GEM_DOMAIN_GTT;
236 obj->read_domains = I915_GEM_DOMAIN_GTT;
237 obj->write_domain = I915_GEM_DOMAIN_GTT;
238 obj->mm.dirty = true;
240 spin_lock(&obj->vma.lock);
241 for_each_ggtt_vma(vma, obj)
244 spin_unlock(&obj->vma.lock);
247 i915_gem_object_unpin_pages(obj);
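
i915_gem_object_set_to_gtt_domain() follows the same wait/pin/flush pattern as the WC variant above; the difference is its write tail, where each globally bound GGTT vma is marked as carrying pending GGTT writes so flush_write_domain() can flush them later. A sketch of that tail (the i915_vma_is_bound() guard is an assumption):

        if (write) {
                struct i915_vma *vma;

                obj->read_domains = I915_GEM_DOMAIN_GTT;
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj->mm.dirty = true;

                /* Mark every bound GGTT vma so its writes are flushed on
                 * the next domain change. */
                spin_lock(&obj->vma.lock);
                for_each_ggtt_vma(vma, obj)
                        if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                                i915_vma_set_ggtt_write(vma);
                spin_unlock(&obj->vma.lock);
        }

        i915_gem_object_unpin_pages(obj);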
253 * @obj: object to act on
266 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
277 if (i915_gem_object_has_cache_level(obj, cache_level))
280 ret = i915_gem_object_wait(obj,
288 i915_gem_object_set_cache_coherency(obj, cache_level);
289 obj->cache_dirty = true;
292 return i915_gem_object_unbind(obj,
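
Putting the matched lines of i915_gem_object_set_cache_level() together: it is a no-op if the object already has the requested level; otherwise it waits for all activity, updates coherency state, marks the cache dirty, and unbinds so that rebinding applies the new PTE cache bits. The wait and unbind flags below are assumptions:

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
{
        int ret;

        /* No-op if the object is already at this cache level. */
        if (i915_gem_object_has_cache_level(obj, cache_level))
                return 0;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;

        /* Always invalidate stale cachelines. */
        i915_gem_object_set_cache_coherency(obj, cache_level);
        obj->cache_dirty = true;

        /* The new cache level takes effect when each vma is rebound. */
        return i915_gem_object_unbind(obj,
                                      I915_GEM_OBJECT_UNBIND_ACTIVE |
                                      I915_GEM_OBJECT_UNBIND_BARRIER);
}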
301 struct drm_i915_gem_object *obj;
308 obj = i915_gem_object_lookup_rcu(file, args->handle);
309 if (!obj) {
318 if (obj->pat_set_by_user) {
323 if (i915_gem_object_has_cache_level(obj, I915_CACHE_LLC) ||
324 i915_gem_object_has_cache_level(obj, I915_CACHE_L3_LLC))
326 else if (i915_gem_object_has_cache_level(obj, I915_CACHE_WT))
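
The body of the get-caching ioctl reduces to mapping the object's cache level onto the uapi caching enum. A sketch of that mapping; the error value for user-set PAT indices and the local variable names are assumptions:

        /* Objects whose PAT index was set by userspace have no single
         * caching mode to report (assumed error value). */
        if (obj->pat_set_by_user) {
                err = -EOPNOTSUPP;
                goto out;
        }

        if (i915_gem_object_has_cache_level(obj, I915_CACHE_LLC) ||
            i915_gem_object_has_cache_level(obj, I915_CACHE_L3_LLC))
                args->caching = I915_CACHING_CACHED;
        else if (i915_gem_object_has_cache_level(obj, I915_CACHE_WT))
                args->caching = I915_CACHING_DISPLAY;
        else
                args->caching = I915_CACHING_NONE;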
340 struct drm_i915_gem_object *obj;
373 obj = i915_gem_object_lookup(file, args->handle);
374 if (!obj)
381 if (obj->pat_set_by_user) {
390 if (i915_gem_object_is_proxy(obj)) {
395 if (!i915_gem_object_is_userptr(obj) ||
402 ret = i915_gem_object_lock_interruptible(obj, NULL);
406 ret = i915_gem_object_set_cache_level(obj, level);
407 i915_gem_object_unlock(obj);
410 i915_gem_object_put(obj);
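
From userspace, this set-caching path is reached through the DRM_IOCTL_I915_GEM_SET_CACHING ioctl from the i915 uapi headers. A minimal usage sketch; fd and handle are placeholders (fd from opening a DRM device node, handle from a GEM create ioctl), and the include path assumes installed kernel or libdrm headers:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Ask the kernel path above (ending in i915_gem_object_set_cache_level())
 * for write-back caching on a GEM object. */
static int bo_set_cached(int fd, uint32_t handle)
{
        struct drm_i915_gem_caching arg = {
                .handle = handle,
                .caching = I915_CACHING_CACHED,
        };

        return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}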
421 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
427 struct drm_i915_private *i915 = to_i915(obj->base.dev);
432 if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
445 ret = i915_gem_object_set_cache_level(obj,
466 vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
470 vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0,
478 i915_gem_object_flush_if_display_locked(obj);
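
Condensing i915_gem_object_pin_to_display_plane() from the matches: framebuffers on discrete parts must live in LMEM, scanout wants write-through (or uncached) PTEs, and pinning first tries the mappable aperture before falling back to any GGTT slot. A sketch of the core flow; the HAS_WT() cache choice and the pin flags are assumptions:

        /* Frame buffers on discrete GPUs must be in device-local memory. */
        if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
                return ERR_PTR(-EINVAL);

        /* Scanout bypasses the LLC, so use write-through where available,
         * otherwise uncached (assumed selection). */
        ret = i915_gem_object_set_cache_level(obj,
                                              HAS_WT(i915) ?
                                              I915_CACHE_WT : I915_CACHE_NONE);
        if (ret)
                return ERR_PTR(ret);

        /* Prefer the mappable aperture, but fall back to any GGTT slot
         * rather than failing outright (assumed flags). */
        vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
                                          flags | PIN_MAPPABLE | PIN_NONBLOCK);
        if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK))
                vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0,
                                                  alignment, flags);
        if (IS_ERR(vma))
                return vma;

        i915_gem_object_flush_if_display_locked(obj);
        return vma;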
486 * @obj: object to act on
493 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
497 assert_object_held(obj);
499 ret = i915_gem_object_wait(obj,
506 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
509 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
510 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
511 obj->read_domains |= I915_GEM_DOMAIN_CPU;
517 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
523 __start_cpu_write(obj);
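
i915_gem_object_set_to_cpu_domain() assembled from the matches above; the wait flags and the write tail (assumed to go through __start_cpu_write(), which collapses the domains to CPU and re-checks cache dirtiness) are hedged assumptions:

int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
        int ret;

        assert_object_held(obj);

        /* Assumption: same wait flags as the other domain setters. */
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   (write ? I915_WAIT_ALL : 0),
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;

        flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

        /* Flush the CPU cache if it is still invalid for CPU reads. */
        if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
                obj->read_domains |= I915_GEM_DOMAIN_CPU;
        }

        GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);

        /* Writing through the CPU invalidates GPU domains at next use. */
        if (write)
                __start_cpu_write(obj);

        return 0;
}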
541 struct drm_i915_gem_object *obj;
563 obj = i915_gem_object_lookup(file, args->handle);
564 if (!obj)
572 err = i915_gem_object_wait(obj,
580 if (i915_gem_object_is_userptr(obj)) {
585 err = i915_gem_object_userptr_validate(obj);
587 err = i915_gem_object_wait(obj,
601 if (i915_gem_object_is_proxy(obj)) {
606 err = i915_gem_object_lock_interruptible(obj, NULL);
611 * Flush and acquire obj->pages so that we are coherent through
614 * For example, if the obj->filp was moved to swap without us
616 * continue to assume that the obj remained out of the CPU cached
619 err = i915_gem_object_pin_pages(obj);
627 * no-ops. If obj->write_domain is set, we must be in the same
628 * obj->read_domains, and only that domain. Therefore, if that
629 * obj->write_domain matches the request read_domains, we are
633 if (READ_ONCE(obj->write_domain) == read_domains)
637 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
639 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
641 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
644 i915_gem_object_unpin_pages(obj);
647 i915_gem_object_unlock(obj);
650 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
653 i915_gem_object_put(obj);
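
Userspace drives this whole set-domain path through DRM_IOCTL_I915_GEM_SET_DOMAIN, choosing among the WC, GTT, and CPU setters above via read_domains/write_domain. A minimal usage sketch; fd and handle are placeholders for illustration:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Move a GEM object to the CPU domain for writing before CPU access
 * through an mmap; in the kernel this ends in
 * i915_gem_object_set_to_cpu_domain(obj, true). */
static int bo_begin_cpu_write(int fd, uint32_t handle)
{
        struct drm_i915_gem_set_domain arg = {
                .handle = handle,
                .read_domains = I915_GEM_DOMAIN_CPU,
                .write_domain = I915_GEM_DOMAIN_CPU,
        };

        return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
}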
662 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
668 if (!i915_gem_object_has_struct_page(obj))
671 assert_object_held(obj);
673 ret = i915_gem_object_wait(obj,
679 ret = i915_gem_object_pin_pages(obj);
683 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
685 ret = i915_gem_object_set_to_cpu_domain(obj, false);
692 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
699 if (!obj->cache_dirty &&
700 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
708 i915_gem_object_unpin_pages(obj);
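
i915_gem_object_prepare_read() picks between two strategies visible in the matches: if the object is read-coherent (or a clflush instruction is unavailable), a plain move to the CPU domain suffices; otherwise it flushes the write domain and tells the caller to clflush before reading. A sketch of the tail after the wait; the CPU-feature test and label layout are assumptions:

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        /* Coherent for reads, or no clflush instruction at all: a plain
         * domain transition is enough (assumed feature test). */
        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
            !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                ret = i915_gem_object_set_to_cpu_domain(obj, false);
                if (ret)
                        goto err_unpin;
                goto out;
        }

        flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

        /* Not CPU-coherent and not already CPU-visible: the caller must
         * clflush before reading. */
        if (!obj->cache_dirty &&
            !(obj->read_domains & I915_GEM_DOMAIN_CPU))
                *needs_clflush = CLFLUSH_BEFORE;

out:
        return 0;       /* pages stay pinned for the caller */

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;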
712 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
718 if (!i915_gem_object_has_struct_page(obj))
721 assert_object_held(obj);
723 ret = i915_gem_object_wait(obj,
730 ret = i915_gem_object_pin_pages(obj);
734 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
736 ret = i915_gem_object_set_to_cpu_domain(obj, true);
743 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
750 if (!obj->cache_dirty) {
757 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
762 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
763 obj->mm.dirty = true;
768 i915_gem_object_unpin_pages(obj);
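
i915_gem_object_prepare_write() differs from the read variant only in its tail: a non-dirty object needs the caller to flush after writing (CLFLUSH_AFTER), plus a flush before writing if stale cachelines could be read back; then the frontbuffer is invalidated and the pages marked dirty. A sketch of that tail, assembled from the matches:

        if (!obj->cache_dirty) {
                /* The caller must push its writes out of the CPU cache... */
                *needs_clflush |= CLFLUSH_AFTER;

                /* ...and invalidate partially written cachelines that may
                 * be read back before being overwritten. */
                if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
                        *needs_clflush |= CLFLUSH_BEFORE;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */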