Lines matching refs: ce

Each entry below is a source line from the Linux kernel's drivers/gpu/drm/i915/gt/intel_context.c that references the identifier "ce" (a struct intel_context pointer). The leading number is the line number in that file, the trailing "in function()" names the enclosing function, and "local"/"argument" mark the lines where ce is declared.

27 	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);  in rcu_context_free()  local
29 trace_intel_context_free(ce); in rcu_context_free()
30 if (intel_context_has_own_state(ce)) in rcu_context_free()
31 fput(ce->default_state); in rcu_context_free()
32 kmem_cache_free(slab_ce, ce); in rcu_context_free()
35 void intel_context_free(struct intel_context *ce) in intel_context_free() argument
37 call_rcu(&ce->rcu, rcu_context_free); in intel_context_free()
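rcu_context_free() and intel_context_free() together are the standard RCU deferred-free idiom: the object embeds a struct rcu_head, intel_context_free() hands it to call_rcu(), and the callback recovers the object with container_of() and returns it to the slab only after a grace period, when no RCU reader can still hold a pointer to it. A minimal sketch of the same pattern, with a hypothetical demo_ctx type and slab cache standing in for intel_context and slab_ce:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ctx {				/* stand-in for struct intel_context */
	int payload;
	struct rcu_head rcu;			/* storage for the deferred-free callback */
};

static struct kmem_cache *demo_slab;		/* assumed created at module init */

/* Runs only after an RCU grace period: every reader that might still
 * hold a pointer to ctx has finished, so freeing is now safe. */
static void demo_ctx_rcu_free(struct rcu_head *rcu)
{
	struct demo_ctx *ctx = container_of(rcu, struct demo_ctx, rcu);

	kmem_cache_free(demo_slab, ctx);
}

static void demo_ctx_free(struct demo_ctx *ctx)
{
	/* Defer the free rather than doing it immediately. */
	call_rcu(&ctx->rcu, demo_ctx_rcu_free);
}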
43 struct intel_context *ce; in intel_context_create() local
45 ce = intel_context_alloc(); in intel_context_create()
46 if (!ce) in intel_context_create()
49 intel_context_init(ce, engine); in intel_context_create()
50 trace_intel_context_create(ce); in intel_context_create()
51 return ce; in intel_context_create()
54 int intel_context_alloc_state(struct intel_context *ce) in intel_context_alloc_state() argument
59 if (mutex_lock_interruptible(&ce->pin_mutex)) in intel_context_alloc_state()
62 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { in intel_context_alloc_state()
63 if (intel_context_is_banned(ce)) { in intel_context_alloc_state()
68 err = ce->ops->alloc(ce); in intel_context_alloc_state()
72 set_bit(CONTEXT_ALLOC_BIT, &ce->flags); in intel_context_alloc_state()
75 ctx = rcu_dereference(ce->gem_context); in intel_context_alloc_state()
82 ce); in intel_context_alloc_state()
88 mutex_unlock(&ce->pin_mutex); in intel_context_alloc_state()
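intel_context_alloc_state() shows the lazy one-shot allocation idiom: the expensive backing state is allocated under ce->pin_mutex on first use, and CONTEXT_ALLOC_BIT records that it has been done so later pins skip the slow path. A sketch of the same shape, with hypothetical demo_* names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

#define DEMO_ALLOC_BIT	0			/* plays the role of CONTEXT_ALLOC_BIT */

struct demo_ctx {
	struct mutex pin_mutex;
	unsigned long flags;
};

static int demo_backend_alloc(struct demo_ctx *ctx)
{
	return 0;				/* stub for the expensive ops->alloc() step */
}

static int demo_alloc_state(struct demo_ctx *ctx)
{
	int err = 0;

	/* Interruptible lock: a fatal signal aborts the wait. */
	if (mutex_lock_interruptible(&ctx->pin_mutex))
		return -EINTR;

	/* Only the first caller does the work, and the bit is set only
	 * after the allocation has succeeded. */
	if (!test_bit(DEMO_ALLOC_BIT, &ctx->flags)) {
		err = demo_backend_alloc(ctx);
		if (!err)
			set_bit(DEMO_ALLOC_BIT, &ctx->flags);
	}

	mutex_unlock(&ctx->pin_mutex);
	return err;
}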
92 static int intel_context_active_acquire(struct intel_context *ce) in intel_context_active_acquire() argument
96 __i915_active_acquire(&ce->active); in intel_context_active_acquire()
98 if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) || in intel_context_active_acquire()
99 intel_context_is_parallel(ce)) in intel_context_active_acquire()
103 err = i915_active_acquire_preallocate_barrier(&ce->active, in intel_context_active_acquire()
104 ce->engine); in intel_context_active_acquire()
106 i915_active_release(&ce->active); in intel_context_active_acquire()
111 static void intel_context_active_release(struct intel_context *ce) in intel_context_active_release() argument
114 i915_active_acquire_barrier(&ce->active); in intel_context_active_release()
115 i915_active_release(&ce->active); in intel_context_active_release()
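intel_context_active_acquire() demonstrates acquire-then-undo error handling: the active reference is taken unconditionally, and if the follow-up barrier preallocation fails, the reference is released again so the caller sees a clean failure with no state change. A reduced sketch of that pairing; demo_active and its barrier are hypothetical stand-ins for the i915_active machinery:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_active {
	atomic_t count;
	void *barrier;
};

static int demo_preallocate_barrier(struct demo_active *a)
{
	a->barrier = kzalloc(64, GFP_KERNEL);	/* may fail under memory pressure */
	return a->barrier ? 0 : -ENOMEM;
}

static int demo_active_acquire(struct demo_active *a)
{
	int err;

	atomic_inc(&a->count);			/* take the reference first */

	err = demo_preallocate_barrier(a);
	if (err)
		atomic_dec(&a->count);		/* undo the acquire on failure */

	return err;
}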
178 static int intel_context_pre_pin(struct intel_context *ce, in intel_context_pre_pin() argument
183 CE_TRACE(ce, "active\n"); in intel_context_pre_pin()
185 err = __ring_active(ce->ring, ww); in intel_context_pre_pin()
189 err = intel_timeline_pin(ce->timeline, ww); in intel_context_pre_pin()
193 if (!ce->state) in intel_context_pre_pin()
196 err = __context_pin_state(ce->state, ww); in intel_context_pre_pin()
204 intel_timeline_unpin(ce->timeline); in intel_context_pre_pin()
206 __ring_retire(ce->ring); in intel_context_pre_pin()
210 static void intel_context_post_unpin(struct intel_context *ce) in intel_context_post_unpin() argument
212 if (ce->state) in intel_context_post_unpin()
213 __context_unpin_state(ce->state); in intel_context_post_unpin()
215 intel_timeline_unpin(ce->timeline); in intel_context_post_unpin()
216 __ring_retire(ce->ring); in intel_context_post_unpin()
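intel_context_pre_pin() and intel_context_post_unpin() are a matched pair: pre_pin acquires ring, timeline and (optionally) state in order, unwinding with the classic goto-label ladder on failure, while post_unpin releases them in reverse order. A self-contained sketch of the idiom with hypothetical, stubbed resources:

static int demo_pin_ring(void)     { return 0; }	/* stubs for the three   */
static int demo_pin_timeline(void) { return 0; }	/* resources pinned by   */
static int demo_pin_state(void)    { return 0; }	/* the real pre_pin path */
static void demo_unpin_ring(void)     { }
static void demo_unpin_timeline(void) { }
static void demo_unpin_state(void)    { }

static int demo_pre_pin(void)
{
	int err;

	err = demo_pin_ring();
	if (err)
		return err;

	err = demo_pin_timeline();
	if (err)
		goto err_ring;

	err = demo_pin_state();
	if (err)
		goto err_timeline;

	return 0;

	/* Each label releases everything acquired before the failure. */
err_timeline:
	demo_unpin_timeline();
err_ring:
	demo_unpin_ring();
	return err;
}

/* Mirrors a successful demo_pre_pin() in reverse order. */
static void demo_post_unpin(void)
{
	demo_unpin_state();
	demo_unpin_timeline();
	demo_unpin_ring();
}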
219 int __intel_context_do_pin_ww(struct intel_context *ce, in __intel_context_do_pin_ww() argument
226 if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { in __intel_context_do_pin_ww()
227 err = intel_context_alloc_state(ce); in __intel_context_do_pin_ww()
238 err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); in __intel_context_do_pin_ww()
240 err = i915_gem_object_lock(ce->ring->vma->obj, ww); in __intel_context_do_pin_ww()
241 if (!err && ce->state) in __intel_context_do_pin_ww()
242 err = i915_gem_object_lock(ce->state->obj, ww); in __intel_context_do_pin_ww()
244 err = intel_context_pre_pin(ce, ww); in __intel_context_do_pin_ww()
248 err = ce->ops->pre_pin(ce, ww, &vaddr); in __intel_context_do_pin_ww()
252 err = i915_active_acquire(&ce->active); in __intel_context_do_pin_ww()
256 err = mutex_lock_interruptible(&ce->pin_mutex); in __intel_context_do_pin_ww()
260 intel_engine_pm_might_get(ce->engine); in __intel_context_do_pin_ww()
262 if (unlikely(intel_context_is_closed(ce))) { in __intel_context_do_pin_ww()
267 if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) { in __intel_context_do_pin_ww()
268 err = intel_context_active_acquire(ce); in __intel_context_do_pin_ww()
272 err = ce->ops->pin(ce, vaddr); in __intel_context_do_pin_ww()
274 intel_context_active_release(ce); in __intel_context_do_pin_ww()
278 CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n", in __intel_context_do_pin_ww()
279 i915_ggtt_offset(ce->ring->vma), in __intel_context_do_pin_ww()
280 ce->ring->head, ce->ring->tail); in __intel_context_do_pin_ww()
284 atomic_inc(&ce->pin_count); in __intel_context_do_pin_ww()
287 GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */ in __intel_context_do_pin_ww()
289 trace_intel_context_do_pin(ce); in __intel_context_do_pin_ww()
292 mutex_unlock(&ce->pin_mutex); in __intel_context_do_pin_ww()
294 i915_active_release(&ce->active); in __intel_context_do_pin_ww()
297 ce->ops->post_unpin(ce); in __intel_context_do_pin_ww()
299 intel_context_post_unpin(ce); in __intel_context_do_pin_ww()
307 i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj); in __intel_context_do_pin_ww()
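The heart of __intel_context_do_pin_ww() is the pin-count fast path: atomic_add_unless(&ce->pin_count, 1, 0) increments the count and returns true unless it is currently zero, so an already-pinned context takes another pin without any setup, and only the 0 -> 1 transition runs the slow path (active acquire, ops->pin) before publishing the first pin with atomic_inc(). A sketch of that idiom for a hypothetical demo_ctx:

#include <linux/atomic.h>
#include <linux/mutex.h>

struct demo_ctx {
	atomic_t pin_count;
	struct mutex pin_mutex;
};

static int demo_first_pin(struct demo_ctx *ctx)
{
	return 0;				/* stub for the real setup (ops->pin etc.) */
}

static int demo_pin(struct demo_ctx *ctx)
{
	int err = 0;

	mutex_lock(&ctx->pin_mutex);

	/* atomic_add_unless(v, 1, 0): add 1 and return true unless v
	 * was 0.  True -> fast path, we just stacked another pin.
	 * False -> we own the 0 -> 1 transition and must set up first. */
	if (!atomic_add_unless(&ctx->pin_count, 1, 0)) {
		err = demo_first_pin(ctx);
		if (!err)
			atomic_inc(&ctx->pin_count);	/* publish the first pin */
	}

	mutex_unlock(&ctx->pin_mutex);
	return err;
}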
312 int __intel_context_do_pin(struct intel_context *ce) in __intel_context_do_pin() argument
319 err = __intel_context_do_pin_ww(ce, &ww); in __intel_context_do_pin()
329 void __intel_context_do_unpin(struct intel_context *ce, int sub) in __intel_context_do_unpin() argument
331 if (!atomic_sub_and_test(sub, &ce->pin_count)) in __intel_context_do_unpin()
334 CE_TRACE(ce, "unpin\n"); in __intel_context_do_unpin()
335 ce->ops->unpin(ce); in __intel_context_do_unpin()
336 ce->ops->post_unpin(ce); in __intel_context_do_unpin()
344 intel_context_get(ce); in __intel_context_do_unpin()
345 intel_context_active_release(ce); in __intel_context_do_unpin()
346 trace_intel_context_do_unpin(ce); in __intel_context_do_unpin()
347 intel_context_put(ce); in __intel_context_do_unpin()
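__intel_context_do_unpin() is the mirror image: atomic_sub_and_test() returns true only when the count reaches zero, so only the final unpin tears anything down. Note the intel_context_get()/intel_context_put() bracket: dropping the last pin can indirectly drop the last reference, and the temporary reference keeps ce valid until the trailing tracepoint has run. Sketched with a kref and hypothetical helpers:

#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_ctx {
	atomic_t pin_count;
	struct kref ref;
};

static void demo_ctx_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_ctx, ref));
}

static void demo_teardown(struct demo_ctx *ctx)
{
	/* stub for ops->unpin() / ops->post_unpin() */
}

static void demo_unpin(struct demo_ctx *ctx, int sub)
{
	/* True only when the count hits zero: the last unpin wins. */
	if (!atomic_sub_and_test(sub, &ctx->pin_count))
		return;

	demo_teardown(ctx);

	/* Keep ctx alive across the final release work, since that
	 * work may drop what was the last reference. */
	kref_get(&ctx->ref);
	/* ... release active trackers, emit tracepoints ... */
	kref_put(&ctx->ref, demo_ctx_release);
}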
352 struct intel_context *ce = container_of(active, typeof(*ce), active); in __intel_context_retire() local
354 CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n", in __intel_context_retire()
355 intel_context_get_total_runtime_ns(ce), in __intel_context_retire()
356 intel_context_get_avg_runtime_ns(ce)); in __intel_context_retire()
358 set_bit(CONTEXT_VALID_BIT, &ce->flags); in __intel_context_retire()
359 intel_context_post_unpin(ce); in __intel_context_retire()
360 intel_context_put(ce); in __intel_context_retire()
365 struct intel_context *ce = container_of(active, typeof(*ce), active); in __intel_context_active() local
367 intel_context_get(ce); in __intel_context_active()
370 GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active)); in __intel_context_active()
371 __intel_ring_pin(ce->ring); in __intel_context_active()
373 __intel_timeline_pin(ce->timeline); in __intel_context_active()
375 if (ce->state) { in __intel_context_active()
376 GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active)); in __intel_context_active()
377 __i915_vma_pin(ce->state); in __intel_context_active()
378 i915_vma_make_unshrinkable(ce->state); in __intel_context_active()
392 intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) in intel_context_init() argument
397 kref_init(&ce->ref); in intel_context_init()
399 ce->engine = engine; in intel_context_init()
400 ce->ops = engine->cops; in intel_context_init()
401 ce->sseu = engine->sseu; in intel_context_init()
402 ce->ring = NULL; in intel_context_init()
403 ce->ring_size = SZ_4K; in intel_context_init()
405 ewma_runtime_init(&ce->stats.runtime.avg); in intel_context_init()
407 ce->vm = i915_vm_get(engine->gt->vm); in intel_context_init()
410 spin_lock_init(&ce->signal_lock); in intel_context_init()
411 INIT_LIST_HEAD(&ce->signals); in intel_context_init()
413 mutex_init(&ce->pin_mutex); in intel_context_init()
415 spin_lock_init(&ce->guc_state.lock); in intel_context_init()
416 INIT_LIST_HEAD(&ce->guc_state.fences); in intel_context_init()
417 INIT_LIST_HEAD(&ce->guc_state.requests); in intel_context_init()
419 ce->guc_id.id = GUC_INVALID_CONTEXT_ID; in intel_context_init()
420 INIT_LIST_HEAD(&ce->guc_id.link); in intel_context_init()
422 INIT_LIST_HEAD(&ce->destroyed_link); in intel_context_init()
424 INIT_LIST_HEAD(&ce->parallel.child_list); in intel_context_init()
430 i915_sw_fence_init(&ce->guc_state.blocked, in intel_context_init()
432 i915_sw_fence_commit(&ce->guc_state.blocked); in intel_context_init()
434 i915_active_init(&ce->active, in intel_context_init()
438 void intel_context_fini(struct intel_context *ce) in intel_context_fini() argument
442 if (ce->timeline) in intel_context_fini()
443 intel_timeline_put(ce->timeline); in intel_context_fini()
444 i915_vm_put(ce->vm); in intel_context_fini()
447 if (intel_context_is_parent(ce)) in intel_context_fini()
448 for_each_child_safe(ce, child, next) in intel_context_fini()
451 mutex_destroy(&ce->pin_mutex); in intel_context_fini()
452 i915_active_fini(&ce->active); in intel_context_fini()
453 i915_sw_fence_fini(&ce->guc_state.blocked); in intel_context_fini()
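intel_context_init() and intel_context_fini() bracket the object's life: init seeds the refcount with kref_init() (which starts it at 1) and prepares every lock and list head before the context is published, while fini releases what was set up and poisons the locks via mutex_destroy() so stale users are caught in debug builds. A compact sketch of the same init/fini pairing for a hypothetical demo_ctx:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_ctx {
	struct kref ref;
	spinlock_t signal_lock;
	struct list_head signals;
	struct mutex pin_mutex;
};

static void demo_ctx_init(struct demo_ctx *ctx)
{
	kref_init(&ctx->ref);			/* refcount starts at 1 */

	spin_lock_init(&ctx->signal_lock);
	INIT_LIST_HEAD(&ctx->signals);		/* empty list: head points at itself */
	mutex_init(&ctx->pin_mutex);
}

static void demo_ctx_fini(struct demo_ctx *ctx)
{
	/* Debugging aid: marks the mutex unusable under DEBUG_MUTEXES. */
	mutex_destroy(&ctx->pin_mutex);
}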
470 void intel_context_enter_engine(struct intel_context *ce) in intel_context_enter_engine() argument
472 intel_engine_pm_get(ce->engine); in intel_context_enter_engine()
473 intel_timeline_enter(ce->timeline); in intel_context_enter_engine()
476 void intel_context_exit_engine(struct intel_context *ce) in intel_context_exit_engine() argument
478 intel_timeline_exit(ce->timeline); in intel_context_exit_engine()
479 intel_engine_pm_put(ce->engine); in intel_context_exit_engine()
482 int intel_context_prepare_remote_request(struct intel_context *ce, in intel_context_prepare_remote_request() argument
485 struct intel_timeline *tl = ce->timeline; in intel_context_prepare_remote_request()
489 GEM_BUG_ON(rq->context == ce); in intel_context_prepare_remote_request()
505 GEM_BUG_ON(i915_active_is_idle(&ce->active)); in intel_context_prepare_remote_request()
506 return i915_active_add_request(&ce->active, rq); in intel_context_prepare_remote_request()
509 struct i915_request *intel_context_create_request(struct intel_context *ce) in intel_context_create_request() argument
517 err = intel_context_pin_ww(ce, &ww); in intel_context_create_request()
519 rq = i915_request_create(ce); in intel_context_create_request()
520 intel_context_unpin(ce); in intel_context_create_request()
539 lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie); in intel_context_create_request()
540 mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_); in intel_context_create_request()
541 mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); in intel_context_create_request()
542 rq->cookie = lockdep_pin_lock(&ce->timeline->mutex); in intel_context_create_request()
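intel_context_create_request() uses a pin-handoff: the context is pinned only across i915_request_create(), because the new request takes (and later releases) its own pin, so the temporary pin can be dropped immediately. The lockdep unpin/release/acquire/pin sequence on ce->timeline->mutex at the end re-annotates that mutex, which request creation returns holding, as a nested acquisition (SINGLE_DEPTH_NESTING) so lockdep tolerates the unusual lock order. A sketch of just the pin-handoff, with hypothetical demo_* helpers:

#include <linux/err.h>

struct demo_ctx;
struct demo_rq { int dummy; };

static int demo_ctx_pin(struct demo_ctx *ctx)    { return 0; }	/* stub */
static void demo_ctx_unpin(struct demo_ctx *ctx) { }		/* stub */

/* Stub for i915_request_create(): the request takes its own pin on the
 * context for as long as it lives. */
static struct demo_rq *demo_rq_create(struct demo_ctx *ctx)
{
	static struct demo_rq rq;

	demo_ctx_pin(ctx);
	return &rq;
}

static struct demo_rq *demo_create_request(struct demo_ctx *ctx)
{
	struct demo_rq *rq;
	int err;

	err = demo_ctx_pin(ctx);
	if (err)
		return ERR_PTR(err);

	rq = demo_rq_create(ctx);
	demo_ctx_unpin(ctx);		/* rq holds its own pin from here on */

	return rq;
}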
547 struct i915_request *intel_context_get_active_request(struct intel_context *ce) in intel_context_get_active_request() argument
549 struct intel_context *parent = intel_context_to_parent(ce); in intel_context_get_active_request()
553 GEM_BUG_ON(!intel_engine_uses_guc(ce->engine)); in intel_context_get_active_request()
564 if (rq->context != ce) in intel_context_get_active_request()
597 u64 intel_context_get_total_runtime_ns(struct intel_context *ce) in intel_context_get_total_runtime_ns() argument
601 if (ce->ops->update_stats) in intel_context_get_total_runtime_ns()
602 ce->ops->update_stats(ce); in intel_context_get_total_runtime_ns()
604 total = ce->stats.runtime.total; in intel_context_get_total_runtime_ns()
605 if (ce->ops->flags & COPS_RUNTIME_CYCLES) in intel_context_get_total_runtime_ns()
606 total *= ce->engine->gt->clock_period_ns; in intel_context_get_total_runtime_ns()
608 active = READ_ONCE(ce->stats.active); in intel_context_get_total_runtime_ns()
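intel_context_get_total_runtime_ns() stores runtime either directly in nanoseconds or, when the backend sets COPS_RUNTIME_CYCLES, in GT clock cycles that must be scaled by the clock period. The conversion is a single multiply, sketched here with a hypothetical flag:

#include <linux/types.h>

#define DEMO_RUNTIME_CYCLES	(1u << 0)	/* plays the role of COPS_RUNTIME_CYCLES */

static u64 demo_runtime_ns(u64 total, unsigned int flags, u32 clock_period_ns)
{
	/* Cycle-based backends report raw cycle counts; scale them by
	 * the clock period (ns per cycle) to get nanoseconds. */
	if (flags & DEMO_RUNTIME_CYCLES)
		total *= clock_period_ns;

	return total;
}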
615 u64 intel_context_get_avg_runtime_ns(struct intel_context *ce) in intel_context_get_avg_runtime_ns() argument
617 u64 avg = ewma_runtime_read(&ce->stats.runtime.avg); in intel_context_get_avg_runtime_ns()
619 if (ce->ops->flags & COPS_RUNTIME_CYCLES) in intel_context_get_avg_runtime_ns()
620 avg *= ce->engine->gt->clock_period_ns; in intel_context_get_avg_runtime_ns()
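The average runtime comes from an exponentially weighted moving average; the ewma_runtime_*() helpers used here are generated by the DECLARE_EWMA() macro from <linux/average.h>. A sketch of declaring and feeding such an average (the precision/weight values below are illustrative, not the driver's):

#include <linux/average.h>

/* Generates struct ewma_demo plus ewma_demo_init/_add/_read.  Args:
 * 6 bits of fixed-point fraction, weight reciprocal 8, i.e. roughly
 * avg = avg * 7/8 + sample * 1/8 per update. */
DECLARE_EWMA(demo, 6, 8)

static unsigned long demo_feed(struct ewma_demo *avg, unsigned long sample)
{
	ewma_demo_add(avg, sample);	/* avg must have been ewma_demo_init()ed */
	return ewma_demo_read(avg);
}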
625 bool intel_context_ban(struct intel_context *ce, struct i915_request *rq) in intel_context_ban() argument
627 bool ret = intel_context_set_banned(ce); in intel_context_ban()
629 trace_intel_context_ban(ce); in intel_context_ban()
631 if (ce->ops->revoke) in intel_context_ban()
632 ce->ops->revoke(ce, rq, in intel_context_ban()
638 bool intel_context_revoke(struct intel_context *ce) in intel_context_revoke() argument
640 bool ret = intel_context_set_exiting(ce); in intel_context_revoke()
642 if (ce->ops->revoke) in intel_context_revoke()
643 ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms); in intel_context_revoke()
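intel_context_ban() and intel_context_revoke() share one shape: atomically set a state flag, capture whether it was already set so the operation is idempotent, then let the backend's optional revoke hook kick the context off the hardware. The flag step is typically a test_and_set_bit(); a sketch:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_BANNED_BIT	0			/* plays the role of CONTEXT_BANNED */

struct demo_ctx {
	unsigned long flags;
	void (*revoke)(struct demo_ctx *ctx);	/* optional backend hook */
};

/* Returns whether the context had already been banned, making repeated
 * bans harmless. */
static bool demo_ctx_ban(struct demo_ctx *ctx)
{
	bool was_banned = test_and_set_bit(DEMO_BANNED_BIT, &ctx->flags);

	if (ctx->revoke)
		ctx->revoke(ctx);		/* ask the backend to preempt/kill it */

	return was_banned;
}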