Lines Matching refs:ce (in drivers/gpu/drm/i915/gt/intel_context.h)
22 #define CE_TRACE(ce, fmt, ...) do { \
23         const struct intel_context *ce__ = (ce); \
33 void intel_context_init(struct intel_context *ce,
35 void intel_context_fini(struct intel_context *ce);
43 int intel_context_alloc_state(struct intel_context *ce);
45 void intel_context_free(struct intel_context *ce);
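Example: a minimal lifecycle sketch. intel_context_create() is declared in this same header (it allocates and runs intel_context_init()); demo_lifecycle() is a hypothetical caller, not driver code.

        /* Hypothetical caller: create a context, then drop the last reference. */
        static int demo_lifecycle(struct intel_engine_cs *engine)
        {
                struct intel_context *ce;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return PTR_ERR(ce);

                /* ... pin and use the context ... */

                intel_context_put(ce); /* final put frees via ce->ops->destroy */
                return 0;
        }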
47 int intel_context_reconfigure_sseu(struct intel_context *ce,
52 static inline bool intel_context_is_child(struct intel_context *ce)
54         return !!ce->parallel.parent;
57 static inline bool intel_context_is_parent(struct intel_context *ce)
59         return !!ce->parallel.number_children;
62 static inline bool intel_context_is_pinned(struct intel_context *ce);
65 intel_context_to_parent(struct intel_context *ce)
67         if (intel_context_is_child(ce)) {
75                 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
77                 return ce->parallel.parent;
79                 return ce;
83 static inline bool intel_context_is_parallel(struct intel_context *ce)
85         return intel_context_is_child(ce) || intel_context_is_parent(ce);
91 #define for_each_child(parent, ce)\
92         list_for_each_entry(ce, &(parent)->parallel.child_list,\
94 #define for_each_child_safe(parent, ce, cn)\
95         list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
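Example: a hedged sketch of the parent/child helpers and the for_each_child() iterator; count_children() is hypothetical, not driver code.

        /* Hypothetical: a parent's child list should match its bookkeeping. */
        static void count_children(struct intel_context *parent)
        {
                struct intel_context *child;
                unsigned int n = 0;

                GEM_BUG_ON(!intel_context_is_parent(parent));
                for_each_child(parent, child)
                        n++;
                GEM_BUG_ON(n != parent->parallel.number_children);
        }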
106 static inline int intel_context_lock_pinned(struct intel_context *ce)
107         __acquires(ce->pin_mutex)
109         return mutex_lock_interruptible(&ce->pin_mutex);
122 intel_context_is_pinned(struct intel_context *ce)
124         return atomic_read(&ce->pin_count);
127 static inline void intel_context_cancel_request(struct intel_context *ce,
130         GEM_BUG_ON(!ce->ops->cancel_request);
131         return ce->ops->cancel_request(ce, rq);
140 static inline void intel_context_unlock_pinned(struct intel_context *ce)
141         __releases(ce->pin_mutex)
143         mutex_unlock(&ce->pin_mutex);
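Example: intel_context_lock_pinned()/intel_context_unlock_pinned() bracket reads of state that is only stable while the context stays pinned; inspect_if_pinned() is a hypothetical caller.

        static int inspect_if_pinned(struct intel_context *ce)
        {
                int err;

                err = intel_context_lock_pinned(ce); /* interruptible */
                if (err)
                        return err;

                if (intel_context_is_pinned(ce)) {
                        /* pinned-only state (e.g. ce->state) is stable here */
                }

                intel_context_unlock_pinned(ce);
                return 0;
        }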
146 int __intel_context_do_pin(struct intel_context *ce);
147 int __intel_context_do_pin_ww(struct intel_context *ce,
150 static inline bool intel_context_pin_if_active(struct intel_context *ce)
152         return atomic_inc_not_zero(&ce->pin_count);
155 static inline int intel_context_pin(struct intel_context *ce)
157         if (likely(intel_context_pin_if_active(ce)))
160         return __intel_context_do_pin(ce);
163 static inline int intel_context_pin_ww(struct intel_context *ce,
166         if (likely(intel_context_pin_if_active(ce)))
169         return __intel_context_do_pin_ww(ce, ww);
172 static inline void __intel_context_pin(struct intel_context *ce)
174         GEM_BUG_ON(!intel_context_is_pinned(ce));
175         atomic_inc(&ce->pin_count);
178 void __intel_context_do_unpin(struct intel_context *ce, int sub);
180 static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
182         __intel_context_do_unpin(ce, 2);
185 static inline void intel_context_unpin(struct intel_context *ce)
187         if (!ce->ops->sched_disable) {
188                 __intel_context_do_unpin(ce, 1);
196         while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
197                 if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
198                         ce->ops->sched_disable(ce);
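Example: the usual pin/use/unpin pattern built from these helpers; use_context() is hypothetical. intel_context_pin() is a cheap atomic increment when the context is already pinned, and intel_context_unpin() may hand the final unpin to ops->sched_disable (an async path used e.g. by the GuC backend).

        static int use_context(struct intel_context *ce)
        {
                int err;

                err = intel_context_pin(ce);
                if (err)
                        return err;

                /* ce's state and ring are mapped while the pin is held */

                intel_context_unpin(ce);
                return 0;
        }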
205 void intel_context_enter_engine(struct intel_context *ce);
206 void intel_context_exit_engine(struct intel_context *ce);
208 static inline void intel_context_enter(struct intel_context *ce)
210         lockdep_assert_held(&ce->timeline->mutex);
211         if (ce->active_count++)
214         ce->ops->enter(ce);
215         ce->wakeref = intel_gt_pm_get(ce->vm->gt);
218 static inline void intel_context_mark_active(struct intel_context *ce)
220         lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
221                        test_bit(CONTEXT_IS_PARKING, &ce->flags));
222         ++ce->active_count;
225 static inline void intel_context_exit(struct intel_context *ce)
227         lockdep_assert_held(&ce->timeline->mutex);
228         GEM_BUG_ON(!ce->active_count);
229         if (--ce->active_count)
232         intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
233         ce->ops->exit(ce);
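Example: intel_context_enter()/intel_context_exit() must be balanced under ce->timeline->mutex; the first enter takes a GT wakeref that the last exit releases. A hypothetical sketch:

        static void mark_busy_then_idle(struct intel_context *ce)
        {
                mutex_lock(&ce->timeline->mutex);
                intel_context_enter(ce); /* 0 -> 1: ops->enter() + GT wakeref */
                /* ... the context counts as active here ... */
                intel_context_exit(ce);  /* 1 -> 0: wakeref put + ops->exit() */
                mutex_unlock(&ce->timeline->mutex);
        }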
236 static inline struct intel_context *intel_context_get(struct intel_context *ce)
238         kref_get(&ce->ref);
239         return ce;
242 static inline void intel_context_put(struct intel_context *ce)
244         kref_put(&ce->ref, ce->ops->destroy);
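Example: plain kref reference counting; a hypothetical snippet keeping a context alive across asynchronous work.

        struct intel_context *keep = intel_context_get(ce); /* +1 on ce->ref */
        /* ... hand "keep" to a worker ... */
        intel_context_put(keep); /* the last put invokes ce->ops->destroy */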
248 intel_context_timeline_lock(struct intel_context *ce)
249         __acquires(&ce->timeline->mutex)
251         struct intel_timeline *tl = ce->timeline;
254         if (intel_context_is_parent(ce))
256         else if (intel_context_is_child(ce))
258                                       ce->parallel.child_index + 1);
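Example: the returned timeline is later passed to intel_context_timeline_unlock(), also in this header, which takes the timeline rather than the context; with_timeline() is hypothetical.

        static int with_timeline(struct intel_context *ce)
        {
                struct intel_timeline *tl;

                tl = intel_context_timeline_lock(ce);
                if (IS_ERR(tl))
                        return PTR_ERR(tl);

                /* ... emit requests on tl ... */

                intel_context_timeline_unlock(tl);
                return 0;
        }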
273 int intel_context_prepare_remote_request(struct intel_context *ce,
276 struct i915_request *intel_context_create_request(struct intel_context *ce);
278 struct i915_request *intel_context_get_active_request(struct intel_context *ce);
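Example: creating and submitting a request against a context, mirroring how i915_perf and the GT selftests use this API; emit_request() is hypothetical.

        static int emit_request(struct intel_context *ce)
        {
                struct i915_request *rq;

                rq = intel_context_create_request(ce); /* pins ce internally */
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                /* ... write commands into rq's ring ... */

                i915_request_add(rq);
                return 0;
        }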
280 static inline bool intel_context_is_barrier(const struct intel_context *ce)
282         return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
285 static inline void intel_context_close(struct intel_context *ce)
287         set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
289         if (ce->ops->close)
290                 ce->ops->close(ce);
293 static inline bool intel_context_is_closed(const struct intel_context *ce)
295         return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
298 static inline bool intel_context_has_inflight(const struct intel_context *ce)
300         return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
303 static inline bool intel_context_use_semaphores(const struct intel_context *ce)
305         return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
308 static inline void intel_context_set_use_semaphores(struct intel_context *ce)
310         set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
313 static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
315         clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
318 static inline bool intel_context_is_banned(const struct intel_context *ce)
320         return test_bit(CONTEXT_BANNED, &ce->flags);
323 static inline bool intel_context_set_banned(struct intel_context *ce)
325         return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
328 bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
330 static inline bool intel_context_is_schedulable(const struct intel_context *ce)
332         return !test_bit(CONTEXT_EXITING, &ce->flags) &&
333                !test_bit(CONTEXT_BANNED, &ce->flags);
336 static inline bool intel_context_is_exiting(const struct intel_context *ce)
338         return test_bit(CONTEXT_EXITING, &ce->flags);
341 static inline bool intel_context_set_exiting(struct intel_context *ce)
343         return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
346 bool intel_context_revoke(struct intel_context *ce);
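Example: submission paths test schedulability rather than the individual bits; roughly, a banned context was killed after a hang while an exiting one is being revoked gracefully. A hypothetical gate:

        if (!intel_context_is_schedulable(ce))
                return -EIO; /* CONTEXT_BANNED or CONTEXT_EXITING is set */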
349 intel_context_force_single_submission(const struct intel_context *ce)
351         return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
355 intel_context_set_single_submission(struct intel_context *ce)
357         __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
361 intel_context_nopreempt(const struct intel_context *ce)
363         return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
367 intel_context_set_nopreempt(struct intel_context *ce)
369         set_bit(CONTEXT_NOPREEMPT, &ce->flags);
373 intel_context_clear_nopreempt(struct intel_context *ce)
375         clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
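Example: these helpers are thin test/set/clear_bit wrappers over ce->flags; a hypothetical bracket around work that must not be preempted.

        intel_context_set_nopreempt(ce);
        /* ... submit work that must run to completion ... */
        intel_context_clear_nopreempt(ce);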
379 static inline bool intel_context_has_own_state(const struct intel_context *ce)
381         return test_bit(CONTEXT_OWN_STATE, &ce->flags);
384 static inline bool intel_context_set_own_state(struct intel_context *ce)
386         return test_and_set_bit(CONTEXT_OWN_STATE, &ce->flags);
389 static inline bool intel_context_has_own_state(const struct intel_context *ce)
394 static inline bool intel_context_set_own_state(struct intel_context *ce)
400 u64 intel_context_get_total_runtime_ns(struct intel_context *ce);
401 u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
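Example: a hypothetical read of the runtime accounting, assuming an i915 device pointer is in scope for logging.

        u64 total = intel_context_get_total_runtime_ns(ce);
        u64 avg = intel_context_get_avg_runtime_ns(ce);

        drm_dbg(&i915->drm, "context ran %llu ns total, %llu ns avg\n",
                total, avg);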