/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_SCHED_JOB_H_
#define _XE_SCHED_JOB_H_

#include "xe_sched_job_types.h"

struct drm_printer;
struct xe_vm;
struct xe_sync_entry;

#define XE_SCHED_HANG_LIMIT 1
#define XE_SCHED_JOB_TIMEOUT LONG_MAX

int xe_sched_job_module_init(void);
void xe_sched_job_module_exit(void);

struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
					 u64 *batch_addr);
void xe_sched_job_destroy(struct kref *ref);

/**
 * xe_sched_job_get - get reference to Xe schedule job
 * @job: Xe schedule job object
 *
 * Increment Xe schedule job's reference count
 */
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
	kref_get(&job->refcount);
	return job;
}

/**
 * xe_sched_job_put - put reference to Xe schedule job
 * @job: Xe schedule job object
 *
 * Decrement Xe schedule job's reference count, call xe_sched_job_destroy()
 * when the reference count reaches 0.
 */
static inline void xe_sched_job_put(struct xe_sched_job *job)
{
	kref_put(&job->refcount, xe_sched_job_destroy);
}

void xe_sched_job_set_error(struct xe_sched_job *job, int error);

/**
 * xe_sched_job_is_error - check whether an error has been set on a job
 * @job: Xe schedule job object
 *
 * Return true if an error has been recorded on the job's fence.
 */
static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
{
	return job->fence->error < 0;
}

bool xe_sched_job_started(struct xe_sched_job *job);
bool xe_sched_job_completed(struct xe_sched_job *job);

void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);

void xe_sched_job_init_user_fence(struct xe_sched_job *job,
				  struct xe_sync_entry *sync);

/**
 * to_xe_sched_job - get Xe schedule job from DRM scheduler job
 * @drm: DRM scheduler job embedded in an Xe schedule job
 *
 * Return the Xe schedule job containing @drm.
 */
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
{
	return container_of(drm, struct xe_sched_job, drm);
}

/**
 * xe_sched_job_seqno - fence sequence number of a job
 * @job: Xe schedule job object
 *
 * Return the job's fence sequence number, or 0 if the job has no fence.
 */
static inline u32 xe_sched_job_seqno(struct xe_sched_job *job)
{
	return job->fence ? job->fence->seqno : 0;
}

/**
 * xe_sched_job_lrc_seqno - LRC sequence number of a job
 * @job: Xe schedule job object
 *
 * Return the job's LRC sequence number.
 */
static inline u32 xe_sched_job_lrc_seqno(struct xe_sched_job *job)
{
	return job->lrc_seqno;
}

/**
 * xe_sched_job_add_migrate_flush - set migration flush flags on a job
 * @job: Xe schedule job object
 * @flags: flush flags to use when the migration job is emitted
 *
 * Store @flags on @job.
 */
static inline void
xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
{
	job->migrate_flush_flags = flags;
}

bool xe_sched_job_is_migration(struct xe_exec_queue *q);

struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job);
void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);

int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
			  enum dma_resv_usage usage);

#endif
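
/*
 * Illustrative only: a minimal sketch of a typical submission flow through
 * this API, assuming an existing exec queue "q" and a single batch buffer GPU
 * address "batch_addr"; dependency tracking, locking and error unwinding are
 * omitted.
 *
 *	struct xe_sched_job *job;
 *
 *	job = xe_sched_job_create(q, &batch_addr);
 *	if (IS_ERR(job))
 *		return PTR_ERR(job);
 *
 *	xe_sched_job_arm(job);
 *	xe_sched_job_push(job);
 */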