/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;

enum xe_exec_queue_priority {
        XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
        XE_EXEC_QUEUE_PRIORITY_LOW = 0,
        XE_EXEC_QUEUE_PRIORITY_NORMAL,
        XE_EXEC_QUEUE_PRIORITY_HIGH,
        XE_EXEC_QUEUE_PRIORITY_KERNEL,

        XE_EXEC_QUEUE_PRIORITY_COUNT
};

/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
        /** @xef: Back pointer to xe file if this is a user-created exec queue */
        struct xe_file *xef;

        /** @gt: GT structure this exec queue can submit to */
        struct xe_gt *gt;
        /**
         * @hwe: A hardware engine of the same class. May (physical engine) or
         * may not (virtual engine) be the engine jobs actually end up running
         * on. Should never really be used for submissions.
         */
        struct xe_hw_engine *hwe;
        /** @refcount: ref count of this exec queue */
        struct kref refcount;
        /** @vm: VM (address space) for this exec queue */
        struct xe_vm *vm;
        /** @class: class of this exec queue */
        enum xe_engine_class class;
        /**
         * @logical_mask: logical mask of where jobs submitted to this exec
         * queue can run
         */
        u32 logical_mask;
        /** @name: name of this exec queue */
        char name[MAX_FENCE_NAME_LEN];
        /** @width: width (number of batch buffers submitted per exec) of this exec queue */
        u16 width;
        /** @msix_vec: MSI-X vector (for platforms that support it) */
        u16 msix_vec;
        /** @fence_irq: fence IRQ used to signal job completion */
        struct xe_hw_fence_irq *fence_irq;

        /**
         * @last_fence: last fence on the exec queue, protected by vm->lock in
         * write mode if bind exec queue, protected by the dma-resv lock if
         * non-bind exec queue
         */
        struct dma_fence *last_fence;

/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL                  BIT(0)
/* kernel engine only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT               BIT(1)
/* for VM jobs. Caller needs to hold an rpm ref when creating a queue with this flag */
#define EXEC_QUEUE_FLAG_VM                      BIT(2)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD       BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY           BIT(4)
/* flag to indicate low latency hint to guc */
#define EXEC_QUEUE_FLAG_LOW_LATENCY             BIT(5)
/* for migration (kernel copy, clear, bind) jobs */
#define EXEC_QUEUE_FLAG_MIGRATE                 BIT(6)

        /**
         * @flags: flags for this exec queue, should be set up statically aside
         * from the ban bit
         */
        unsigned long flags;
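
        /*
         * Illustrative sketch (not part of the driver): @flags is a plain
         * bitmask built from the EXEC_QUEUE_FLAG_* defines above, so it is
         * combined and tested with ordinary bit operations. For example, a
         * kernel queue at the highest priority would carry
         * EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_HIGH_PRIORITY, and a
         * teardown path might check (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
         * before destroying the queue.
         */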

        union {
                /** @multi_gt_list: list head for VM bind engines if multi-GT */
                struct list_head multi_gt_list;
                /** @multi_gt_link: link for VM bind engines if multi-GT */
                struct list_head multi_gt_link;
        };

        union {
                /** @execlist: execlist backend specific state for exec queue */
                struct xe_execlist_exec_queue *execlist;
                /** @guc: GuC backend specific state for exec queue */
                struct xe_guc_exec_queue *guc;
        };

        /** @sched_props: scheduling properties */
        struct {
                /** @sched_props.timeslice_us: timeslice period in microseconds */
                u32 timeslice_us;
                /** @sched_props.preempt_timeout_us: preemption timeout in microseconds */
                u32 preempt_timeout_us;
                /** @sched_props.job_timeout_ms: job timeout in milliseconds */
                u32 job_timeout_ms;
                /** @sched_props.priority: priority of this exec queue */
                enum xe_exec_queue_priority priority;
        } sched_props;

        /** @lr: long-running exec queue state */
        struct {
                /** @lr.pfence: preemption fence */
                struct dma_fence *pfence;
                /** @lr.context: preemption fence context */
                u64 context;
                /** @lr.seqno: preemption fence seqno */
                u32 seqno;
                /** @lr.link: link into VM's list of exec queues */
                struct list_head link;
        } lr;

#define XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT      0
#define XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT        1
#define XE_EXEC_QUEUE_TLB_INVAL_COUNT   (XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT + 1)

        /** @tlb_inval: TLB invalidations exec queue state */
        struct {
                /**
                 * @tlb_inval.dep_scheduler: The TLB invalidation
                 * dependency scheduler
                 */
                struct xe_dep_scheduler *dep_scheduler;
        } tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];

        /** @pxp: PXP info tracking */
        struct {
                /** @pxp.type: PXP session type used by this queue */
                u8 type;
                /** @pxp.link: link into the list of PXP exec queues */
                struct list_head link;
        } pxp;

        /** @ops: submission backend exec queue operations */
        const struct xe_exec_queue_ops *ops;

        /** @ring_ops: ring operations for this exec queue */
        const struct xe_ring_ops *ring_ops;
        /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
        struct drm_sched_entity *entity;
        /**
         * @tlb_flush_seqno: The seqno of the last rebind TLB flush performed.
         * Protected by @vm's resv. Unused if @vm == NULL.
         */
        u64 tlb_flush_seqno;
        /** @hw_engine_group_link: link into exec queues in the same hw engine group */
        struct list_head hw_engine_group_link;
        /** @lrc: logical ring context for this exec queue */
        struct xe_lrc *lrc[] __counted_by(width);
};
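
/*
 * Illustrative sketch (not part of the driver): because @lrc above is a
 * flexible array annotated with __counted_by(width), an allocation for a
 * queue of a given width would typically be sized with the struct_size()
 * helper from <linux/overflow.h>, e.g.:
 *
 *      q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
 *
 * which reserves room for one struct xe_lrc pointer per batch buffer of the
 * queue's width.
 */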

/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
        /** @init: Initialize exec queue for submission backend */
        int (*init)(struct xe_exec_queue *q);
        /** @kill: Kill inflight submissions for backend */
        void (*kill)(struct xe_exec_queue *q);
        /** @fini: Undoes the init() for submission backend */
        void (*fini)(struct xe_exec_queue *q);
        /**
         * @destroy: Destroy exec queue for submission backend. The backend
         * function must call xe_exec_queue_fini() (which will in turn call the
         * fini() backend function) to ensure the queue is properly cleaned up.
         */
        void (*destroy)(struct xe_exec_queue *q);
        /** @set_priority: Set priority for exec queue */
        int (*set_priority)(struct xe_exec_queue *q,
                            enum xe_exec_queue_priority priority);
        /** @set_timeslice: Set timeslice for exec queue */
        int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
        /** @set_preempt_timeout: Set preemption timeout for exec queue */
        int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
        /**
         * @suspend: Suspend the exec queue from executing. May be called
         * multiple times in a row before resume, with the caveat that
         * suspend_wait must return before suspend is called again.
         */
        int (*suspend)(struct xe_exec_queue *q);
        /**
         * @suspend_wait: Wait for the exec queue to suspend executing; should
         * be called after suspend. This is in the dma-fencing path, thus it
         * must return within a reasonable amount of time. A -ETIME return
         * indicates an error waiting for suspend, resulting in the associated
         * VM being killed.
         */
        int (*suspend_wait)(struct xe_exec_queue *q);
        /**
         * @resume: Resume exec queue execution. The exec queue must be in a
         * suspended state, and the dma fence returned from the most recent
         * suspend call must be signalled when this function is called.
         */
        void (*resume)(struct xe_exec_queue *q);
        /** @reset_status: check exec queue reset status */
        bool (*reset_status)(struct xe_exec_queue *q);
};

#endif
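
/*
 * Illustrative sketch (not part of the driver): per the kernel-doc above, the
 * suspend ops are expected to be called in order, e.g.:
 *
 *      err = q->ops->suspend(q);
 *      if (!err)
 *              err = q->ops->suspend_wait(q);
 *
 * suspend_wait() must return (possibly with -ETIME) before suspend() may be
 * called again, and q->ops->resume(q) may only run once the fence from the
 * most recent suspend has signalled.
 */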