/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_

#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_file;
struct xe_device;
struct xe_file;

#define for_each_tlb_inval(__i)	\
	for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
	     __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)
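
/*
 * Illustrative sketch (not part of this header): walking every TLB
 * invalidation type, from XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT through
 * XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT inclusive. Assumes the enum values are
 * contiguous, as the macro bounds imply.
 *
 *	unsigned int type;
 *
 *	for_each_tlb_inval(type)
 *		pr_debug("TLB inval type %u\n", type);
 */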

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hw_engine, u32 flags,
					   u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						struct xe_vm *user_vm,
						u32 flags, u64 extensions);

void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);

static inline struct xe_exec_queue *
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
{
	if (kref_get_unless_zero(&q->refcount))
		return q;

	return NULL;
}
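
/*
 * Illustrative sketch (not part of this header): taking a reference only if
 * the exec_queue is still alive, e.g. when @q was found through a structure
 * that does not itself hold a reference. Assumes a lock or RCU keeps the
 * memory valid while the check runs.
 *
 *	struct xe_exec_queue *ref = xe_exec_queue_get_unless_zero(q);
 *
 *	if (ref) {
 *		... use ref safely ...
 *		xe_exec_queue_put(ref);
 *	}
 */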

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);

static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
	kref_get(&q->refcount);
	return q;
}

static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
	kref_put(&q->refcount, xe_exec_queue_destroy);
}
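
/*
 * Illustrative sketch (not part of this header): every xe_exec_queue_get()
 * must be balanced by an xe_exec_queue_put(); the final put releases the
 * queue via xe_exec_queue_destroy(). The assumption that
 * xe_exec_queue_lookup() returns a referenced queue (or NULL) is ours, not a
 * contract stated in this header.
 *
 *	struct xe_exec_queue *q = xe_exec_queue_lookup(xef, id);
 *
 *	if (!q)
 *		return -ENOENT;
 *	... submit work, query state, etc. ...
 *	xe_exec_queue_put(q);
 */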

static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
	return q->width > 1;
}

static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
{
	return q->pxp.type;
}

/**
 * xe_exec_queue_is_multi_queue() - Whether an exec_queue is part of a queue group.
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is part of a queue group, false otherwise.
 */
static inline bool xe_exec_queue_is_multi_queue(struct xe_exec_queue *q)
{
	return q->multi_queue.valid;
}

/**
 * xe_exec_queue_is_multi_queue_primary() - Whether an exec_queue is the primary
 * queue of a multi queue group.
 * @q: The exec_queue
 *
 * Return: True if @q is the primary queue of a queue group, false otherwise.
 */
static inline bool xe_exec_queue_is_multi_queue_primary(struct xe_exec_queue *q)
{
	return q->multi_queue.is_primary;
}

/**
 * xe_exec_queue_is_multi_queue_secondary() - Whether an exec_queue is a secondary
 * queue of a multi queue group.
 * @q: The exec_queue
 *
 * Return: True if @q is a secondary queue of a queue group, false otherwise.
 */
static inline bool xe_exec_queue_is_multi_queue_secondary(struct xe_exec_queue *q)
{
	return xe_exec_queue_is_multi_queue(q) && !xe_exec_queue_is_multi_queue_primary(q);
}

/**
 * xe_exec_queue_multi_queue_primary() - Get a multi queue group's primary queue
 * @q: The exec_queue
 *
 * Return: The primary queue of @q's multi queue group if @q belongs to one,
 * otherwise @q itself.
 */
static inline struct xe_exec_queue *xe_exec_queue_multi_queue_primary(struct xe_exec_queue *q)
{
	return xe_exec_queue_is_multi_queue(q) ? q->multi_queue.group->primary : q;
}
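
/*
 * Illustrative sketch (not part of this header): resolving which queue to
 * act on when an operation must target a group's primary queue. Assumes
 * group membership is stable while the caller holds its reference.
 *
 *	struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
 *
 *	if (xe_exec_queue_is_multi_queue_secondary(q))
 *		... primary != q, operate on the group leader ...
 *	else
 *		... primary == q for standalone queues and group leaders ...
 */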

bool xe_exec_queue_is_lr(struct xe_exec_queue *q);

bool xe_exec_queue_is_idle(struct xe_exec_queue *q);

void xe_exec_queue_kill(struct xe_exec_queue *q);

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file);
int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);

void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
					       struct xe_vm *vm);
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
							  struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
				  struct dma_fence *fence);
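
/*
 * Illustrative sketch (not part of this header): waiting on the last fence
 * of an exec_queue for a given VM. Assumes the caller holds the VM lock the
 * _get() variant expects and that the returned fence carries a reference
 * that must be dropped with dma_fence_put().
 *
 *	struct dma_fence *fence = xe_exec_queue_last_fence_get(e, vm);
 *
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */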

void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
						     unsigned int type);

struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
							 struct xe_vm *vm,
							 unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    struct dma_fence *fence,
					    unsigned int type);
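
/*
 * Illustrative sketch (not part of this header): dropping the cached TLB
 * invalidation last fences for every invalidation type, e.g. during
 * teardown, by combining for_each_tlb_inval() with the put helper. Assumes
 * the same locking rules as the plain last-fence helpers above.
 *
 *	unsigned int type;
 *
 *	for_each_tlb_inval(type)
 *		xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
 */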

void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);

int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);

struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);

/**
 * xe_exec_queue_idle_skip_suspend() - Can the exec queue skip suspend
 * @q: The exec_queue
 *
 * If an exec queue is not parallel and is idle, the suspend steps can be
 * skipped in the submission backend, immediately signaling the suspend fence.
 * Parallel queues cannot skip this step due to limitations in the submission
 * backend.
 *
 * Return: True if the exec queue is idle and can skip the suspend steps,
 * false otherwise.
 */
static inline bool xe_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
{
	return !xe_exec_queue_is_parallel(q) && xe_exec_queue_is_idle(q);
}

#endif