// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_sched_job.h"

#include <uapi/drm/xe_drm.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>

#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_pm.h"
#include "xe_sync_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

static struct kmem_cache *xe_sched_job_slab;
static struct kmem_cache *xe_sched_job_parallel_slab;

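/**
 * xe_sched_job_module_init - Create the slab caches used to allocate jobs
 *
 * One cache holds jobs with a single &xe_job_ptrs entry; the parallel cache
 * is sized for up to XE_HW_ENGINE_MAX_INSTANCE entries.
 *
 * Return: 0 on success, -ENOMEM if either cache cannot be created.
 */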
int __init xe_sched_job_module_init(void)
{
	xe_sched_job_slab =
		kmem_cache_create("xe_sched_job",
				  sizeof(struct xe_sched_job) +
				  sizeof(struct xe_job_ptrs), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!xe_sched_job_slab)
		return -ENOMEM;

	xe_sched_job_parallel_slab =
		kmem_cache_create("xe_sched_job_parallel",
				  sizeof(struct xe_sched_job) +
				  sizeof(struct xe_job_ptrs) *
				  XE_HW_ENGINE_MAX_INSTANCE, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!xe_sched_job_parallel_slab) {
		kmem_cache_destroy(xe_sched_job_slab);
		return -ENOMEM;
	}

	return 0;
}

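/**
 * xe_sched_job_module_exit - Destroy the job slab caches
 */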
void xe_sched_job_module_exit(void)
{
	kmem_cache_destroy(xe_sched_job_slab);
	kmem_cache_destroy(xe_sched_job_parallel_slab);
}

static struct xe_sched_job *job_alloc(bool parallel)
{
	return kmem_cache_zalloc(parallel ? xe_sched_job_parallel_slab :
				 xe_sched_job_slab, GFP_KERNEL);
}

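/**
 * xe_sched_job_is_migration - Whether the queue's jobs are migration jobs
 * @q: the &xe_exec_queue
 *
 * Return: true if the queue has a VM with XE_VM_FLAG_MIGRATION set.
 */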
bool xe_sched_job_is_migration(struct xe_exec_queue *q)
{
	return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
}

static void job_free(struct xe_sched_job *job)
{
	struct xe_exec_queue *q = job->q;
	bool is_migration = xe_sched_job_is_migration(q);

	kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
			xe_sched_job_parallel_slab : xe_sched_job_slab, job);
}

static struct xe_device *job_to_xe(struct xe_sched_job *job)
{
	return gt_to_xe(job->q->gt);
}

/* Free unused pre-allocated fences */
static void xe_sched_job_free_fences(struct xe_sched_job *job)
{
	int i;

	for (i = 0; i < job->q->width; ++i) {
		struct xe_job_ptrs *ptrs = &job->ptrs[i];

		if (ptrs->lrc_fence)
			xe_lrc_free_seqno_fence(ptrs->lrc_fence);
		dma_fence_chain_free(ptrs->chain_fence);
	}
}

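/**
 * xe_sched_job_create - Create Xe schedule job
 * @q: Xe exec queue the job will be submitted to
 * @batch_addr: array of batch buffer addresses, one per LRC of the queue
 *              (migration jobs use a fixed width of 2)
 *
 * Takes references on the exec queue and runtime PM, initializes the base
 * DRM scheduler job, and pre-allocates the seqno and chain fences that
 * xe_sched_job_arm() later arms.
 *
 * Return: the new job on success, an ERR_PTR on failure.
 */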
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
					 u64 *batch_addr)
{
	bool is_migration = xe_sched_job_is_migration(q);
	struct xe_sched_job *job;
	int err;
	int i;
	u32 width;

	/* only a kernel context can submit a vm-less job */
	XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));

	job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
	if (!job)
		return ERR_PTR(-ENOMEM);

	job->q = q;
	job->sample_timestamp = U64_MAX;
	kref_init(&job->refcount);
	xe_exec_queue_get(job->q);

	err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
				 q->xef ? q->xef->drm->client_id : 0);
	if (err)
		goto err_free;

	for (i = 0; i < q->width; ++i) {
		struct dma_fence *fence = xe_lrc_alloc_seqno_fence();
		struct dma_fence_chain *chain;

		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			goto err_sched_job;
		}
		job->ptrs[i].lrc_fence = fence;

		if (i + 1 == q->width)
			continue;

		chain = dma_fence_chain_alloc();
		if (!chain) {
			err = -ENOMEM;
			goto err_sched_job;
		}
		job->ptrs[i].chain_fence = chain;
	}

	width = q->width;
	if (is_migration)
		width = 2;

	for (i = 0; i < width; ++i)
		job->ptrs[i].batch_addr = batch_addr[i];

	atomic_inc(&q->job_cnt);
	xe_pm_runtime_get_noresume(job_to_xe(job));
	trace_xe_sched_job_create(job);
	return job;

err_sched_job:
	xe_sched_job_free_fences(job);
	drm_sched_job_cleanup(&job->drm);
err_free:
	xe_exec_queue_put(q);
	job_free(job);
	return ERR_PTR(err);
}

/**
 * xe_sched_job_destroy - Destroy Xe schedule job
 * @ref: reference to Xe schedule job
 *
 * Called when the refcount drops to zero: frees the unused pre-allocated
 * fences, puts the job's fence, cleans up the base DRM scheduler job, frees
 * the job's memory, and drops the references on the exec queue and runtime
 * PM.
 */
void xe_sched_job_destroy(struct kref *ref)
{
	struct xe_sched_job *job =
		container_of(ref, struct xe_sched_job, refcount);
	struct xe_device *xe = job_to_xe(job);
	struct xe_exec_queue *q = job->q;

	xe_sched_job_free_fences(job);
	dma_fence_put(job->fence);
	drm_sched_job_cleanup(&job->drm);
	job_free(job);
	atomic_dec(&q->job_cnt);
	xe_exec_queue_put(q);
	xe_pm_runtime_put(xe);
}

/* Set the error status under the fence lock to avoid racing with signaling */
static bool xe_fence_set_error(struct dma_fence *fence, int error)
{
	unsigned long irq_flags;
	bool signaled;

	spin_lock_irqsave(fence->lock, irq_flags);
	signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
	if (!signaled)
		dma_fence_set_error(fence, error);
	spin_unlock_irqrestore(fence->lock, irq_flags);

	return signaled;
}

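/**
 * xe_sched_job_set_error - Set an error on the job's fence(s)
 * @job: the &xe_sched_job
 * @error: negative error code
 *
 * No-op if the job's fence is already signaled. Otherwise the error is set
 * on the fence (and, for a chain, on every contained fence), software
 * signaling is enabled, and the fence IRQ handler is run so waiters observe
 * the error.
 */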
void xe_sched_job_set_error(struct xe_sched_job *job, int error)
{
	if (xe_fence_set_error(job->fence, error))
		return;

	if (dma_fence_is_chain(job->fence)) {
		struct dma_fence *iter;

		dma_fence_chain_for_each(iter, job->fence)
			xe_fence_set_error(dma_fence_chain_contained(iter),
					   error);
	}

	trace_xe_sched_job_set_error(job);

	dma_fence_enable_sw_signaling(job->fence);
	xe_hw_fence_irq_run(job->q->fence_irq);
}

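/**
 * xe_sched_job_started - Whether the job has started executing on the HW
 * @job: the &xe_sched_job
 *
 * Return: true if LRC[0]'s start seqno has caught up to the job's seqno.
 */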
bool xe_sched_job_started(struct xe_sched_job *job)
{
	struct dma_fence *fence = dma_fence_chain_contained(job->fence);
	struct xe_lrc *lrc = job->q->lrc[0];

	return !__dma_fence_is_later(fence,
				     xe_sched_job_lrc_seqno(job),
				     xe_lrc_start_seqno(lrc));
}

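/**
 * xe_sched_job_completed - Whether the job has completed on the HW
 * @job: the &xe_sched_job
 *
 * Return: true if LRC[0]'s written-back seqno has reached the job's seqno.
 */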
bool xe_sched_job_completed(struct xe_sched_job *job)
{
	struct dma_fence *fence = dma_fence_chain_contained(job->fence);
	struct xe_lrc *lrc = job->q->lrc[0];

	/*
	 * Can safely check just LRC[0]'s seqno as that is the last seqno
	 * written when the parallel handshake is done.
	 */

	return !__dma_fence_is_later(fence,
				     xe_sched_job_lrc_seqno(job),
				     xe_lrc_seqno(lrc));
}

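/**
 * xe_sched_job_arm - Arm a job for submission
 * @job: the &xe_sched_job
 *
 * Initializes the pre-allocated seqno fences against the queue's LRCs,
 * chains them together for parallel submission, records whether the ring
 * ops must flush the TLB before the batch, and arms the base DRM scheduler
 * job.
 */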
void xe_sched_job_arm(struct xe_sched_job *job)
{
	struct xe_exec_queue *q = job->q;
	struct dma_fence *fence, *prev;
	struct xe_vm *vm = q->vm;
	u64 seqno = 0;
	int i;

	/* Migration and kernel engines have their own locking */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    !(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
		lockdep_assert_held(&q->vm->lock);
		if (!xe_vm_in_lr_mode(q->vm))
			xe_vm_assert_held(q->vm);
	}

	if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
	    (vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
		xe_vm_assert_held(vm);
		q->tlb_flush_seqno = vm->tlb_flush_seqno;
		job->ring_ops_flush_tlb = true;
	}

	/* Arm the pre-allocated fences */
	for (i = 0; i < q->width; prev = fence, ++i) {
		struct dma_fence_chain *chain;

		fence = job->ptrs[i].lrc_fence;
		xe_lrc_init_seqno_fence(q->lrc[i], fence);
		job->ptrs[i].lrc_fence = NULL;
		if (!i) {
			job->lrc_seqno = fence->seqno;
			continue;
		} else {
			xe_assert(gt_to_xe(q->gt), job->lrc_seqno == fence->seqno);
		}

		chain = job->ptrs[i - 1].chain_fence;
		dma_fence_chain_init(chain, prev, fence, seqno++);
		job->ptrs[i - 1].chain_fence = NULL;
		fence = &chain->base;
	}

	job->fence = dma_fence_get(fence);	/* Pairs with put in scheduler */
	drm_sched_job_arm(&job->drm);
}

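/**
 * xe_sched_job_push - Push a job to the DRM scheduler for execution
 * @job: the &xe_sched_job
 *
 * A local reference is held across the push so the job cannot disappear
 * while it is traced and queued on the entity.
 */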
void xe_sched_job_push(struct xe_sched_job *job)
{
	xe_sched_job_get(job);
	trace_xe_sched_job_exec(job);
	drm_sched_entity_push_job(&job->drm);
	xe_sched_job_put(job);
}

/**
 * xe_sched_job_init_user_fence - Initialize user_fence for the job
 * @job: job whose user_fence needs an init
 * @sync: sync to be used to init the user_fence
 */
void xe_sched_job_init_user_fence(struct xe_sched_job *job,
				  struct xe_sync_entry *sync)
{
	if (sync->type != DRM_XE_SYNC_TYPE_USER_FENCE)
		return;

	job->user_fence.used = true;
	job->user_fence.addr = sync->addr;
	job->user_fence.value = sync->timeline_value;
}

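/**
 * xe_sched_job_snapshot_capture - Capture the job's batch addresses for debug
 * @job: the &xe_sched_job to snapshot
 *
 * Allocated with GFP_ATOMIC so capture is safe from atomic context.
 *
 * Return: the snapshot, or NULL on allocation failure.
 */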
struct xe_sched_job_snapshot *
xe_sched_job_snapshot_capture(struct xe_sched_job *job)
{
	struct xe_exec_queue *q = job->q;
	struct xe_device *xe = q->gt->tile->xe;
	struct xe_sched_job_snapshot *snapshot;
	size_t len = sizeof(*snapshot) + (sizeof(u64) * q->width);
	u16 i;

	snapshot = kzalloc(len, GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->batch_addr_len = q->width;
	for (i = 0; i < q->width; i++)
		snapshot->batch_addr[i] =
			xe_device_uncanonicalize_addr(xe, job->ptrs[i].batch_addr);

	return snapshot;
}

void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot)
{
	kfree(snapshot);
}

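/**
 * xe_sched_job_snapshot_print - Print a previously captured job snapshot
 * @snapshot: snapshot to print, may be NULL
 * @p: &drm_printer to print to
 */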
void
xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot,
			    struct drm_printer *p)
{
	u16 i;

	if (!snapshot)
		return;

	for (i = 0; i < snapshot->batch_addr_len; i++)
		drm_printf(p, "batch_addr[%u]: 0x%016llx\n", i, snapshot->batch_addr[i]);
}

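/**
 * xe_sched_job_add_deps - Add a dma-resv object's fences as job dependencies
 * @job: the &xe_sched_job
 * @resv: the &dma_resv object
 * @usage: the &dma_resv_usage level to wait for
 *
 * Return: 0 on success, a negative error code on failure.
 */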
int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
			  enum dma_resv_usage usage)
{
	return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
}