1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include "xe_guc_submit.h"
7
8 #include <linux/bitfield.h>
9 #include <linux/bitmap.h>
10 #include <linux/circ_buf.h>
11 #include <linux/delay.h>
12 #include <linux/dma-fence-array.h>
13 #include <linux/math64.h>
14
15 #include <drm/drm_managed.h>
16
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_klvs_abi.h"
19 #include "regs/xe_lrc_layout.h"
20 #include "xe_assert.h"
21 #include "xe_devcoredump.h"
22 #include "xe_device.h"
23 #include "xe_exec_queue.h"
24 #include "xe_force_wake.h"
25 #include "xe_gpu_scheduler.h"
26 #include "xe_gt.h"
27 #include "xe_gt_clock.h"
28 #include "xe_gt_printk.h"
29 #include "xe_guc.h"
30 #include "xe_guc_ct.h"
31 #include "xe_guc_exec_queue_types.h"
32 #include "xe_guc_id_mgr.h"
33 #include "xe_guc_submit_types.h"
34 #include "xe_hw_engine.h"
35 #include "xe_hw_fence.h"
36 #include "xe_lrc.h"
37 #include "xe_macros.h"
38 #include "xe_map.h"
39 #include "xe_mocs.h"
40 #include "xe_pm.h"
41 #include "xe_ring_ops_types.h"
42 #include "xe_sched_job.h"
43 #include "xe_trace.h"
44 #include "xe_vm.h"
45
46 static struct xe_guc *
exec_queue_to_guc(struct xe_exec_queue *q)
48 {
49 return &q->gt->uc.guc;
50 }
51
/*
 * Helpers for engine state, using an atomic as some of the bits can transition
 * at the same time (e.g. a suspend can be happening at the same time as a
 * schedule engine done message is being processed).
 */
57 #define EXEC_QUEUE_STATE_REGISTERED (1 << 0)
58 #define EXEC_QUEUE_STATE_ENABLED (1 << 1)
59 #define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
60 #define EXEC_QUEUE_STATE_PENDING_DISABLE (1 << 3)
61 #define EXEC_QUEUE_STATE_DESTROYED (1 << 4)
62 #define EXEC_QUEUE_STATE_SUSPENDED (1 << 5)
63 #define EXEC_QUEUE_STATE_RESET (1 << 6)
64 #define EXEC_QUEUE_STATE_KILLED (1 << 7)
65 #define EXEC_QUEUE_STATE_WEDGED (1 << 8)
66 #define EXEC_QUEUE_STATE_BANNED (1 << 9)
67 #define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
68 #define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
69
static bool exec_queue_registered(struct xe_exec_queue *q)
71 {
72 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
73 }
74
static void set_exec_queue_registered(struct xe_exec_queue *q)
76 {
77 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
78 }
79
static void clear_exec_queue_registered(struct xe_exec_queue *q)
81 {
82 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
83 }
84
static bool exec_queue_enabled(struct xe_exec_queue *q)
86 {
87 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
88 }
89
static void set_exec_queue_enabled(struct xe_exec_queue *q)
91 {
92 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
93 }
94
static void clear_exec_queue_enabled(struct xe_exec_queue *q)
96 {
97 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
98 }
99
static bool exec_queue_pending_enable(struct xe_exec_queue *q)
101 {
102 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
103 }
104
static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
106 {
107 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
108 }
109
static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
111 {
112 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
113 }
114
static bool exec_queue_pending_disable(struct xe_exec_queue *q)
116 {
117 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
118 }
119
static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
121 {
122 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
123 }
124
static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
126 {
127 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
128 }
129
static bool exec_queue_destroyed(struct xe_exec_queue *q)
131 {
132 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
133 }
134
static void set_exec_queue_destroyed(struct xe_exec_queue *q)
136 {
137 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
138 }
139
static bool exec_queue_banned(struct xe_exec_queue *q)
141 {
142 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
143 }
144
static void set_exec_queue_banned(struct xe_exec_queue *q)
146 {
147 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
148 }
149
static bool exec_queue_suspended(struct xe_exec_queue *q)
151 {
152 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
153 }
154
static void set_exec_queue_suspended(struct xe_exec_queue *q)
156 {
157 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
158 }
159
static void clear_exec_queue_suspended(struct xe_exec_queue *q)
161 {
162 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
163 }
164
static bool exec_queue_reset(struct xe_exec_queue *q)
166 {
167 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
168 }
169
static void set_exec_queue_reset(struct xe_exec_queue *q)
171 {
172 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
173 }
174
static bool exec_queue_killed(struct xe_exec_queue *q)
176 {
177 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
178 }
179
static void set_exec_queue_killed(struct xe_exec_queue *q)
181 {
182 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
183 }
184
static bool exec_queue_wedged(struct xe_exec_queue *q)
186 {
187 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
188 }
189
static void set_exec_queue_wedged(struct xe_exec_queue *q)
191 {
192 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
193 }
194
static bool exec_queue_check_timeout(struct xe_exec_queue *q)
196 {
197 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT;
198 }
199
static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
201 {
202 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
203 }
204
static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
206 {
207 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
208 }
209
static bool exec_queue_extra_ref(struct xe_exec_queue *q)
211 {
212 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF;
213 }
214
static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
216 {
217 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
218 }
219
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
221 {
222 return (atomic_read(&q->guc->state) &
223 (EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED |
224 EXEC_QUEUE_STATE_BANNED));
225 }
226
static void guc_submit_fini(struct drm_device *drm, void *arg)
228 {
229 struct xe_guc *guc = arg;
230
231 xa_destroy(&guc->submission_state.exec_queue_lookup);
232 }
233
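/*
 * Drop the extra references taken on wedged exec queues by
 * xe_guc_submit_wedge(); runs as a cleanup action on driver removal.
 */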
static void guc_submit_wedged_fini(void *arg)
235 {
236 struct xe_guc *guc = arg;
237 struct xe_exec_queue *q;
238 unsigned long index;
239
240 mutex_lock(&guc->submission_state.lock);
241 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
242 if (exec_queue_wedged(q)) {
243 mutex_unlock(&guc->submission_state.lock);
244 xe_exec_queue_put(q);
245 mutex_lock(&guc->submission_state.lock);
246 }
247 }
248 mutex_unlock(&guc->submission_state.lock);
249 }
250
251 static const struct xe_exec_queue_ops guc_exec_queue_ops;
252
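/*
 * Prime lockdep: take submission_state.lock under fs_reclaim so that lockdep
 * learns the ordering and can warn about reclaim-unsafe allocations performed
 * while holding this lock.
 */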
static void primelockdep(struct xe_guc *guc)
254 {
255 if (!IS_ENABLED(CONFIG_LOCKDEP))
256 return;
257
258 fs_reclaim_acquire(GFP_KERNEL);
259
260 mutex_lock(&guc->submission_state.lock);
261 mutex_unlock(&guc->submission_state.lock);
262
263 fs_reclaim_release(GFP_KERNEL);
264 }
265
/**
 * xe_guc_submit_init() - Initialize GuC submission.
 * @guc: the &xe_guc to initialize
 * @num_ids: number of GuC context IDs to use
 *
 * The bare-metal or PF driver can pass ~0 as &num_ids to indicate that all
 * GuC context IDs supported by the GuC firmware should be used for submission.
 *
 * Only VF drivers have to provide an explicit number of GuC context IDs
 * that they can use for submission.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
280 {
281 struct xe_device *xe = guc_to_xe(guc);
282 struct xe_gt *gt = guc_to_gt(guc);
283 int err;
284
285 err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock);
286 if (err)
287 return err;
288
289 err = xe_guc_id_mgr_init(&guc->submission_state.idm, num_ids);
290 if (err)
291 return err;
292
293 gt->exec_queue_ops = &guc_exec_queue_ops;
294
295 xa_init(&guc->submission_state.exec_queue_lookup);
296
297 init_waitqueue_head(&guc->submission_state.fini_wq);
298
299 primelockdep(guc);
300
301 return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
302 }
303
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
305 {
306 int i;
307
308 lockdep_assert_held(&guc->submission_state.lock);
309
310 for (i = 0; i < xa_count; ++i)
311 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
312
313 xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
314 q->guc->id, q->width);
315
316 if (xa_empty(&guc->submission_state.exec_queue_lookup))
317 wake_up(&guc->submission_state.fini_wq);
318 }
319
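/*
 * Reserve a contiguous range of q->width GuC context IDs for the queue and
 * insert the queue into the exec_queue_lookup xarray under each ID.
 */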
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
321 {
322 int ret;
323 int i;
324
/*
 * Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
 * worst case the user gets -ENOMEM on engine create and has to try again.
 *
 * FIXME: Have the caller pre-alloc or post-alloc with GFP_KERNEL to prevent
 * failure.
 */
332 lockdep_assert_held(&guc->submission_state.lock);
333
334 ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
335 q->width);
336 if (ret < 0)
337 return ret;
338
339 q->guc->id = ret;
340
341 for (i = 0; i < q->width; ++i) {
342 ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
343 q->guc->id + i, q, GFP_NOWAIT));
344 if (ret)
345 goto err_release;
346 }
347
348 return 0;
349
350 err_release:
351 __release_guc_id(guc, q, i);
352
353 return ret;
354 }
355
static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
357 {
358 mutex_lock(&guc->submission_state.lock);
359 __release_guc_id(guc, q, q->width);
360 mutex_unlock(&guc->submission_state.lock);
361 }
362
363 struct exec_queue_policy {
364 u32 count;
365 struct guc_update_exec_queue_policy h2g;
366 };
367
static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
369 {
370 size_t bytes = sizeof(policy->h2g.header) +
371 (sizeof(policy->h2g.klv[0]) * policy->count);
372
373 return bytes / sizeof(u32);
374 }
375
static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
377 u16 guc_id)
378 {
379 policy->h2g.header.action =
380 XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
381 policy->h2g.header.guc_id = guc_id;
382 policy->count = 0;
383 }
384
385 #define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
386 static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
387 u32 data) \
388 { \
389 XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
390 \
391 policy->h2g.klv[policy->count].kl = \
392 FIELD_PREP(GUC_KLV_0_KEY, \
393 GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
394 FIELD_PREP(GUC_KLV_0_LEN, 1); \
395 policy->h2g.klv[policy->count].value = data; \
396 policy->count++; \
397 }
398
399 MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
400 MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
401 MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
402 #undef MAKE_EXEC_QUEUE_POLICY_ADD
403
404 static const int xe_exec_queue_prio_to_guc[] = {
405 [XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
406 [XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
407 [XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
408 [XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
409 };
410
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
412 {
413 struct exec_queue_policy policy;
414 struct xe_device *xe = guc_to_xe(guc);
415 enum xe_exec_queue_priority prio = q->sched_props.priority;
416 u32 timeslice_us = q->sched_props.timeslice_us;
417 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
418
419 xe_assert(xe, exec_queue_registered(q));
420
421 __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
422 __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
423 __guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
424 __guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
425
426 xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
427 __guc_exec_queue_policy_action_size(&policy), 0, 0);
428 }
429
static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
431 {
432 struct exec_queue_policy policy;
433
434 __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
435 __guc_exec_queue_policy_add_preemption_timeout(&policy, 1);
436
437 xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
438 __guc_exec_queue_policy_action_size(&policy), 0, 0);
439 }
440
441 #define parallel_read(xe_, map_, field_) \
442 xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
443 field_)
444 #define parallel_write(xe_, map_, field_, val_) \
445 xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
446 field_, val_)
447
static void __register_mlrc_exec_queue(struct xe_guc *guc,
449 struct xe_exec_queue *q,
450 struct guc_ctxt_registration_info *info)
451 {
452 #define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
453 struct xe_device *xe = guc_to_xe(guc);
454 u32 action[MAX_MLRC_REG_SIZE];
455 int len = 0;
456 int i;
457
458 xe_assert(xe, xe_exec_queue_is_parallel(q));
459
460 action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
461 action[len++] = info->flags;
462 action[len++] = info->context_idx;
463 action[len++] = info->engine_class;
464 action[len++] = info->engine_submit_mask;
465 action[len++] = info->wq_desc_lo;
466 action[len++] = info->wq_desc_hi;
467 action[len++] = info->wq_base_lo;
468 action[len++] = info->wq_base_hi;
469 action[len++] = info->wq_size;
470 action[len++] = q->width;
471 action[len++] = info->hwlrca_lo;
472 action[len++] = info->hwlrca_hi;
473
474 for (i = 1; i < q->width; ++i) {
475 struct xe_lrc *lrc = q->lrc[i];
476
477 action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
478 action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
479 }
480
481 xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
482 #undef MAX_MLRC_REG_SIZE
483
484 xe_guc_ct_send(&guc->ct, action, len, 0, 0);
485 }
486
static void __register_exec_queue(struct xe_guc *guc,
488 struct guc_ctxt_registration_info *info)
489 {
490 u32 action[] = {
491 XE_GUC_ACTION_REGISTER_CONTEXT,
492 info->flags,
493 info->context_idx,
494 info->engine_class,
495 info->engine_submit_mask,
496 info->wq_desc_lo,
497 info->wq_desc_hi,
498 info->wq_base_lo,
499 info->wq_base_hi,
500 info->wq_size,
501 info->hwlrca_lo,
502 info->hwlrca_hi,
503 };
504
505 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
506 }
507
static void register_exec_queue(struct xe_exec_queue *q)
509 {
510 struct xe_guc *guc = exec_queue_to_guc(q);
511 struct xe_device *xe = guc_to_xe(guc);
512 struct xe_lrc *lrc = q->lrc[0];
513 struct guc_ctxt_registration_info info;
514
515 xe_assert(xe, !exec_queue_registered(q));
516
517 memset(&info, 0, sizeof(info));
518 info.context_idx = q->guc->id;
519 info.engine_class = xe_engine_class_to_guc_class(q->class);
520 info.engine_submit_mask = q->logical_mask;
521 info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
522 info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
523 info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
524
525 if (xe_exec_queue_is_parallel(q)) {
526 u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
527 struct iosys_map map = xe_lrc_parallel_map(lrc);
528
529 info.wq_desc_lo = lower_32_bits(ggtt_addr +
530 offsetof(struct guc_submit_parallel_scratch, wq_desc));
531 info.wq_desc_hi = upper_32_bits(ggtt_addr +
532 offsetof(struct guc_submit_parallel_scratch, wq_desc));
533 info.wq_base_lo = lower_32_bits(ggtt_addr +
534 offsetof(struct guc_submit_parallel_scratch, wq[0]));
535 info.wq_base_hi = upper_32_bits(ggtt_addr +
536 offsetof(struct guc_submit_parallel_scratch, wq[0]));
537 info.wq_size = WQ_SIZE;
538
539 q->guc->wqi_head = 0;
540 q->guc->wqi_tail = 0;
541 xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
542 parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
543 }
544
/*
 * We must keep a reference for LR engines if the engine is registered with
 * the GuC, as jobs signal immediately and we can't destroy an engine while
 * the GuC holds a reference to it.
 */
550 if (xe_exec_queue_is_lr(q))
551 xe_exec_queue_get(q);
552
553 set_exec_queue_registered(q);
554 trace_xe_exec_queue_register(q);
555 if (xe_exec_queue_is_parallel(q))
556 __register_mlrc_exec_queue(guc, q, &info);
557 else
558 __register_exec_queue(guc, &info);
559 init_policies(guc, q);
560 }
561
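/*
 * Parallel (multi-LRC) queues are submitted through a software work queue
 * (WQ) in the parallel scratch page. The helpers below track WQ space and
 * append work queue items (WQIs) for the GuC to consume.
 */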
static u32 wq_space_until_wrap(struct xe_exec_queue *q)
563 {
564 return (WQ_SIZE - q->guc->wqi_tail);
565 }
566
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
568 {
569 struct xe_guc *guc = exec_queue_to_guc(q);
570 struct xe_device *xe = guc_to_xe(guc);
571 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
572 unsigned int sleep_period_ms = 1;
573
574 #define AVAILABLE_SPACE \
575 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
576 if (wqi_size > AVAILABLE_SPACE) {
577 try_again:
578 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
579 if (wqi_size > AVAILABLE_SPACE) {
580 if (sleep_period_ms == 1024) {
581 xe_gt_reset_async(q->gt);
582 return -ENODEV;
583 }
584
585 msleep(sleep_period_ms);
586 sleep_period_ms <<= 1;
587 goto try_again;
588 }
589 }
590 #undef AVAILABLE_SPACE
591
592 return 0;
593 }
594
static int wq_noop_append(struct xe_exec_queue *q)
596 {
597 struct xe_guc *guc = exec_queue_to_guc(q);
598 struct xe_device *xe = guc_to_xe(guc);
599 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
600 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
601
602 if (wq_wait_for_space(q, wq_space_until_wrap(q)))
603 return -ENODEV;
604
605 xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
606
607 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
608 FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
609 FIELD_PREP(WQ_LEN_MASK, len_dw));
610 q->guc->wqi_tail = 0;
611
612 return 0;
613 }
614
static void wq_item_append(struct xe_exec_queue *q)
616 {
617 struct xe_guc *guc = exec_queue_to_guc(q);
618 struct xe_device *xe = guc_to_xe(guc);
619 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
620 #define WQ_HEADER_SIZE 4 /* Includes 1 LRC address too */
621 u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
622 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
623 u32 len_dw = (wqi_size / sizeof(u32)) - 1;
624 int i = 0, j;
625
626 if (wqi_size > wq_space_until_wrap(q)) {
627 if (wq_noop_append(q))
628 return;
629 }
630 if (wq_wait_for_space(q, wqi_size))
631 return;
632
633 wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
634 FIELD_PREP(WQ_LEN_MASK, len_dw);
635 wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
636 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
637 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
638 wqi[i++] = 0;
639 for (j = 1; j < q->width; ++j) {
640 struct xe_lrc *lrc = q->lrc[j];
641
642 wqi[i++] = lrc->ring.tail / sizeof(u64);
643 }
644
645 xe_assert(xe, i == wqi_size / sizeof(u32));
646
647 iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
648 wq[q->guc->wqi_tail / sizeof(u32)]));
649 xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
650 q->guc->wqi_tail += wqi_size;
651 xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
652
653 xe_device_wmb(xe);
654
655 map = xe_lrc_parallel_map(q->lrc[0]);
656 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
657 }
658
659 #define RESUME_PENDING ~0x0ull
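/*
 * Kick the GuC for a queue: append a WQ item (parallel) or update the ring
 * tail (single LRC), then either enable scheduling on first submission or
 * re-schedule the already-enabled context.
 */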
static void submit_exec_queue(struct xe_exec_queue *q)
661 {
662 struct xe_guc *guc = exec_queue_to_guc(q);
663 struct xe_device *xe = guc_to_xe(guc);
664 struct xe_lrc *lrc = q->lrc[0];
665 u32 action[3];
666 u32 g2h_len = 0;
667 u32 num_g2h = 0;
668 int len = 0;
669 bool extra_submit = false;
670
671 xe_assert(xe, exec_queue_registered(q));
672
673 if (xe_exec_queue_is_parallel(q))
674 wq_item_append(q);
675 else
676 xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
677
678 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
679 return;
680
681 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
682 action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
683 action[len++] = q->guc->id;
684 action[len++] = GUC_CONTEXT_ENABLE;
685 g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
686 num_g2h = 1;
687 if (xe_exec_queue_is_parallel(q))
688 extra_submit = true;
689
690 q->guc->resume_time = RESUME_PENDING;
691 set_exec_queue_pending_enable(q);
692 set_exec_queue_enabled(q);
693 trace_xe_exec_queue_scheduling_enable(q);
694 } else {
695 action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
696 action[len++] = q->guc->id;
697 trace_xe_exec_queue_submit(q);
698 }
699
700 xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);
701
702 if (extra_submit) {
703 len = 0;
704 action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
705 action[len++] = q->guc->id;
706 trace_xe_exec_queue_submit(q);
707
708 xe_guc_ct_send(&guc->ct, action, len, 0, 0);
709 }
710 }
711
712 static struct dma_fence *
guc_exec_queue_run_job(struct drm_sched_job *drm_job)
714 {
715 struct xe_sched_job *job = to_xe_sched_job(drm_job);
716 struct xe_exec_queue *q = job->q;
717 struct xe_guc *guc = exec_queue_to_guc(q);
718 struct xe_device *xe = guc_to_xe(guc);
719 bool lr = xe_exec_queue_is_lr(q);
720
721 xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
722 exec_queue_banned(q) || exec_queue_suspended(q));
723
724 trace_xe_sched_job_run(job);
725
726 if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
727 if (!exec_queue_registered(q))
728 register_exec_queue(q);
729 if (!lr) /* LR jobs are emitted in the exec IOCTL */
730 q->ring_ops->emit_job(job);
731 submit_exec_queue(q);
732 }
733
734 if (lr) {
735 xe_sched_job_set_error(job, -EOPNOTSUPP);
736 return NULL;
737 } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
738 return job->fence;
739 } else {
740 return dma_fence_get(job->fence);
741 }
742 }
743
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
745 {
746 struct xe_sched_job *job = to_xe_sched_job(drm_job);
747
748 xe_exec_queue_update_run_ticks(job->q);
749
750 trace_xe_sched_job_free(job);
751 xe_sched_job_put(job);
752 }
753
static int guc_read_stopped(struct xe_guc *guc)
755 {
756 return atomic_read(&guc->submission_state.stopped);
757 }
758
759 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \
760 u32 action[] = { \
761 XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET, \
762 q->guc->id, \
763 GUC_CONTEXT_##enable_disable, \
764 }
765
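/*
 * Drop the preemption timeout to the minimum, then disable scheduling and
 * mark the queue destroyed; the matching deregister is issued from the
 * schedule-disable G2H handler, hence space for both G2Hs is reserved here.
 */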
static void disable_scheduling_deregister(struct xe_guc *guc,
767 struct xe_exec_queue *q)
768 {
769 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
770 struct xe_device *xe = guc_to_xe(guc);
771 int ret;
772
773 set_min_preemption_timeout(guc, q);
774 smp_rmb();
775 ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
776 guc_read_stopped(guc), HZ * 5);
777 if (!ret) {
778 struct xe_gpu_scheduler *sched = &q->guc->sched;
779
780 drm_warn(&xe->drm, "Pending enable failed to respond");
781 xe_sched_submission_start(sched);
782 xe_gt_reset_async(q->gt);
783 xe_sched_tdr_queue_imm(sched);
784 return;
785 }
786
787 clear_exec_queue_enabled(q);
788 set_exec_queue_pending_disable(q);
789 set_exec_queue_destroyed(q);
790 trace_xe_exec_queue_scheduling_disable(q);
791
/*
 * Reserve space for both G2Hs here as the 2nd G2H is sent from a G2H
 * handler and we are not allowed to reserve G2H space in handlers.
 */
796 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
797 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET +
798 G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
799 }
800
static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
802 {
803 struct xe_guc *guc = exec_queue_to_guc(q);
804 struct xe_device *xe = guc_to_xe(guc);
805
/* Wake up the xe_wait_user_fence ioctl if the exec queue is reset */
807 wake_up_all(&xe->ufence_wq);
808
809 if (xe_exec_queue_is_lr(q))
810 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
811 else
812 xe_sched_tdr_queue_imm(&q->guc->sched);
813 }
814
/**
 * xe_guc_submit_wedge() - Wedge GuC submission
 * @guc: the GuC object
 *
 * Save the state of exec queues registered with the GuC by taking a reference
 * to each queue. Register a cleanup handler to drop the references upon driver
 * unload.
 */
void xe_guc_submit_wedge(struct xe_guc *guc)
823 {
824 struct xe_device *xe = guc_to_xe(guc);
825 struct xe_exec_queue *q;
826 unsigned long index;
827 int err;
828
829 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
830
831 err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
832 guc_submit_wedged_fini, guc);
833 if (err) {
drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2; device is wedged anyway.\n");
835 return;
836 }
837
838 mutex_lock(&guc->submission_state.lock);
839 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
840 if (xe_exec_queue_get_unless_zero(q))
841 set_exec_queue_wedged(q);
842 mutex_unlock(&guc->submission_state.lock);
843 }
844
static bool guc_submit_hint_wedged(struct xe_guc *guc)
846 {
847 struct xe_device *xe = guc_to_xe(guc);
848
849 if (xe->wedged.mode != 2)
850 return false;
851
852 if (xe_device_wedged(xe))
853 return true;
854
855 xe_device_declare_wedged(xe);
856
857 return true;
858 }
859
static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
861 {
862 struct xe_guc_exec_queue *ge =
863 container_of(w, struct xe_guc_exec_queue, lr_tdr);
864 struct xe_exec_queue *q = ge->q;
865 struct xe_guc *guc = exec_queue_to_guc(q);
866 struct xe_device *xe = guc_to_xe(guc);
867 struct xe_gpu_scheduler *sched = &ge->sched;
868 bool wedged;
869
870 xe_assert(xe, xe_exec_queue_is_lr(q));
871 trace_xe_exec_queue_lr_cleanup(q);
872
873 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
874
875 /* Kill the run_job / process_msg entry points */
876 xe_sched_submission_stop(sched);
877
878 /*
879 * Engine state now mostly stable, disable scheduling / deregister if
880 * needed. This cleanup routine might be called multiple times, where
881 * the actual async engine deregister drops the final engine ref.
882 * Calling disable_scheduling_deregister will mark the engine as
883 * destroyed and fire off the CT requests to disable scheduling /
884 * deregister, which we only want to do once. We also don't want to mark
885 * the engine as pending_disable again as this may race with the
886 * xe_guc_deregister_done_handler() which treats it as an unexpected
887 * state.
888 */
889 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
890 struct xe_guc *guc = exec_queue_to_guc(q);
891 int ret;
892
893 set_exec_queue_banned(q);
894 disable_scheduling_deregister(guc, q);
895
/*
 * Must wait for scheduling to be disabled before signalling
 * any fences; if the GT is broken the GT reset code should signal us.
 */
900 ret = wait_event_timeout(guc->ct.wq,
901 !exec_queue_pending_disable(q) ||
902 guc_read_stopped(guc), HZ * 5);
903 if (!ret) {
904 drm_warn(&xe->drm, "Schedule disable failed to respond");
905 xe_sched_submission_start(sched);
906 xe_gt_reset_async(q->gt);
907 return;
908 }
909 }
910
911 xe_sched_submission_start(sched);
912 }
913
914 #define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100)
915
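/*
 * Sample the LRC context timestamps to determine whether the job has
 * actually been executing for longer than its timeout, allowing a 5%
 * margin for GuC scheduling latency.
 */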
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
917 {
918 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
919 u32 ctx_timestamp, ctx_job_timestamp;
920 u32 timeout_ms = q->sched_props.job_timeout_ms;
921 u32 diff;
922 u64 running_time_ms;
923
924 if (!xe_sched_job_started(job)) {
925 xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
926 xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
927 q->guc->id);
928
929 return xe_sched_invalidate_job(job, 2);
930 }
931
932 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
933 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
934
/*
 * Counter wraps at ~223s at the usual 19.2MHz, so be paranoid and catch
 * possible overflows with a high timeout.
 */
939 xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
940
941 if (ctx_timestamp < ctx_job_timestamp)
942 diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
943 else
944 diff = ctx_timestamp - ctx_job_timestamp;
945
/*
 * Ensure timeout is within 5% to account for GuC scheduling latency
 */
949 running_time_ms =
950 ADJUST_FIVE_PERCENT(xe_gt_clock_interval_to_ms(gt, diff));
951
952 xe_gt_dbg(gt,
953 "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, running_time_ms=%llu, timeout_ms=%u, diff=0x%08x",
954 xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
955 q->guc->id, running_time_ms, timeout_ms, diff);
956
957 return running_time_ms >= timeout_ms;
958 }
959
static void enable_scheduling(struct xe_exec_queue *q)
961 {
962 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
963 struct xe_guc *guc = exec_queue_to_guc(q);
964 int ret;
965
966 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
967 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
968 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
969 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
970
971 set_exec_queue_pending_enable(q);
972 set_exec_queue_enabled(q);
973 trace_xe_exec_queue_scheduling_enable(q);
974
975 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
976 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
977
978 ret = wait_event_timeout(guc->ct.wq,
979 !exec_queue_pending_enable(q) ||
980 guc_read_stopped(guc), HZ * 5);
981 if (!ret || guc_read_stopped(guc)) {
982 xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
983 set_exec_queue_banned(q);
984 xe_gt_reset_async(q->gt);
985 xe_sched_tdr_queue_imm(&q->guc->sched);
986 }
987 }
988
static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
990 {
991 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
992 struct xe_guc *guc = exec_queue_to_guc(q);
993
994 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
995 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
996 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
997
998 if (immediate)
999 set_min_preemption_timeout(guc, q);
1000 clear_exec_queue_enabled(q);
1001 set_exec_queue_pending_disable(q);
1002 trace_xe_exec_queue_scheduling_disable(q);
1003
1004 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1005 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
1006 }
1007
static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1009 {
1010 u32 action[] = {
1011 XE_GUC_ACTION_DEREGISTER_CONTEXT,
1012 q->guc->id,
1013 };
1014
1015 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1016 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1017 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1018 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1019
1020 set_exec_queue_destroyed(q);
1021 trace_xe_exec_queue_deregister(q);
1022
1023 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1024 G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
1025 }
1026
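/*
 * TDR entry point: stop the scheduler, disable scheduling to get a stable
 * timestamp, and either re-arm the timer on a false positive or ban the
 * queue, signal its fences with an error and trigger cleanup.
 */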
1027 static enum drm_gpu_sched_stat
guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
1029 {
1030 struct xe_sched_job *job = to_xe_sched_job(drm_job);
1031 struct xe_sched_job *tmp_job;
1032 struct xe_exec_queue *q = job->q;
1033 struct xe_gpu_scheduler *sched = &q->guc->sched;
1034 struct xe_guc *guc = exec_queue_to_guc(q);
1035 const char *process_name = "no process";
1036 int err = -ETIME;
1037 pid_t pid = -1;
1038 int i = 0;
1039 bool wedged, skip_timeout_check;
1040
/*
 * TDR has fired before the free job worker. Common if the exec queue is
 * immediately closed after the last fence signals. Add the job back to the
 * pending list so it can be freed and kick the scheduler, ensuring the
 * free job is not lost.
 */
1047 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
1048 xe_sched_add_pending_job(sched, job);
1049 xe_sched_submission_start(sched);
1050
1051 return DRM_GPU_SCHED_STAT_NOMINAL;
1052 }
1053
1054 /* Kill the run_job entry point */
1055 xe_sched_submission_stop(sched);
1056
1057 /* Must check all state after stopping scheduler */
1058 skip_timeout_check = exec_queue_reset(q) ||
1059 exec_queue_killed_or_banned_or_wedged(q) ||
1060 exec_queue_destroyed(q);
1061
1062 /*
1063 * XXX: Sampling timeout doesn't work in wedged mode as we have to
1064 * modify scheduling state to read timestamp. We could read the
1065 * timestamp from a register to accumulate current running time but this
1066 * doesn't work for SRIOV. For now assuming timeouts in wedged mode are
1067 * genuine timeouts.
1068 */
1069 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
1070
1071 /* Engine state now stable, disable scheduling to check timestamp */
1072 if (!wedged && exec_queue_registered(q)) {
1073 int ret;
1074
1075 if (exec_queue_reset(q))
1076 err = -EIO;
1077
1078 if (!exec_queue_destroyed(q)) {
1079 /*
1080 * Wait for any pending G2H to flush out before
1081 * modifying state
1082 */
1083 ret = wait_event_timeout(guc->ct.wq,
1084 !exec_queue_pending_enable(q) ||
1085 guc_read_stopped(guc), HZ * 5);
1086 if (!ret || guc_read_stopped(guc))
1087 goto trigger_reset;
1088
/*
 * Flag communicates to the G2H handler that the schedule
 * disable originated from a timeout check. The G2H handler then
 * avoids triggering cleanup or deregistering the exec
 * queue.
 */
1095 set_exec_queue_check_timeout(q);
1096 disable_scheduling(q, skip_timeout_check);
1097 }
1098
/*
 * Must wait for scheduling to be disabled before signalling
 * any fences; if the GT is broken the GT reset code should signal us.
 *
 * FIXME: Tests can generate a ton of 0x6000 (IOMMU CAT fault
 * error) messages which can cause the schedule disable to get
 * lost. If this occurs, trigger a GT reset to recover.
 */
1107 smp_rmb();
1108 ret = wait_event_timeout(guc->ct.wq,
1109 !exec_queue_pending_disable(q) ||
1110 guc_read_stopped(guc), HZ * 5);
1111 if (!ret || guc_read_stopped(guc)) {
1112 trigger_reset:
1113 if (!ret)
1114 xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
1115 set_exec_queue_extra_ref(q);
1116 xe_exec_queue_get(q); /* GT reset owns this */
1117 set_exec_queue_banned(q);
1118 xe_gt_reset_async(q->gt);
1119 xe_sched_tdr_queue_imm(sched);
1120 goto rearm;
1121 }
1122 }
1123
1124 /*
1125 * Check if job is actually timed out, if so restart job execution and TDR
1126 */
1127 if (!wedged && !skip_timeout_check && !check_timeout(q, job) &&
1128 !exec_queue_reset(q) && exec_queue_registered(q)) {
1129 clear_exec_queue_check_timeout(q);
1130 goto sched_enable;
1131 }
1132
1133 if (q->vm && q->vm->xef) {
1134 process_name = q->vm->xef->process_name;
1135 pid = q->vm->xef->pid;
1136 }
1137 xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
1138 xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
1139 q->guc->id, q->flags, process_name, pid);
1140
1141 trace_xe_sched_job_timedout(job);
1142
1143 if (!exec_queue_killed(q))
1144 xe_devcoredump(job);
1145
/*
 * Kernel jobs should never fail, nor should VM jobs; if they do,
 * something has gone wrong and the GT needs a reset
 */
1150 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
1151 "Kernel-submitted job timed out\n");
1152 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
1153 "VM job timed out on non-killed execqueue\n");
1154 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
1155 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
1156 if (!xe_sched_invalidate_job(job, 2)) {
1157 clear_exec_queue_check_timeout(q);
1158 xe_gt_reset_async(q->gt);
1159 goto rearm;
1160 }
1161 }
1162
1163 /* Finish cleaning up exec queue via deregister */
1164 set_exec_queue_banned(q);
1165 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
1166 set_exec_queue_extra_ref(q);
1167 xe_exec_queue_get(q);
1168 __deregister_exec_queue(guc, q);
1169 }
1170
1171 /* Stop fence signaling */
1172 xe_hw_fence_irq_stop(q->fence_irq);
1173
1174 /*
1175 * Fence state now stable, stop / start scheduler which cleans up any
1176 * fences that are complete
1177 */
1178 xe_sched_add_pending_job(sched, job);
1179 xe_sched_submission_start(sched);
1180
1181 xe_guc_exec_queue_trigger_cleanup(q);
1182
1183 /* Mark all outstanding jobs as bad, thus completing them */
1184 spin_lock(&sched->base.job_list_lock);
1185 list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
1186 xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
1187 spin_unlock(&sched->base.job_list_lock);
1188
1189 /* Start fence signaling */
1190 xe_hw_fence_irq_start(q->fence_irq);
1191
1192 return DRM_GPU_SCHED_STAT_NOMINAL;
1193
1194 sched_enable:
1195 enable_scheduling(q);
1196 rearm:
/*
 * XXX: Ideally we would adjust the timeout based on the current execution
 * time, but there is not currently an easy way to do so in the DRM
 * scheduler. With some thought, do this in a follow up.
 */
1202 xe_sched_add_pending_job(sched, job);
1203 xe_sched_submission_start(sched);
1204
1205 return DRM_GPU_SCHED_STAT_NOMINAL;
1206 }
1207
static void __guc_exec_queue_fini_async(struct work_struct *w)
1209 {
1210 struct xe_guc_exec_queue *ge =
1211 container_of(w, struct xe_guc_exec_queue, fini_async);
1212 struct xe_exec_queue *q = ge->q;
1213 struct xe_guc *guc = exec_queue_to_guc(q);
1214
1215 xe_pm_runtime_get(guc_to_xe(guc));
1216 trace_xe_exec_queue_destroy(q);
1217
1218 if (xe_exec_queue_is_lr(q))
1219 cancel_work_sync(&ge->lr_tdr);
1220 release_guc_id(guc, q);
1221 xe_sched_entity_fini(&ge->entity);
1222 xe_sched_fini(&ge->sched);
1223
1224 kfree(ge);
1225 xe_exec_queue_fini(q);
1226 xe_pm_runtime_put(guc_to_xe(guc));
1227 }
1228
static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
1230 {
1231 struct xe_guc *guc = exec_queue_to_guc(q);
1232 struct xe_device *xe = guc_to_xe(guc);
1233
1234 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
1235
1236 /* We must block on kernel engines so slabs are empty on driver unload */
1237 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
1238 __guc_exec_queue_fini_async(&q->guc->fini_async);
1239 else
1240 queue_work(xe->destroy_wq, &q->guc->fini_async);
1241 }
1242
static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
1244 {
/*
 * Might be done from within the GPU scheduler, so this needs to be async as
 * we fini the scheduler when the engine is fini'd and the scheduler can't
 * complete fini within itself (circular dependency). Async resolves this,
 * and we don't really care when everything is fini'd, just that it is.
 */
1252 guc_exec_queue_fini_async(q);
1253 }
1254
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
1256 {
1257 struct xe_exec_queue *q = msg->private_data;
1258 struct xe_guc *guc = exec_queue_to_guc(q);
1259 struct xe_device *xe = guc_to_xe(guc);
1260
1261 xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
1262 trace_xe_exec_queue_cleanup_entity(q);
1263
1264 if (exec_queue_registered(q))
1265 disable_scheduling_deregister(guc, q);
1266 else
1267 __guc_exec_queue_fini(guc, q);
1268 }
1269
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
1271 {
1272 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
1273 }
1274
static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
1276 {
1277 struct xe_exec_queue *q = msg->private_data;
1278 struct xe_guc *guc = exec_queue_to_guc(q);
1279
1280 if (guc_exec_queue_allowed_to_change_state(q))
1281 init_policies(guc, q);
1282 kfree(msg);
1283 }
1284
static void __suspend_fence_signal(struct xe_exec_queue *q)
1286 {
1287 if (!q->guc->suspend_pending)
1288 return;
1289
1290 WRITE_ONCE(q->guc->suspend_pending, false);
1291 wake_up(&q->guc->suspend_wait);
1292 }
1293
static void suspend_fence_signal(struct xe_exec_queue *q)
1295 {
1296 struct xe_guc *guc = exec_queue_to_guc(q);
1297 struct xe_device *xe = guc_to_xe(guc);
1298
1299 xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
1300 guc_read_stopped(guc));
1301 xe_assert(xe, q->guc->suspend_pending);
1302
1303 __suspend_fence_signal(q);
1304 }
1305
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
1307 {
1308 struct xe_exec_queue *q = msg->private_data;
1309 struct xe_guc *guc = exec_queue_to_guc(q);
1310
1311 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
1312 exec_queue_enabled(q)) {
1313 wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
1314 guc_read_stopped(guc));
1315
1316 if (!guc_read_stopped(guc)) {
1317 s64 since_resume_ms =
1318 ktime_ms_delta(ktime_get(),
1319 q->guc->resume_time);
1320 s64 wait_ms = q->vm->preempt.min_run_period_ms -
1321 since_resume_ms;
1322
1323 if (wait_ms > 0 && q->guc->resume_time)
1324 msleep(wait_ms);
1325
1326 set_exec_queue_suspended(q);
1327 disable_scheduling(q, false);
1328 }
1329 } else if (q->guc->suspend_pending) {
1330 set_exec_queue_suspended(q);
1331 suspend_fence_signal(q);
1332 }
1333 }
1334
static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
1336 {
1337 struct xe_exec_queue *q = msg->private_data;
1338
1339 if (guc_exec_queue_allowed_to_change_state(q)) {
1340 clear_exec_queue_suspended(q);
1341 if (!exec_queue_enabled(q)) {
1342 q->guc->resume_time = RESUME_PENDING;
1343 enable_scheduling(q);
1344 }
1345 } else {
1346 clear_exec_queue_suspended(q);
1347 }
1348 }
1349
1350 #define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
1351 #define SET_SCHED_PROPS 2
1352 #define SUSPEND 3
1353 #define RESUME 4
1354 #define OPCODE_MASK 0xf
1355 #define MSG_LOCKED BIT(8)
1356
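/*
 * Dispatch a scheduler message to its handler based on opcode and drop the
 * runtime PM reference taken when the message was queued.
 */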
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
1358 {
1359 struct xe_device *xe = guc_to_xe(exec_queue_to_guc(msg->private_data));
1360
1361 trace_xe_sched_msg_recv(msg);
1362
1363 switch (msg->opcode) {
1364 case CLEANUP:
1365 __guc_exec_queue_process_msg_cleanup(msg);
1366 break;
1367 case SET_SCHED_PROPS:
1368 __guc_exec_queue_process_msg_set_sched_props(msg);
1369 break;
1370 case SUSPEND:
1371 __guc_exec_queue_process_msg_suspend(msg);
1372 break;
1373 case RESUME:
1374 __guc_exec_queue_process_msg_resume(msg);
1375 break;
1376 default:
1377 XE_WARN_ON("Unknown message type");
1378 }
1379
1380 xe_pm_runtime_put(xe);
1381 }
1382
1383 static const struct drm_sched_backend_ops drm_sched_ops = {
1384 .run_job = guc_exec_queue_run_job,
1385 .free_job = guc_exec_queue_free_job,
1386 .timedout_job = guc_exec_queue_timedout_job,
1387 };
1388
1389 static const struct xe_sched_backend_ops xe_sched_ops = {
1390 .process_msg = guc_exec_queue_process_msg,
1391 };
1392
static int guc_exec_queue_init(struct xe_exec_queue *q)
1394 {
1395 struct xe_gpu_scheduler *sched;
1396 struct xe_guc *guc = exec_queue_to_guc(q);
1397 struct xe_device *xe = guc_to_xe(guc);
1398 struct xe_guc_exec_queue *ge;
1399 long timeout;
1400 int err, i;
1401
1402 xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
1403
1404 ge = kzalloc(sizeof(*ge), GFP_KERNEL);
1405 if (!ge)
1406 return -ENOMEM;
1407
1408 q->guc = ge;
1409 ge->q = q;
1410 init_waitqueue_head(&ge->suspend_wait);
1411
1412 for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
1413 INIT_LIST_HEAD(&ge->static_msgs[i].link);
1414
1415 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
1416 msecs_to_jiffies(q->sched_props.job_timeout_ms);
1417 err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
1418 NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
1419 timeout, guc_to_gt(guc)->ordered_wq, NULL,
1420 q->name, gt_to_xe(q->gt)->drm.dev);
1421 if (err)
1422 goto err_free;
1423
1424 sched = &ge->sched;
1425 err = xe_sched_entity_init(&ge->entity, sched);
1426 if (err)
1427 goto err_sched;
1428
1429 if (xe_exec_queue_is_lr(q))
1430 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
1431
1432 mutex_lock(&guc->submission_state.lock);
1433
1434 err = alloc_guc_id(guc, q);
1435 if (err)
1436 goto err_entity;
1437
1438 q->entity = &ge->entity;
1439
1440 if (guc_read_stopped(guc))
1441 xe_sched_stop(sched);
1442
1443 mutex_unlock(&guc->submission_state.lock);
1444
1445 xe_exec_queue_assign_name(q, q->guc->id);
1446
1447 trace_xe_exec_queue_create(q);
1448
1449 return 0;
1450
1451 err_entity:
1452 mutex_unlock(&guc->submission_state.lock);
1453 xe_sched_entity_fini(&ge->entity);
1454 err_sched:
1455 xe_sched_fini(&ge->sched);
1456 err_free:
1457 kfree(ge);
1458
1459 return err;
1460 }
1461
static void guc_exec_queue_kill(struct xe_exec_queue *q)
1463 {
1464 trace_xe_exec_queue_kill(q);
1465 set_exec_queue_killed(q);
1466 __suspend_fence_signal(q);
1467 xe_guc_exec_queue_trigger_cleanup(q);
1468 }
1469
static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
1471 u32 opcode)
1472 {
1473 xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
1474
1475 INIT_LIST_HEAD(&msg->link);
1476 msg->opcode = opcode & OPCODE_MASK;
1477 msg->private_data = q;
1478
1479 trace_xe_sched_msg_add(msg);
1480 if (opcode & MSG_LOCKED)
1481 xe_sched_add_msg_locked(&q->guc->sched, msg);
1482 else
1483 xe_sched_add_msg(&q->guc->sched, msg);
1484 }
1485
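/*
 * Queue a static message only if it is not already pending; callers must
 * hold the scheduler message lock (hence MSG_LOCKED).
 */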
static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
1487 struct xe_sched_msg *msg,
1488 u32 opcode)
1489 {
1490 if (!list_empty(&msg->link))
1491 return false;
1492
1493 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);
1494
1495 return true;
1496 }
1497
1498 #define STATIC_MSG_CLEANUP 0
1499 #define STATIC_MSG_SUSPEND 1
1500 #define STATIC_MSG_RESUME 2
static void guc_exec_queue_fini(struct xe_exec_queue *q)
1502 {
1503 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
1504
1505 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
1506 guc_exec_queue_add_msg(q, msg, CLEANUP);
1507 else
1508 __guc_exec_queue_fini(exec_queue_to_guc(q), q);
1509 }
1510
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
1512 enum xe_exec_queue_priority priority)
1513 {
1514 struct xe_sched_msg *msg;
1515
1516 if (q->sched_props.priority == priority ||
1517 exec_queue_killed_or_banned_or_wedged(q))
1518 return 0;
1519
1520 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1521 if (!msg)
1522 return -ENOMEM;
1523
1524 q->sched_props.priority = priority;
1525 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1526
1527 return 0;
1528 }
1529
static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
1531 {
1532 struct xe_sched_msg *msg;
1533
1534 if (q->sched_props.timeslice_us == timeslice_us ||
1535 exec_queue_killed_or_banned_or_wedged(q))
1536 return 0;
1537
1538 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1539 if (!msg)
1540 return -ENOMEM;
1541
1542 q->sched_props.timeslice_us = timeslice_us;
1543 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1544
1545 return 0;
1546 }
1547
static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
1549 u32 preempt_timeout_us)
1550 {
1551 struct xe_sched_msg *msg;
1552
1553 if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
1554 exec_queue_killed_or_banned_or_wedged(q))
1555 return 0;
1556
1557 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1558 if (!msg)
1559 return -ENOMEM;
1560
1561 q->sched_props.preempt_timeout_us = preempt_timeout_us;
1562 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1563
1564 return 0;
1565 }
1566
static int guc_exec_queue_suspend(struct xe_exec_queue *q)
1568 {
1569 struct xe_gpu_scheduler *sched = &q->guc->sched;
1570 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
1571
1572 if (exec_queue_killed_or_banned_or_wedged(q))
1573 return -EINVAL;
1574
1575 xe_sched_msg_lock(sched);
1576 if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
1577 q->guc->suspend_pending = true;
1578 xe_sched_msg_unlock(sched);
1579
1580 return 0;
1581 }
1582
static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
1584 {
1585 struct xe_guc *guc = exec_queue_to_guc(q);
1586 int ret;
1587
/*
 * We likely don't need to check exec_queue_killed() as we clear
 * suspend_pending upon kill, but to be paranoid about races in which
 * suspend_pending is set after kill, also check kill here.
 */
1593 ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
1594 !READ_ONCE(q->guc->suspend_pending) ||
1595 exec_queue_killed(q) ||
1596 guc_read_stopped(guc),
1597 HZ * 5);
1598
1599 if (!ret) {
1600 xe_gt_warn(guc_to_gt(guc),
1601 "Suspend fence, guc_id=%d, failed to respond",
1602 q->guc->id);
1603 /* XXX: Trigger GT reset? */
1604 return -ETIME;
1605 }
1606
1607 return ret < 0 ? ret : 0;
1608 }
1609
static void guc_exec_queue_resume(struct xe_exec_queue *q)
1611 {
1612 struct xe_gpu_scheduler *sched = &q->guc->sched;
1613 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
1614 struct xe_guc *guc = exec_queue_to_guc(q);
1615 struct xe_device *xe = guc_to_xe(guc);
1616
1617 xe_assert(xe, !q->guc->suspend_pending);
1618
1619 xe_sched_msg_lock(sched);
1620 guc_exec_queue_try_add_msg(q, msg, RESUME);
1621 xe_sched_msg_unlock(sched);
1622 }
1623
static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
1625 {
1626 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
1627 }
1628
1629 /*
1630 * All of these functions are an abstraction layer which other parts of XE can
1631 * use to trap into the GuC backend. All of these functions, aside from init,
1632 * really shouldn't do much other than trap into the DRM scheduler which
1633 * synchronizes these operations.
1634 */
1635 static const struct xe_exec_queue_ops guc_exec_queue_ops = {
1636 .init = guc_exec_queue_init,
1637 .kill = guc_exec_queue_kill,
1638 .fini = guc_exec_queue_fini,
1639 .set_priority = guc_exec_queue_set_priority,
1640 .set_timeslice = guc_exec_queue_set_timeslice,
1641 .set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
1642 .suspend = guc_exec_queue_suspend,
1643 .suspend_wait = guc_exec_queue_suspend_wait,
1644 .resume = guc_exec_queue_resume,
1645 .reset_status = guc_exec_queue_reset_status,
1646 };
1647
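/*
 * Called with submission stopped during reset: clean up lost G2H credits and
 * references, clear transient state bits, and ban user queues that had a
 * started-but-unfinished job or have been through too many GT resets.
 */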
static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
1649 {
1650 struct xe_gpu_scheduler *sched = &q->guc->sched;
1651
1652 /* Stop scheduling + flush any DRM scheduler operations */
1653 xe_sched_submission_stop(sched);
1654
1655 /* Clean up lost G2H + reset engine state */
1656 if (exec_queue_registered(q)) {
1657 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1658 xe_exec_queue_put(q);
1659 else if (exec_queue_destroyed(q))
1660 __guc_exec_queue_fini(guc, q);
1661 }
1662 if (q->guc->suspend_pending) {
1663 set_exec_queue_suspended(q);
1664 suspend_fence_signal(q);
1665 }
1666 atomic_and(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_BANNED |
1667 EXEC_QUEUE_STATE_KILLED | EXEC_QUEUE_STATE_DESTROYED |
1668 EXEC_QUEUE_STATE_SUSPENDED,
1669 &q->guc->state);
1670 q->guc->resume_time = 0;
1671 trace_xe_exec_queue_stop(q);
1672
/*
 * Ban any engine (aside from kernel engines and engines used for VM ops)
 * that has a started but not completed job, or whose job has gone through
 * a GT reset more than twice.
 */
1678 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
1679 struct xe_sched_job *job = xe_sched_first_pending_job(sched);
1680 bool ban = false;
1681
1682 if (job) {
1683 if ((xe_sched_job_started(job) &&
1684 !xe_sched_job_completed(job)) ||
1685 xe_sched_invalidate_job(job, 2)) {
1686 trace_xe_sched_job_ban(job);
1687 ban = true;
1688 }
1689 } else if (xe_exec_queue_is_lr(q) &&
1690 (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
1691 ban = true;
1692 }
1693
1694 if (ban) {
1695 set_exec_queue_banned(q);
1696 xe_guc_exec_queue_trigger_cleanup(q);
1697 }
1698 }
1699 }
1700
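/**
 * xe_guc_submit_reset_prepare - Prepare GuC submission for a reset
 * @guc: the GuC object
 *
 * Mark GuC submission as stopped and wake up anyone waiting on the CT wait
 * queue so they can observe the stopped state.
 *
 * Returns: the previous value of the stopped flag, i.e. non-zero if submission
 * was already stopped.
 */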
int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
	int ret;

	/*
	 * Using an atomic here rather than submission_state.lock as this
	 * function can be called while holding the CT lock (engine reset
	 * failure). submission_state.lock needs the CT lock to resubmit jobs.
	 * An atomic is not ideal, but it works to protect against a concurrent
	 * reset and to release any TDRs waiting on
	 * guc->submission_state.stopped.
	 */
	ret = atomic_fetch_or(1, &guc->submission_state.stopped);
	smp_wmb();
	wake_up_all(&guc->ct.wq);

	return ret;
}

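/**
 * xe_guc_submit_reset_wait - Wait for a GuC submission reset to complete
 * @guc: the GuC object
 *
 * Sleep until either the device is declared wedged or the GuC is no longer
 * marked as stopped.
 */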
void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
	wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
		   !guc_read_stopped(guc));
}

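/**
 * xe_guc_submit_stop - Stop all GuC submission backends
 * @guc: the GuC object
 *
 * Walk every registered exec queue, stop its DRM scheduler and clean up the
 * queue's GuC-side state. Must only be called while GuC submission is marked
 * as stopped (see xe_guc_submit_reset_prepare()).
 */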
void xe_guc_submit_stop(struct xe_guc *guc)
{
	struct xe_exec_queue *q;
	unsigned long index;
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, guc_read_stopped(guc) == 1);

	mutex_lock(&guc->submission_state.lock);

	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
		/* Prevent redundant attempts to stop parallel queues */
		if (q->guc->id != index)
			continue;

		guc_exec_queue_stop(guc, q);
	}

	mutex_unlock(&guc->submission_state.lock);

	/*
	 * No one can enter the backend at this point, aside from new engine
	 * creation which is protected by guc->submission_state.lock.
	 */
}

static void guc_exec_queue_start(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;

	if (!exec_queue_killed_or_banned_or_wedged(q)) {
		int i;

		trace_xe_exec_queue_resubmit(q);
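		/*
		 * Move each ring head to its tail so ring contents emitted
		 * before the reset are not replayed; the jobs themselves are
		 * re-emitted below via xe_sched_resubmit_jobs().
		 */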
		for (i = 0; i < q->width; ++i)
			xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
		xe_sched_resubmit_jobs(sched);
	}

	xe_sched_submission_start(sched);
	xe_sched_submission_resume_tdr(sched);
}

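/**
 * xe_guc_submit_start - Restart all GuC submission backends
 * @guc: the GuC object
 *
 * Clear the stopped state, restart the DRM scheduler for every registered
 * exec queue (resubmitting pending jobs where appropriate) and wake up anyone
 * waiting on the CT wait queue.
 *
 * Returns: 0 on success.
 */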
int xe_guc_submit_start(struct xe_guc *guc)
{
	struct xe_exec_queue *q;
	unsigned long index;
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, guc_read_stopped(guc) == 1);

	mutex_lock(&guc->submission_state.lock);
	atomic_dec(&guc->submission_state.stopped);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
		/* Prevent redundant attempts to start parallel queues */
		if (q->guc->id != index)
			continue;

		guc_exec_queue_start(q);
	}
	mutex_unlock(&guc->submission_state.lock);

	wake_up_all(&guc->ct.wq);

	return 0;
}

static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;

	if (unlikely(guc_id >= GUC_ID_MAX)) {
		drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
		return NULL;
	}

	q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
	if (unlikely(!q)) {
		drm_err(&xe->drm, "No engine present for guc_id %u", guc_id);
		return NULL;
	}

	xe_assert(xe, guc_id >= q->guc->id);
	xe_assert(xe, guc_id < (q->guc->id + q->width));

	return q;
}

static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_CONTEXT,
		q->guc->id,
	};

	xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));

	trace_xe_exec_queue_deregister(q);

	xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
}

static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
			      u32 runnable_state)
{
	trace_xe_exec_queue_scheduling_done(q);

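	/*
	 * runnable_state == 1 acknowledges a schedule-enable request,
	 * runnable_state == 0 acknowledges a schedule-disable request.
	 */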
	if (runnable_state == 1) {
		xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));

		q->guc->resume_time = ktime_get();
		clear_exec_queue_pending_enable(q);
		smp_wmb();
		wake_up_all(&guc->ct.wq);
	} else {
		bool check_timeout = exec_queue_check_timeout(q);

		xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
		xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));

		clear_exec_queue_pending_disable(q);
		if (q->guc->suspend_pending) {
			suspend_fence_signal(q);
		} else {
			if (exec_queue_banned(q) || check_timeout) {
				smp_wmb();
				wake_up_all(&guc->ct.wq);
			}
			if (!check_timeout)
				deregister_exec_queue(guc, q);
		}
	}
}

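/**
 * xe_guc_sched_done_handler - Handle a G2H scheduling-done message
 * @guc: the GuC object
 * @msg: the G2H message payload; msg[0] is the guc_id, msg[1] the runnable
 *	 state
 * @len: length of @msg in dwords
 *
 * Returns: 0 on success, -EPROTO on a malformed message or an unexpected
 * exec queue state.
 */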
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id, runnable_state;

	if (unlikely(len < 2)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	/* Read the payload only after validating the length */
	guc_id = msg[0];
	runnable_state = msg[1];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	if (unlikely(!exec_queue_pending_enable(q) &&
		     !exec_queue_pending_disable(q))) {
		xe_gt_err(guc_to_gt(guc),
			  "SCHED_DONE: Unexpected engine state 0x%04x, guc_id=%d, runnable_state=%u",
			  atomic_read(&q->guc->state), q->guc->id,
			  runnable_state);
		return -EPROTO;
	}

	handle_sched_done(guc, q, runnable_state);

	return 0;
}

static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
{
	trace_xe_exec_queue_deregister_done(q);

	clear_exec_queue_registered(q);

	if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
		xe_exec_queue_put(q);
	else
		__guc_exec_queue_fini(guc, q);
}

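/**
 * xe_guc_deregister_done_handler - Handle a G2H deregister-done message
 * @guc: the GuC object
 * @msg: the G2H message payload; msg[0] is the guc_id
 * @len: length of @msg in dwords
 *
 * Returns: 0 on success, -EPROTO on a malformed message or an unexpected
 * exec queue state.
 */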
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id;

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_id = msg[0];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
	    exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
		xe_gt_err(guc_to_gt(guc),
			  "DEREGISTER_DONE: Unexpected engine state 0x%04x, guc_id=%d",
			  atomic_read(&q->guc->state), q->guc->id);
		return -EPROTO;
	}

	handle_deregister_done(guc, q);

	return 0;
}

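/**
 * xe_guc_exec_queue_reset_handler - Handle a G2H engine reset notification
 * @guc: the GuC object
 * @msg: the G2H message payload; msg[0] is the guc_id
 * @len: length of @msg in dwords
 *
 * Marks the exec queue as reset and, unless it is already banned or having its
 * timeout checked, kicks the cleanup path so pending jobs get cancelled.
 *
 * Returns: 0 on success, -EPROTO on a malformed message.
 */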
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id;

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_id = msg[0];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
		   xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);

	/* FIXME: Do error capture, most likely async */

	trace_xe_exec_queue_reset(q);

	/*
	 * A banned engine is a NOP at this point (it came from
	 * guc_exec_queue_timedout_job). Otherwise, kick the DRM scheduler to
	 * cancel the jobs by setting the job timeout to the minimum value,
	 * which kicks guc_exec_queue_timedout_job.
	 */
	set_exec_queue_reset(q);
	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
		xe_guc_exec_queue_trigger_cleanup(q);

	return 0;
}

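/**
 * xe_guc_exec_queue_memory_cat_error_handler - Handle a G2H memory CAT error
 * @guc: the GuC object
 * @msg: the G2H message payload; msg[0] is the guc_id
 * @len: length of @msg in dwords
 *
 * Treated the same way as an engine reset notification.
 *
 * Returns: 0 on success, -EPROTO on a malformed message.
 */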
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
					       u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id;

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_id = msg[0];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
		  xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);

	trace_xe_exec_queue_memory_cat_error(q);

	/* Treat the same as an engine reset */
	set_exec_queue_reset(q);
	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
		xe_guc_exec_queue_trigger_cleanup(q);

	return 0;
}

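/**
 * xe_guc_exec_queue_reset_failure_handler - Handle a G2H engine reset failure
 * @guc: the GuC object
 * @msg: the G2H message payload; msg[0] is the GuC engine class, msg[1] the
 *	 instance and msg[2] the failure reason
 * @len: length of @msg in dwords
 *
 * A failed engine reset is unexpected, so log an error and escalate to an
 * asynchronous GT reset.
 *
 * Returns: 0 on success, -EPROTO on a malformed message.
 */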
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	u8 guc_class, instance;
	u32 reason;

	if (unlikely(len != 3)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_class = msg[0];
	instance = msg[1];
	reason = msg[2];

	/* Unexpected failure of a hardware feature, log an actual error */
	drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
		guc_class, instance, reason);

	xe_gt_reset_async(guc_to_gt(guc));

	return 0;
}

static void
guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
				   struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
	int i;

	snapshot->guc.wqi_head = q->guc->wqi_head;
	snapshot->guc.wqi_tail = q->guc->wqi_tail;
	snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
	snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
	snapshot->parallel.wq_desc.status = parallel_read(xe, map,
							  wq_desc.wq_status);

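	/*
	 * Copy the work queue contents between head and tail, one dword at a
	 * time, wrapping at WQ_SIZE.
	 */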
	if (snapshot->parallel.wq_desc.head !=
	    snapshot->parallel.wq_desc.tail) {
		for (i = snapshot->parallel.wq_desc.head;
		     i != snapshot->parallel.wq_desc.tail;
		     i = (i + sizeof(u32)) % WQ_SIZE)
			snapshot->parallel.wq[i / sizeof(u32)] =
				parallel_read(xe, map, wq[i / sizeof(u32)]);
	}
}

static void
guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{
	int i;

	drm_printf(p, "\tWQ head: %u (internal), %d (memory)\n",
		   snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head);
	drm_printf(p, "\tWQ tail: %u (internal), %d (memory)\n",
		   snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail);
	drm_printf(p, "\tWQ status: %u\n", snapshot->parallel.wq_desc.status);

	if (snapshot->parallel.wq_desc.head !=
	    snapshot->parallel.wq_desc.tail) {
		for (i = snapshot->parallel.wq_desc.head;
		     i != snapshot->parallel.wq_desc.tail;
		     i = (i + sizeof(u32)) % WQ_SIZE)
			drm_printf(p, "\tWQ[%zu]: 0x%08x\n", i / sizeof(u32),
				   snapshot->parallel.wq[i / sizeof(u32)]);
	}
}

/**
 * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
 * @q: faulty exec queue
 *
 * This can be printed out at a later stage, e.g. during dev_coredump
 * analysis.
 *
 * Returns: a GuC Submit Engine snapshot object that must be freed by the
 * caller, using `xe_guc_exec_queue_snapshot_free`.
 */
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_guc_submit_exec_queue_snapshot *snapshot;
	int i;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->guc.id = q->guc->id;
	memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
	snapshot->class = q->class;
	snapshot->logical_mask = q->logical_mask;
	snapshot->width = q->width;
	snapshot->refcount = kref_read(&q->refcount);
	snapshot->sched_timeout = sched->base.timeout;
	snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
	snapshot->sched_props.preempt_timeout_us =
		q->sched_props.preempt_timeout_us;

	snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
				      GFP_ATOMIC);
	if (snapshot->lrc) {
		for (i = 0; i < q->width; ++i) {
			struct xe_lrc *lrc = q->lrc[i];

			snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
		}
	}

	snapshot->schedule_state = atomic_read(&q->guc->state);
	snapshot->exec_queue_flags = q->flags;

	snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
	if (snapshot->parallel_execution)
		guc_exec_queue_wq_snapshot_capture(q, snapshot);

	spin_lock(&sched->base.job_list_lock);
	snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
	snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
					       sizeof(struct pending_list_snapshot),
					       GFP_ATOMIC);
	if (snapshot->pending_list) {
		struct xe_sched_job *job_iter;

		i = 0;
		list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
			snapshot->pending_list[i].seqno =
				xe_sched_job_seqno(job_iter);
			snapshot->pending_list[i].fence =
				dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
			snapshot->pending_list[i].finished =
				dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
				? 1 : 0;
			i++;
		}
	}

	spin_unlock(&sched->base.job_list_lock);

	return snapshot;
}

/**
 * xe_guc_exec_queue_snapshot_capture_delayed - Take the delayed part of a GuC Engine snapshot.
 * @snapshot: Previously captured snapshot of the exec queue.
 *
 * This captures data that requires taking locks, so it cannot be done in the
 * signaling path.
 */
void
xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	int i;

	if (!snapshot || !snapshot->lrc)
		return;

	for (i = 0; i < snapshot->width; ++i)
		xe_lrc_snapshot_capture_delayed(snapshot->lrc[i]);
}

/**
 * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC Submit Engine snapshot object.
 */
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{
	int i;

	if (!snapshot)
		return;

	drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
	drm_printf(p, "\tName: %s\n", snapshot->name);
	drm_printf(p, "\tClass: %d\n", snapshot->class);
	drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
	drm_printf(p, "\tWidth: %d\n", snapshot->width);
	drm_printf(p, "\tRef: %d\n", snapshot->refcount);
	drm_printf(p, "\tTimeout: %ld (ms)\n", snapshot->sched_timeout);
	drm_printf(p, "\tTimeslice: %u (us)\n",
		   snapshot->sched_props.timeslice_us);
	drm_printf(p, "\tPreempt timeout: %u (us)\n",
		   snapshot->sched_props.preempt_timeout_us);

	for (i = 0; snapshot->lrc && i < snapshot->width; ++i)
		xe_lrc_snapshot_print(snapshot->lrc[i], p);

	drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
	drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);

	if (snapshot->parallel_execution)
		guc_exec_queue_wq_snapshot_print(snapshot, p);

	for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
	     i++)
		drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
			   snapshot->pending_list[i].seqno,
			   snapshot->pending_list[i].fence,
			   snapshot->pending_list[i].finished);
}

/**
 * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	int i;

	if (!snapshot)
		return;

	if (snapshot->lrc) {
		for (i = 0; i < snapshot->width; i++)
			xe_lrc_snapshot_free(snapshot->lrc[i]);
		kfree(snapshot->lrc);
	}
	kfree(snapshot->pending_list);
	kfree(snapshot);
}

static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
	struct xe_guc_submit_exec_queue_snapshot *snapshot;

	snapshot = xe_guc_exec_queue_snapshot_capture(q);
	xe_guc_exec_queue_snapshot_print(snapshot, p);
	xe_guc_exec_queue_snapshot_free(snapshot);
}

/**
 * xe_guc_submit_print - Print snapshots of all GuC engines.
 * @guc: GuC.
 * @p: drm_printer where it will be printed out.
 *
 * This function captures and prints snapshots of **all** GuC engines.
 */
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_exec_queue *q;
	unsigned long index;

	if (!xe_device_uc_enabled(guc_to_xe(guc)))
		return;

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_print(q, p);
	mutex_unlock(&guc->submission_state.lock);
}