// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <linux/string_helpers.h>

#include "display/intel_display_reset.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "gt/intel_gt_regs.h"

#include "gt/uc/intel_gsc_fw.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

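/*
 * Charge the owning client's ban score for a hang caused by one of its
 * contexts; an outright context ban and hangs in rapid succession both
 * contribute to the score.
 */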
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
	struct drm_i915_file_private *file_priv = ctx->file_priv;
	unsigned long prev_hang;
	unsigned int score;

	if (IS_ERR_OR_NULL(file_priv))
		return;

	score = 0;
	if (banned)
		score = I915_CLIENT_SCORE_CONTEXT_BAN;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		drm_dbg(&ctx->i915->drm,
			"client %s: gained %u ban score, now %u\n",
			ctx->name, score,
			atomic_read(&file_priv->ban_score));
	}
}

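/*
 * Blame the hung request's GEM context: bump its guilty count and decide
 * whether the repeat offender should now be banned from further submission.
 */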
static bool mark_guilty(struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	unsigned long prev_hang;
	bool banned;
	int i;

	if (intel_context_is_closed(rq->context))
		return true;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return intel_context_is_banned(rq->context);

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx)) {
		banned = false;
		goto out;
	}

	drm_notice(&ctx->i915->drm,
		   "%s context reset due to GPU hang\n",
		   ctx->name);

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned)
		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
			ctx->name, atomic_read(&ctx->guilty_count));

	client_mark_guilty(ctx, banned);

out:
	i915_gem_context_put(ctx);
	return banned;
}

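/* The hang was none of this context's doing: only note that it was active. */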
static void mark_innocent(struct i915_request *rq)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		atomic_inc(&ctx->active_count);
	rcu_read_unlock();
}

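/*
 * Flag a request caught up in a reset: guilty requests are skipped and
 * poisoned with -EIO, innocent bystanders are marked -EAGAIN for replay,
 * and repeat offenders have their context banned.
 */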
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	bool banned = false;

	RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
	GEM_BUG_ON(__i915_request_is_complete(rq));

	rcu_read_lock(); /* protect the GEM context */
	if (guilty) {
		i915_request_set_error_once(rq, -EIO);
		__i915_request_skip(rq);
		banned = mark_guilty(rq);
	} else {
		i915_request_set_error_once(rq, -EAGAIN);
		mark_innocent(rq);
	}
	rcu_read_unlock();

	if (banned)
		intel_context_ban(rq->context, rq);
}

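/*
 * Pre-gen5 resets are driven through the I915_GDRST byte in PCI config
 * space: assert the reset request, poll the status bit, then clear the
 * request again.
 */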
static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	int err;

	/* Assert reset for at least 50 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = _wait_for_atomic(i915_in_reset(pdev), 50000, 0);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = _wait_for_atomic(!i915_in_reset(pdev), 50000, 0);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int loops;
	int err;

	/*
	 * On some platforms, e.g. Jasperlake, we see that the engine register
	 * state is not cleared until shortly after GDRST reports completion,
	 * causing a failure as we try to immediately resume while the internal
	 * state is still in flux. If we immediately repeat the reset, the
	 * second reset appears to serialise with the first, and since it is a
	 * no-op, the registers should retain their reset value. However, there
	 * is still a concern that upon leaving the second reset, the internal
	 * engine state is still in flux and not ready for resuming.
	 *
	 * Starting on MTL, there are some prep steps that we need to do when
	 * resetting some engines that need to be applied every time we write
	 * to GEN6_GDRST. As those are time consuming (tens of ms), we don't
	 * want to perform them twice, so, since the Jasperlake issue hasn't
	 * been observed on MTL, we avoid repeating the reset on newer
	 * platforms.
	 */
	loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;

	/*
	 * GEN6_GDRST is not in the gt power well, so there is no need to
	 * check for fifo space for the write or to forcewake the chip for
	 * the read.
	 */
	do {
		intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

		/* Wait for the device to ack the reset requests. */
		err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
						   hw_domain_mask, 0,
						   2000, 0,
						   NULL);
	} while (err == 0 && --loops);
	if (err)
		GT_TRACE(gt,
			 "Wait for 0x%08x engines reset failed\n",
			 hw_domain_mask);

	/*
	 * As we have observed that the engine state is still volatile
	 * after GDRST is acked, impose a small delay to let everything settle.
	 */
	udelay(50);

	return err;
}

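/* Translate the engine mask into GDRST domain bits and reset those domains. */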
static int __gen6_reset_engines(struct intel_gt *gt,
				intel_engine_mask_t engine_mask,
				unsigned int retry)
{
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			hw_mask |= engine->reset_domain;
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);
	ret = __gen6_reset_engines(gt, engine_mask, retry);
	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}

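/*
 * Each pair of VCS engines shares a single SFC unit, mirrored by the VECS
 * engine with instance = vcs_instance / 2.
 */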
static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
	int vecs_id;

	GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);

	vecs_id = _VECS((engine->instance) / 2);

	return engine->gt->engine[vecs_id];
}

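/* Registers and bits used to force-lock an SFC and query its lock/usage state. */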
struct sfc_lock_data {
	i915_reg_t lock_reg;
	i915_reg_t ack_reg;
	i915_reg_t usage_reg;
	u32 lock_bit;
	u32 ack_bit;
	u32 usage_bit;
	u32 reset_bit;
};

static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
				     struct sfc_lock_data *sfc_lock)
{
	switch (engine->class) {
	default:
		MISSING_CASE(engine->class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
		sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);

		break;
	case VIDEO_ENHANCEMENT_CLASS:
		sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);

		break;
	}
}

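/*
 * Force-lock the SFC used by @engine (or, for Wa_14010733141, by its paired
 * VECS engine) ahead of a reset; on success the SFC's reset bit is added to
 * @reset_mask and the locking engine to @unlock_mask.
 */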
static int gen11_lock_sfc(struct intel_engine_cs *engine,
			  u32 *reset_mask,
			  u32 *unlock_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock;
	bool lock_obtained, lock_to_other = false;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		fallthrough;
	case VIDEO_ENHANCEMENT_CLASS:
		get_sfc_forced_lock_data(engine, &sfc_lock);

		break;
	default:
		return 0;
	}

	if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
		struct intel_engine_cs *paired_vecs;

		if (engine->class != VIDEO_DECODE_CLASS ||
		    GRAPHICS_VER(engine->i915) != 12)
			return 0;

		/*
		 * Wa_14010733141
		 *
		 * If the VCS-MFX isn't using the SFC, we also need to check
		 * whether VCS-HCP is using it. If so, we need to issue a *VE*
		 * forced lock on the VE engine that shares the same SFC.
		 */
		if (!(intel_uncore_read_fw(uncore,
					   GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
		      GEN12_HCP_SFC_USAGE_BIT))
			return 0;

		paired_vecs = find_sfc_paired_vecs_engine(engine);
		get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
		lock_to_other = true;
		*unlock_mask |= paired_vecs->mask;
	} else {
		*unlock_mask |= engine->mask;
	}

	/*
	 * If the engine is using an SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If SFC ends up being locked to the engine we want to reset, we have
	 * to reset it as well (we will unlock it once the reset sequence is
	 * completed).
	 */
	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_lock.ack_reg,
					   sfc_lock.ack_bit,
					   sfc_lock.ack_bit,
					   1000, 0, NULL);

	/*
	 * Was the SFC released while we were trying to lock it?
	 *
	 * We should reset both the engine and the SFC if:
	 *  - We were locking the SFC to this engine and the lock succeeded
	 *    OR
	 *  - We were locking the SFC to a different engine (Wa_14010733141)
	 *    but the SFC was released before the lock was obtained.
	 *
	 * Otherwise we need only reset the engine by itself and we can
	 * leave the SFC alone.
	 */
	lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
			 sfc_lock.usage_bit) != 0;
	if (lock_obtained == lock_to_other)
		return 0;

	if (ret) {
		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*reset_mask |= sfc_lock.reset_bit;
	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock = {};

	if (engine->class != VIDEO_DECODE_CLASS &&
	    engine->class != VIDEO_ENHANCEMENT_CLASS)
		return;

	if (engine->class == VIDEO_DECODE_CLASS &&
	    (BIT(engine->instance) & vdbox_sfc_access) == 0)
		return;

	get_sfc_forced_lock_data(engine, &sfc_lock);

	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
}

static int __gen11_reset_engines(struct intel_gt *gt,
				 intel_engine_mask_t engine_mask,
				 unsigned int retry)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 reset_mask, unlock_mask = 0;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		reset_mask = GEN11_GRDOM_FULL;
	} else {
		reset_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			reset_mask |= engine->reset_domain;
			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, reset_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean up properly if something
	 * went wrong during the lock (e.g. the lock was acquired after
	 * timeout expiration).
	 *
	 * Due to Wa_14010733141, we may have locked an SFC to an engine that
	 * wasn't being reset. So instead of calling gen11_unlock_sfc()
	 * on engine_mask, we instead call it on the mask of engines that our
	 * gen11_lock_sfc() calls told us actually had locks attempted.
	 */
	for_each_engine_masked(engine, gt, unlock_mask, tmp)
		gen11_unlock_sfc(engine);

	return ret;
}

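/*
 * Request the engine's ready-for-reset handshake via RING_RESET_CTL; a
 * catastrophic error bypasses the handshake entirely (HAS#396813).
 */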
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
		return -ETIMEDOUT;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		gt_err(engine->gt,
		       "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
		       engine->name, request,
		       intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We would rather take context corruption than a
		 * failed reset with a wedged driver/gpu. And the
		 * active bb execution case should be covered by the
		 * stop_engines() we have before the reset.
		 */
	}

	/*
	 * Wa_22011100796:dg2, whenever a full soft reset is required,
	 * reset all individual engines first, and then do a full soft reset.
	 *
	 * This is best effort, so ignore any error from the initial reset.
	 */
	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
		__gen11_reset_engines(gt, gt->info.engine_mask, 0);

	if (GRAPHICS_VER(gt->i915) >= 11)
		ret = __gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = __gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (GRAPHICS_VER(i915) >= 8)
		return gen8_reset_engines;
	else if (GRAPHICS_VER(i915) >= 6)
		return gen6_reset_engines;
	else if (GRAPHICS_VER(i915) >= 5)
		return ilk_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (GRAPHICS_VER(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

static int __reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;

	return gen6_hw_domain_reset(gt, guc_domain);
}

static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0))
		return false;

	if (!__HAS_ENGINE(engine_mask, GSC0))
		return false;

	return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
}

static intel_engine_mask_t
wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
{
	if (!needs_wa_14015076503(gt, engine_mask))
		return engine_mask;

	/*
	 * wa_14015076503: if the GSC FW is loaded, we need to alert it that
	 * we're going to do a GSC engine reset and then wait for 200ms for the
	 * FW to get ready for it. However, if this is the first ALL_ENGINES
	 * reset attempt and the GSC is not busy, we can try to instead reset
	 * the GuC and all the other engines individually to avoid the 200ms
	 * wait.
	 * Skipping the GSC engine is safe because, differently from other
	 * engines, the GSCCS's only role is to forward the commands to the
	 * GSC FW, so it doesn't have any HW outside of the CS itself and
	 * therefore it has no state that we don't explicitly re-init on
	 * resume or on context switch (LRC or power context). The HW for the
	 * GSC uC is managed by the GSC FW so we don't need to care about that.
	 */
	if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
		__reset_guc(gt);
		engine_mask = gt->info.engine_mask & ~BIT(GSC0);
	} else {
		intel_uncore_rmw(gt->uncore,
				 HECI_H_GS1(MTL_GSC_HECI2_BASE),
				 0, HECI_H_GS1_ER_PREP);

		/* make sure the reset bit is clear when writing the CSR reg */
		intel_uncore_rmw(gt->uncore,
				 HECI_H_CSR(MTL_GSC_HECI2_BASE),
				 HECI_H_CSR_RST, HECI_H_CSR_IG);
		msleep(200);
	}

	return engine_mask;
}

static void
wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	if (!needs_wa_14015076503(gt, engine_mask))
		return;

	intel_uncore_rmw(gt->uncore,
			 HECI_H_GS1(MTL_GSC_HECI2_BASE),
			 HECI_H_GS1_ER_PREP, 0);
}

static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		intel_engine_mask_t reset_mask;

		reset_mask = wa_14015076503_start(gt, engine_mask, !retry);

		GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
		ret = reset(gt, reset_mask, retry);

		wa_14015076503_end(gt, reset_mask);
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!gt->i915->params.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (gt->i915->params.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}

int intel_reset_guc(struct intel_gt *gt)
{
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = __reset_guc(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
}

static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);

		if (!vma->mmo)
			continue;

		node = &vma->mmo->vma_node;
		vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;

		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	/*
	 * For GuC mode with submission enabled, ensure submission
	 * is disabled before stopping ring.
	 *
	 * For GuC mode with submission disabled, ensure that GuC is not
	 * sanitized; do that after engine reset. reset_prepare()
	 * is followed by engine reset which in this mode requires GuC to
	 * process any CSB FIFO entries generated by the resets.
	 */
	if (intel_uc_uses_guc_submission(&gt->uc))
		intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	local_bh_disable();
	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);
	local_bh_enable();

	intel_uc_reset(&gt->uc, ALL_ENGINES);

	intel_ggtt_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}

	intel_uc_reset_finish(&gt->uc);
}

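/*
 * Once wedged, complete every incoming request immediately with -EIO
 * instead of executing it on the hardware.
 */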
static void nop_submit_request(struct i915_request *request)
{
	RQ_TRACE(request, "-EIO\n");

	request = i915_request_mark_eio(request);
	if (request) {
		i915_request_submit(request);
		intel_engine_signal_breadcrumbs(request->engine);

		i915_request_put(request);
	}
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	GT_TRACE(gt, "start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_gt_reset_all_engines(gt);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	local_bh_disable();
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);
	intel_uc_cancel_requests(&gt->uc);
	local_bh_enable();

	reset_finish(gt, awake);

	GT_TRACE(gt, "end\n");
}

static void set_wedged_work(struct work_struct *w)
{
	struct intel_gt *gt = container_of(w, struct intel_gt, wedge);
	intel_wakeref_t wf;

	with_intel_runtime_pm(gt->uncore->rpm, wf)
		__intel_gt_set_wedged(gt);
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);

	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_dbg_printer(&gt->i915->drm,
						       DRM_UT_DRIVER, NULL);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
				continue;

			intel_engine_dump(engine, &p, "%s\n", engine->name);
		}
	}

	__intel_gt_set_wedged(gt);

	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (intel_gt_has_unrecoverable_error(gt))
		return false;

	GT_TRACE(gt, "start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO; for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = intel_gt_reset_all_engines(gt) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		gt_err(gt, "Unrecoverable wedged condition\n");
		add_taint_for_CI(gt->i915, TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GT_TRACE(gt, "end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

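/* Retry the full GPU reset a few times, backing off between attempts. */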
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	err = intel_gt_reset_all_engines(gt);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = intel_gt_reset_all_engines(gt);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *  - reset the chip using the reset reg
 *  - re-init context state
 *  - re-init hardware status page
 *  - re-init ring buffer
 *  - re-init interrupt state
 *  - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	struct intel_display *display = &gt->i915->display;
	intel_engine_mask_t awake;
	int ret;

	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));

	/*
	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
	 * critical section like gpu reset.
	 */
	gt_revoke(gt);

	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		gt_notice(gt, "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (gt->i915->params.reset)
			gt_err(gt, "GPU reset not supported\n");
		else
			gt_dbg(gt, "GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_irq_suspend(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		gt_err(gt, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_irq_resume(gt->i915);

	intel_overlay_reset(display);

	/* sanitize uC after engine reset */
	if (!intel_uc_uses_guc_submission(&gt->uc))
		intel_uc_reset_prepare(&gt->uc);
	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret) {
		gt_err(gt, "Failed to resume (%d)\n", ret);
		goto taint;
	}

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(gt->i915, TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

/**
 * intel_gt_reset_all_engines() - Reset all engines in the given gt.
 * @gt: the GT to reset all engines for.
 *
 * This function resets all engines within the given gt.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_gt_reset_all_engines(struct intel_gt *gt)
{
	return __intel_gt_reset(gt, ALL_ENGINES);
}

/**
 * intel_gt_reset_engine() - Reset a specific engine within a gt.
 * @engine: engine to be reset.
 *
 * This function resets the specified engine within a gt.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */
int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (intel_engine_uses_guc(engine))
		return -ENODEV;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);

	ret = intel_gt_reset_engine(engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp; we know the
	 * active request and can drop it, adjusting the head to skip the
	 * offending request and resume executing the remaining requests in
	 * the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = intel_engine_resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	int err;

	local_bh_disable();
	err = __intel_engine_reset_bh(engine, msg);
	local_bh_enable();

	return err;
}

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 60 * HZ) {
		intel_display_reset_prepare(gt->i915);

		intel_gt_reset(gt, engine_mask, reason);

		intel_display_reset_finish(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= gt->info.engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc) &&
	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		local_bh_disable();
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (__intel_engine_reset_bh(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
		local_bh_enable();
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, so stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/*
	 * Prevent any other reset-engine attempt. We don't do this for GuC
	 * submission, as the GuC owns the per-engine reset, not the i915.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp) {
			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
						&gt->reset.flags))
				wait_on_bit(&gt->reset.flags,
					    I915_RESET_ENGINE + engine->id,
					    TASK_UNINTERRUPTIBLE);
		}
	}

	/* Flush everyone using a resource about to be clobbered */
	synchronize_srcu_expedited(&gt->reset.backoff_srcu);

	intel_gt_reset_global(gt, engine_mask, msg);

	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp)
			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
					 &gt->reset.flags);
	}
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

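/*
 * Take a read-side reference on the reset backoff SRCU, either waiting out
 * any reset in progress (when @retry is set) or failing with -EBUSY.
 */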
static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
{
	might_lock(&gt->reset.backoff_srcu);
	if (retry)
		might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (!retry)
			return -EBUSY;

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	return _intel_gt_reset_lock(gt, srcu, false);
}

int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
{
	return _intel_gt_reset_lock(gt, srcu, true);
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_unrecoverable_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);

	/* Wedged on init is non-recoverable */
	gt_err(gt, "Non-recoverable wedged on init\n");
	add_taint_for_CI(gt->i915, TAINT_WARN);
}

void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
	INIT_WORK(&gt->wedge, set_wedged_work);

	/*
	 * While undesirable to wait inside the shrinker, complain anyway.
	 *
	 * If we have to wait during shrinking, we guarantee forward progress
	 * by forcing the reset. Therefore during the reset we must not
	 * re-enter the shrinker. By declaring that we take the reset mutex
	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	 */
	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);

	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

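/* Watchdog timed out: wedge the GT so that stuck waiters are released. */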
static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
	set_wedged_work(&w->gt->wedge);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

/*
 * Wa_22011802037 requires that we (or the GuC) ensure that no command
 * streamers are executing MI_FORCE_WAKE while an engine reset is initiated.
 */
bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt)
{
	if (GRAPHICS_VER(gt->i915) < 11)
		return false;

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0))
		return true;

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		return false;

	return true;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif