1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2008-2018 Intel Corporation
4 */
5
6 #include <linux/sched/mm.h>
7 #include <linux/stop_machine.h>
8 #include <linux/string_helpers.h>
9
10 #include "display/intel_display_reset.h"
11 #include "display/intel_overlay.h"
12 #include "gem/i915_gem_context.h"
13 #include "gt/intel_gt_regs.h"
14 #include "gt/uc/intel_gsc_fw.h"
15 #include "uc/intel_guc.h"
16
17 #include "i915_drv.h"
18 #include "i915_file_private.h"
19 #include "i915_gpu_error.h"
20 #include "i915_irq.h"
21 #include "i915_reg.h"
22 #include "i915_wait_util.h"
23 #include "intel_breadcrumbs.h"
24 #include "intel_engine_pm.h"
25 #include "intel_engine_regs.h"
26 #include "intel_gt.h"
27 #include "intel_gt_pm.h"
28 #include "intel_gt_print.h"
29 #include "intel_gt_requests.h"
30 #include "intel_mchbar_regs.h"
31 #include "intel_pci_config.h"
32 #include "intel_reset.h"
33
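/* Upper bound on how many times we retry a full-chip reset before giving up. */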
34 #define RESET_MAX_RETRIES 3
35
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
37 {
38 struct drm_i915_file_private *file_priv = ctx->file_priv;
39 unsigned long prev_hang;
40 unsigned int score;
41
42 if (IS_ERR_OR_NULL(file_priv))
43 return;
44
45 score = 0;
46 if (banned)
47 score = I915_CLIENT_SCORE_CONTEXT_BAN;
48
49 prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
50 if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
51 score += I915_CLIENT_SCORE_HANG_FAST;
52
53 if (score) {
54 atomic_add(score, &file_priv->ban_score);
55
56 drm_dbg(&ctx->i915->drm,
57 "client %s: gained %u ban score, now %u\n",
58 ctx->name, score,
59 atomic_read(&file_priv->ban_score));
60 }
61 }
62
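/*
 * Blame the hanging request's GEM context: bump its guilty count and
 * decide whether it has misbehaved often enough to be banned.
 */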
static bool mark_guilty(struct i915_request *rq)
64 {
65 struct i915_gem_context *ctx;
66 unsigned long prev_hang;
67 bool banned;
68 int i;
69
70 if (intel_context_is_closed(rq->context))
71 return true;
72
73 rcu_read_lock();
74 ctx = rcu_dereference(rq->context->gem_context);
75 if (ctx && !kref_get_unless_zero(&ctx->ref))
76 ctx = NULL;
77 rcu_read_unlock();
78 if (!ctx)
79 return intel_context_is_banned(rq->context);
80
81 atomic_inc(&ctx->guilty_count);
82
83 /* Cool contexts are too cool to be banned! (Used for reset testing.) */
84 if (!i915_gem_context_is_bannable(ctx)) {
85 banned = false;
86 goto out;
87 }
88
89 drm_notice(&ctx->i915->drm,
90 "%s context reset due to GPU hang\n",
91 ctx->name);
92
93 /* Record the timestamp for the last N hangs */
94 prev_hang = ctx->hang_timestamp[0];
95 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
96 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
97 ctx->hang_timestamp[i] = jiffies;
98
99 /* If we have hung N+1 times in rapid succession, we ban the context! */
100 banned = !i915_gem_context_is_recoverable(ctx);
101 if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
102 banned = true;
103 if (banned)
104 drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
105 ctx->name, atomic_read(&ctx->guilty_count));
106
107 client_mark_guilty(ctx, banned);
108
109 out:
110 i915_gem_context_put(ctx);
111 return banned;
112 }
113
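/* Record that this context had an innocent request caught up in the reset. */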
static void mark_innocent(struct i915_request *rq)
115 {
116 struct i915_gem_context *ctx;
117
118 rcu_read_lock();
119 ctx = rcu_dereference(rq->context->gem_context);
120 if (ctx)
121 atomic_inc(&ctx->active_count);
122 rcu_read_unlock();
123 }
124
void __i915_request_reset(struct i915_request *rq, bool guilty)
126 {
127 bool banned = false;
128
129 RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
130 GEM_BUG_ON(__i915_request_is_complete(rq));
131
132 rcu_read_lock(); /* protect the GEM context */
133 if (guilty) {
134 i915_request_set_error_once(rq, -EIO);
135 if (!i915_request_signaled(rq))
136 __i915_request_skip(rq);
137 banned = mark_guilty(rq);
138 } else {
139 i915_request_set_error_once(rq, -EAGAIN);
140 mark_innocent(rq);
141 }
142 rcu_read_unlock();
143
144 if (banned)
145 intel_context_ban(rq->context, rq);
146 }
147
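/*
 * Legacy reset flow: the reset is requested and acknowledged through the
 * GDRST byte in PCI config space.
 */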
static bool i915_in_reset(struct pci_dev *pdev)
149 {
150 u8 gdrst;
151
152 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
153 return gdrst & GRDOM_RESET_STATUS;
154 }
155
static int i915_do_reset(struct intel_gt *gt,
157 intel_engine_mask_t engine_mask,
158 unsigned int retry)
159 {
160 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
161 int err;
162
163 /* Assert reset for at least 50 usec, and wait for acknowledgement. */
164 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
165 udelay(50);
166 err = _wait_for_atomic(i915_in_reset(pdev), 50000, 0);
167
168 /* Clear the reset request. */
169 pci_write_config_byte(pdev, I915_GDRST, 0);
170 udelay(50);
171 if (!err)
172 err = _wait_for_atomic(!i915_in_reset(pdev), 50000, 0);
173
174 return err;
175 }
176
static bool g4x_reset_complete(struct pci_dev *pdev)
178 {
179 u8 gdrst;
180
181 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
182 return (gdrst & GRDOM_RESET_ENABLE) == 0;
183 }
184
static int g33_do_reset(struct intel_gt *gt,
186 intel_engine_mask_t engine_mask,
187 unsigned int retry)
188 {
189 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
190
191 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
192 return _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
193 }
194
static int g4x_do_reset(struct intel_gt *gt,
196 intel_engine_mask_t engine_mask,
197 unsigned int retry)
198 {
199 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
200 struct intel_uncore *uncore = gt->uncore;
201 int ret;
202
203 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
204 intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
205 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
206
207 pci_write_config_byte(pdev, I915_GDRST,
208 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
209 ret = _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
210 if (ret) {
211 GT_TRACE(gt, "Wait for media reset failed\n");
212 goto out;
213 }
214
215 pci_write_config_byte(pdev, I915_GDRST,
216 GRDOM_RENDER | GRDOM_RESET_ENABLE);
217 ret = _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
218 if (ret) {
219 GT_TRACE(gt, "Wait for render reset failed\n");
220 goto out;
221 }
222
223 out:
224 pci_write_config_byte(pdev, I915_GDRST, 0);
225
226 intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
227 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
228
229 return ret;
230 }
231
static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
233 unsigned int retry)
234 {
235 struct intel_uncore *uncore = gt->uncore;
236 int ret;
237
238 intel_uncore_write_fw(uncore, ILK_GDSR,
239 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
240 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
241 ILK_GRDOM_RESET_ENABLE, 0,
242 5000, 0,
243 NULL);
244 if (ret) {
245 GT_TRACE(gt, "Wait for render reset failed\n");
246 goto out;
247 }
248
249 intel_uncore_write_fw(uncore, ILK_GDSR,
250 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
251 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
252 ILK_GRDOM_RESET_ENABLE, 0,
253 5000, 0,
254 NULL);
255 if (ret) {
256 GT_TRACE(gt, "Wait for media reset failed\n");
257 goto out;
258 }
259
260 out:
261 intel_uncore_write_fw(uncore, ILK_GDSR, 0);
262 intel_uncore_posting_read_fw(uncore, ILK_GDSR);
263 return ret;
264 }
265
266 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
268 {
269 struct intel_uncore *uncore = gt->uncore;
270 int loops;
271 int err;
272
273 /*
274 * On some platforms, e.g. Jasperlake, we see that the engine register
275 * state is not cleared until shortly after GDRST reports completion,
276 * causing a failure as we try to immediately resume while the internal
277 * state is still in flux. If we immediately repeat the reset, the
278 * second reset appears to serialise with the first, and since it is a
279 * no-op, the registers should retain their reset value. However, there
280 * is still a concern that upon leaving the second reset, the internal
281 * engine state is still in flux and not ready for resuming.
282 *
* Starting on MTL, resetting some engines requires prep steps that must
* be applied every time we write to GEN6_GDRST. As those steps are time
* consuming (tens of ms), we don't want to perform them twice, so, since
* the Jasperlake issue hasn't been observed on MTL, we avoid repeating
* the reset on newer platforms.
288 */
289 loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
290
291 /*
292 * GEN6_GDRST is not in the gt power well, no need to check
293 * for fifo space for the write or forcewake the chip for
294 * the read
295 */
296 do {
297 intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
298
299 /* Wait for the device to ack the reset requests. */
300 err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
301 hw_domain_mask, 0,
302 2000, 0,
303 NULL);
304 } while (err == 0 && --loops);
305 if (err)
306 GT_TRACE(gt,
307 "Wait for 0x%08x engines reset failed\n",
308 hw_domain_mask);
309
310 /*
311 * As we have observed that the engine state is still volatile
312 * after GDRST is acked, impose a small delay to let everything settle.
313 */
314 udelay(50);
315
316 return err;
317 }
318
static int __gen6_reset_engines(struct intel_gt *gt,
320 intel_engine_mask_t engine_mask,
321 unsigned int retry)
322 {
323 struct intel_engine_cs *engine;
324 u32 hw_mask;
325
326 if (engine_mask == ALL_ENGINES) {
327 hw_mask = GEN6_GRDOM_FULL;
328 } else {
329 intel_engine_mask_t tmp;
330
331 hw_mask = 0;
332 for_each_engine_masked(engine, gt, engine_mask, tmp) {
333 hw_mask |= engine->reset_domain;
334 }
335 }
336
337 return gen6_hw_domain_reset(gt, hw_mask);
338 }
339
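/*
 * Gen6-gen10 engine reset: __gen6_reset_engines() maps the engine mask
 * onto GDRST domain bits, and the wrapper below holds uncore->lock
 * around the whole reset write/ack sequence.
 */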
static int gen6_reset_engines(struct intel_gt *gt,
341 intel_engine_mask_t engine_mask,
342 unsigned int retry)
343 {
344 unsigned long flags;
345 int ret;
346
spin_lock_irqsave(&gt->uncore->lock, flags);
ret = __gen6_reset_engines(gt, engine_mask, retry);
spin_unlock_irqrestore(&gt->uncore->lock, flags);
350
351 return ret;
352 }
353
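/*
 * Each SFC is shared by a pair of video decode engines and one video
 * enhancement engine: VECS instance n serves VCS instances 2n and 2n+1.
 */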
static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
355 {
356 int vecs_id;
357
358 GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
359
360 vecs_id = _VECS((engine->instance) / 2);
361
362 return engine->gt->engine[vecs_id];
363 }
364
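/*
 * Register/bit bundle describing how to force-lock an SFC unit; the
 * exact registers differ between the VCS and VECS paths.
 */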
365 struct sfc_lock_data {
366 i915_reg_t lock_reg;
367 i915_reg_t ack_reg;
368 i915_reg_t usage_reg;
369 u32 lock_bit;
370 u32 ack_bit;
371 u32 usage_bit;
372 u32 reset_bit;
373 };
374
static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
376 struct sfc_lock_data *sfc_lock)
377 {
378 switch (engine->class) {
379 default:
380 MISSING_CASE(engine->class);
381 fallthrough;
382 case VIDEO_DECODE_CLASS:
383 sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
384 sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
385
386 sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
387 sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
388
389 sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
390 sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
391 sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
392
393 break;
394 case VIDEO_ENHANCEMENT_CLASS:
395 sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
396 sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
397
398 sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
399 sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
400
401 sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
402 sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
403 sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
404
405 break;
406 }
407 }
408
static int gen11_lock_sfc(struct intel_engine_cs *engine,
410 u32 *reset_mask,
411 u32 *unlock_mask)
412 {
413 struct intel_uncore *uncore = engine->uncore;
414 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
415 struct sfc_lock_data sfc_lock;
416 bool lock_obtained, lock_to_other = false;
417 int ret;
418
419 switch (engine->class) {
420 case VIDEO_DECODE_CLASS:
421 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
422 return 0;
423
424 fallthrough;
425 case VIDEO_ENHANCEMENT_CLASS:
426 get_sfc_forced_lock_data(engine, &sfc_lock);
427
428 break;
429 default:
430 return 0;
431 }
432
433 if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
434 struct intel_engine_cs *paired_vecs;
435
436 if (engine->class != VIDEO_DECODE_CLASS ||
437 GRAPHICS_VER(engine->i915) != 12)
438 return 0;
439
440 /*
441 * Wa_14010733141
442 *
443 * If the VCS-MFX isn't using the SFC, we also need to check
444 * whether VCS-HCP is using it. If so, we need to issue a *VE*
445 * forced lock on the VE engine that shares the same SFC.
446 */
447 if (!(intel_uncore_read_fw(uncore,
448 GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
449 GEN12_HCP_SFC_USAGE_BIT))
450 return 0;
451
452 paired_vecs = find_sfc_paired_vecs_engine(engine);
453 get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
454 lock_to_other = true;
455 *unlock_mask |= paired_vecs->mask;
456 } else {
457 *unlock_mask |= engine->mask;
458 }
459
460 /*
461 * If the engine is using an SFC, tell the engine that a software reset
462 * is going to happen. The engine will then try to force lock the SFC.
463 * If SFC ends up being locked to the engine we want to reset, we have
464 * to reset it as well (we will unlock it once the reset sequence is
465 * completed).
466 */
467 intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
468
469 ret = __intel_wait_for_register_fw(uncore,
470 sfc_lock.ack_reg,
471 sfc_lock.ack_bit,
472 sfc_lock.ack_bit,
473 1000, 0, NULL);
474
475 /*
476 * Was the SFC released while we were trying to lock it?
477 *
478 * We should reset both the engine and the SFC if:
479 * - We were locking the SFC to this engine and the lock succeeded
480 * OR
481 * - We were locking the SFC to a different engine (Wa_14010733141)
482 * but the SFC was released before the lock was obtained.
483 *
484 * Otherwise we need only reset the engine by itself and we can
485 * leave the SFC alone.
486 */
487 lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
488 sfc_lock.usage_bit) != 0;
489 if (lock_obtained == lock_to_other)
490 return 0;
491
492 if (ret) {
493 ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
494 return ret;
495 }
496
497 *reset_mask |= sfc_lock.reset_bit;
498 return 0;
499 }
500
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
502 {
503 struct intel_uncore *uncore = engine->uncore;
504 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
505 struct sfc_lock_data sfc_lock = {};
506
507 if (engine->class != VIDEO_DECODE_CLASS &&
508 engine->class != VIDEO_ENHANCEMENT_CLASS)
509 return;
510
511 if (engine->class == VIDEO_DECODE_CLASS &&
512 (BIT(engine->instance) & vdbox_sfc_access) == 0)
513 return;
514
515 get_sfc_forced_lock_data(engine, &sfc_lock);
516
517 intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
518 }
519
static int __gen11_reset_engines(struct intel_gt *gt,
521 intel_engine_mask_t engine_mask,
522 unsigned int retry)
523 {
524 struct intel_engine_cs *engine;
525 intel_engine_mask_t tmp;
526 u32 reset_mask, unlock_mask = 0;
527 int ret;
528
529 if (engine_mask == ALL_ENGINES) {
530 reset_mask = GEN11_GRDOM_FULL;
531 } else {
532 reset_mask = 0;
533 for_each_engine_masked(engine, gt, engine_mask, tmp) {
534 reset_mask |= engine->reset_domain;
535 ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
536 if (ret)
537 goto sfc_unlock;
538 }
539 }
540
541 ret = gen6_hw_domain_reset(gt, reset_mask);
542
543 sfc_unlock:
544 /*
545 * We unlock the SFC based on the lock status and not the result of
546 * gen11_lock_sfc to make sure that we clean properly if something
547 * wrong happened during the lock (e.g. lock acquired after timeout
548 * expiration).
549 *
550 * Due to Wa_14010733141, we may have locked an SFC to an engine that
551 * wasn't being reset. So instead of calling gen11_unlock_sfc()
552 * on engine_mask, we instead call it on the mask of engines that our
553 * gen11_lock_sfc() calls told us actually had locks attempted.
554 */
555 for_each_engine_masked(engine, gt, unlock_mask, tmp)
556 gen11_unlock_sfc(engine);
557
558 return ret;
559 }
560
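/*
 * Ask the engine for the ready-for-reset handshake via RING_RESET_CTL;
 * catastrophic errors bypass the handshake (HAS#396813).
 */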
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
562 {
563 struct intel_uncore *uncore = engine->uncore;
564 const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
565 u32 request, mask, ack;
566 int ret;
567
568 if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
569 return -ETIMEDOUT;
570
571 ack = intel_uncore_read_fw(uncore, reg);
572 if (ack & RESET_CTL_CAT_ERROR) {
573 /*
574 * For catastrophic errors, ready-for-reset sequence
575 * needs to be bypassed: HAS#396813
576 */
577 request = RESET_CTL_CAT_ERROR;
578 mask = RESET_CTL_CAT_ERROR;
579
580 /* Catastrophic errors need to be cleared by HW */
581 ack = 0;
582 } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
583 request = RESET_CTL_REQUEST_RESET;
584 mask = RESET_CTL_READY_TO_RESET;
585 ack = RESET_CTL_READY_TO_RESET;
586 } else {
587 return 0;
588 }
589
590 intel_uncore_write_fw(uncore, reg, REG_MASKED_FIELD_ENABLE(request));
591 ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
592 700, 0, NULL);
593 if (ret)
594 gt_err(engine->gt,
595 "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
596 engine->name, request,
597 intel_uncore_read_fw(uncore, reg));
598
599 return ret;
600 }
601
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
603 {
604 intel_uncore_write_fw(engine->uncore,
605 RING_RESET_CTL(engine->mmio_base),
606 REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET));
607 }
608
static int gen8_reset_engines(struct intel_gt *gt,
610 intel_engine_mask_t engine_mask,
611 unsigned int retry)
612 {
613 struct intel_engine_cs *engine;
614 const bool reset_non_ready = retry >= 1;
615 intel_engine_mask_t tmp;
616 unsigned long flags;
617 int ret;
618
spin_lock_irqsave(&gt->uncore->lock, flags);
620
621 for_each_engine_masked(engine, gt, engine_mask, tmp) {
622 ret = gen8_engine_reset_prepare(engine);
623 if (ret && !reset_non_ready)
624 goto skip_reset;
625
626 /*
627 * If this is not the first failed attempt to prepare,
628 * we decide to proceed anyway.
629 *
630 * By doing so we risk context corruption and with
631 * some gens (kbl), possible system hang if reset
632 * happens during active bb execution.
633 *
634 * We rather take context corruption instead of
635 * failed reset with a wedged driver/gpu. And
636 * active bb execution case should be covered by
637 * stop_engines() we have before the reset.
638 */
639 }
640
641 /*
642 * Wa_22011100796:dg2, whenever Full soft reset is required,
643 * reset all individual engines firstly, and then do a full soft reset.
644 *
645 * This is best effort, so ignore any error from the initial reset.
646 */
647 if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
648 __gen11_reset_engines(gt, gt->info.engine_mask, 0);
649
650 if (GRAPHICS_VER(gt->i915) >= 11)
651 ret = __gen11_reset_engines(gt, engine_mask, retry);
652 else
653 ret = __gen6_reset_engines(gt, engine_mask, retry);
654
655 skip_reset:
656 for_each_engine_masked(engine, gt, engine_mask, tmp)
657 gen8_engine_reset_cancel(engine);
658
spin_unlock_irqrestore(&gt->uncore->lock, flags);
660
661 return ret;
662 }
663
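/* Selftest stub used by the mock GT: pretend every reset succeeds. */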
static int mock_reset(struct intel_gt *gt,
665 intel_engine_mask_t mask,
666 unsigned int retry)
667 {
668 return 0;
669 }
670
671 typedef int (*reset_func)(struct intel_gt *,
672 intel_engine_mask_t engine_mask,
673 unsigned int retry);
674
static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
676 {
677 struct drm_i915_private *i915 = gt->i915;
678
679 if (is_mock_gt(gt))
680 return mock_reset;
681 else if (GRAPHICS_VER(i915) >= 8)
682 return gen8_reset_engines;
683 else if (GRAPHICS_VER(i915) >= 6)
684 return gen6_reset_engines;
685 else if (GRAPHICS_VER(i915) >= 5)
686 return ilk_do_reset;
687 else if (IS_G4X(i915))
688 return g4x_do_reset;
689 else if (IS_G33(i915) || IS_PINEVIEW(i915))
690 return g33_do_reset;
691 else if (GRAPHICS_VER(i915) >= 3)
692 return i915_do_reset;
693 else
694 return NULL;
695 }
696
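/* Reset only the GuC, using its dedicated GDRST domain bit. */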
static int __reset_guc(struct intel_gt *gt)
698 {
699 u32 guc_domain =
700 GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
701
702 return gen6_hw_domain_reset(gt, guc_domain);
703 }
704
static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
706 {
707 if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0))
708 return false;
709
710 if (!__HAS_ENGINE(engine_mask, GSC0))
711 return false;
712
return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
714 }
715
716 static intel_engine_mask_t
wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
718 {
719 if (!needs_wa_14015076503(gt, engine_mask))
720 return engine_mask;
721
722 /*
723 * wa_14015076503: if the GSC FW is loaded, we need to alert it that
724 * we're going to do a GSC engine reset and then wait for 200ms for the
725 * FW to get ready for it. However, if this is the first ALL_ENGINES
726 * reset attempt and the GSC is not busy, we can try to instead reset
727 * the GuC and all the other engines individually to avoid the 200ms
728 * wait.
* Skipping the GSC engine is safe because, unlike other engines, the
* GSCCS' only role is to forward commands to the GSC FW, so it has no
* HW outside of the CS itself and therefore no state that we don't
* explicitly re-init on resume or on context switch (via the LRC or the
* power context). The HW for the GSC uC is managed by the GSC FW, so we
* don't need to care about that.
735 */
736 if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
737 __reset_guc(gt);
738 engine_mask = gt->info.engine_mask & ~BIT(GSC0);
739 } else {
740 intel_uncore_rmw(gt->uncore,
741 HECI_H_GS1(MTL_GSC_HECI2_BASE),
742 0, HECI_H_GS1_ER_PREP);
743
744 /* make sure the reset bit is clear when writing the CSR reg */
745 intel_uncore_rmw(gt->uncore,
746 HECI_H_CSR(MTL_GSC_HECI2_BASE),
747 HECI_H_CSR_RST, HECI_H_CSR_IG);
748 msleep(200);
749 }
750
751 return engine_mask;
752 }
753
754 static void
wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
756 {
757 if (!needs_wa_14015076503(gt, engine_mask))
758 return;
759
760 intel_uncore_rmw(gt->uncore,
761 HECI_H_GS1(MTL_GSC_HECI2_BASE),
762 HECI_H_GS1_ER_PREP, 0);
763 }
764
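/*
 * Low-level reset entry point: pick the platform reset routine and, for
 * a full-chip reset, retry up to RESET_MAX_RETRIES times with forcewake
 * held across each attempt.
 */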
static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
766 {
767 const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
768 reset_func reset;
769 int ret = -ETIMEDOUT;
770 int retry;
771
772 reset = intel_get_gpu_reset(gt);
773 if (!reset)
774 return -ENODEV;
775
776 /*
777 * If the power well sleeps during the reset, the reset
778 * request may be dropped and never completes (causing -EIO).
779 */
780 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
781 for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
782 intel_engine_mask_t reset_mask;
783
784 reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
785
786 GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
787 ret = reset(gt, reset_mask, retry);
788
789 wa_14015076503_end(gt, reset_mask);
790 }
791 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
792
793 return ret;
794 }
795
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!gt->i915->params.reset)
return false;
800
801 return intel_get_gpu_reset(gt);
802 }
803
bool intel_has_reset_engine(const struct intel_gt *gt)
805 {
806 if (gt->i915->params.reset < 2)
807 return false;
808
809 return INTEL_INFO(gt->i915)->has_reset_engine;
810 }
811
int intel_reset_guc(struct intel_gt *gt)
813 {
814 int ret;
815
816 GEM_BUG_ON(!HAS_GT_UC(gt->i915));
817
818 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
819 ret = __reset_guc(gt);
820 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
821
822 return ret;
823 }
824
825 /*
* Ensure the irq handler finishes, and is not run again.
827 * Also return the active request so that we only search for it once.
828 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
830 {
831 /*
832 * During the reset sequence, we must prevent the engine from
833 * entering RC6. As the context state is undefined until we restart
834 * the engine, if it does enter RC6 during the reset, the state
835 * written to the powercontext is undefined and so we may lose
836 * GPU state upon resume, i.e. fail to restart after a reset.
837 */
838 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
839 if (engine->reset.prepare)
840 engine->reset.prepare(engine);
841 }
842
static void revoke_mmaps(struct intel_gt *gt)
844 {
845 int i;
846
847 for (i = 0; i < gt->ggtt->num_fences; i++) {
848 struct drm_vma_offset_node *node;
849 struct i915_vma *vma;
850 u64 vma_offset;
851
852 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
853 if (!vma)
854 continue;
855
856 if (!i915_vma_has_userfault(vma))
857 continue;
858
GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
860
861 if (!vma->mmo)
862 continue;
863
864 node = &vma->mmo->vma_node;
865 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
866
867 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
868 drm_vma_node_offset_addr(node) + vma_offset,
869 vma->size,
870 1);
871 }
872 }
873
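/*
 * Quiesce the GT before the reset: disable submission and grab forcewake
 * on every engine, taking a PM reference for each engine that was awake
 * so reset_finish() knows which references to release.
 */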
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
875 {
876 struct intel_engine_cs *engine;
877 intel_engine_mask_t awake = 0;
878 enum intel_engine_id id;
879
/*
881 * For GuC mode with submission enabled, ensure submission
882 * is disabled before stopping ring.
883 *
884 * For GuC mode with submission disabled, ensure that GuC is not
885 * sanitized, do that after engine reset. reset_prepare()
886 * is followed by engine reset which in this mode requires GuC to
887 * process any CSB FIFO entries generated by the resets.
888 */
if (intel_uc_uses_guc_submission(&gt->uc))
intel_uc_reset_prepare(&gt->uc);
891
892 for_each_engine(engine, gt, id) {
893 if (intel_engine_pm_get_if_awake(engine))
894 awake |= engine->mask;
895 reset_prepare_engine(engine);
896 }
897
898 return awake;
899 }
900
static void gt_revoke(struct intel_gt *gt)
902 {
903 revoke_mmaps(gt);
904 }
905
static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
907 {
908 struct intel_engine_cs *engine;
909 enum intel_engine_id id;
910 int err;
911
912 /*
913 * Everything depends on having the GTT running, so we need to start
914 * there.
915 */
916 err = i915_ggtt_enable_hw(gt->i915);
917 if (err)
918 return err;
919
920 local_bh_disable();
921 for_each_engine(engine, gt, id)
922 __intel_engine_reset(engine, stalled_mask & engine->mask);
923 local_bh_enable();
924
intel_uc_reset(&gt->uc, ALL_ENGINES);
926
927 intel_ggtt_restore_fences(gt->ggtt);
928
929 return err;
930 }
931
static void reset_finish_engine(struct intel_engine_cs *engine)
933 {
934 if (engine->reset.finish)
935 engine->reset.finish(engine);
936 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
937
938 intel_engine_signal_breadcrumbs(engine);
939 }
940
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
942 {
943 struct intel_engine_cs *engine;
944 enum intel_engine_id id;
945
946 for_each_engine(engine, gt, id) {
947 reset_finish_engine(engine);
948 if (awake & engine->mask)
949 intel_engine_pm_put(engine);
950 }
951
intel_uc_reset_finish(&gt->uc);
953 }
954
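/*
 * Submission hook installed while wedged: complete the request
 * immediately with -EIO instead of handing it to the hardware.
 */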
static void nop_submit_request(struct i915_request *request)
956 {
957 RQ_TRACE(request, "-EIO\n");
958
959 request = i915_request_mark_eio(request);
960 if (request) {
961 i915_request_submit(request);
962 intel_engine_signal_breadcrumbs(request->engine);
963
964 i915_request_put(request);
965 }
966 }
967
static void __intel_gt_set_wedged(struct intel_gt *gt)
969 {
970 struct intel_engine_cs *engine;
971 intel_engine_mask_t awake;
972 enum intel_engine_id id;
973
if (test_bit(I915_WEDGED, &gt->reset.flags))
975 return;
976
977 GT_TRACE(gt, "start\n");
978
979 /*
980 * First, stop submission to hw, but do not yet complete requests by
981 * rolling the global seqno forward (since this would complete requests
982 * for which we haven't set the fence error to EIO yet).
983 */
984 awake = reset_prepare(gt);
985
986 /* Even if the GPU reset fails, it should still stop the engines */
987 if (!intel_gt_gpu_reset_clobbers_display(gt))
988 intel_gt_reset_all_engines(gt);
989
990 for_each_engine(engine, gt, id)
991 engine->submit_request = nop_submit_request;
992
993 /*
994 * Make sure no request can slip through without getting completed by
995 * either this call here to intel_engine_write_global_seqno, or the one
996 * in nop_submit_request.
997 */
998 synchronize_rcu_expedited();
set_bit(I915_WEDGED, &gt->reset.flags);
1000
1001 /* Mark all executing requests as skipped */
1002 local_bh_disable();
1003 for_each_engine(engine, gt, id)
1004 if (engine->reset.cancel)
1005 engine->reset.cancel(engine);
intel_uc_cancel_requests(&gt->uc);
1007 local_bh_enable();
1008
1009 reset_finish(gt, awake);
1010
1011 GT_TRACE(gt, "end\n");
1012 }
1013
static void set_wedged_work(struct work_struct *w)
1015 {
1016 struct intel_gt *gt = container_of(w, struct intel_gt, wedge);
1017 intel_wakeref_t wf;
1018
1019 with_intel_runtime_pm(gt->uncore->rpm, wf)
1020 __intel_gt_set_wedged(gt);
1021 }
1022
void intel_gt_set_wedged(struct intel_gt *gt)
1024 {
1025 intel_wakeref_t wakeref;
1026
if (test_bit(I915_WEDGED, &gt->reset.flags))
1028 return;
1029
1030 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
mutex_lock(&gt->reset.mutex);
1032
1033 if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_dbg_printer(&gt->i915->drm,
1035 DRM_UT_DRIVER, NULL);
1036 struct intel_engine_cs *engine;
1037 enum intel_engine_id id;
1038
1039 drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
1040 for_each_engine(engine, gt, id) {
1041 if (intel_engine_is_idle(engine))
1042 continue;
1043
1044 intel_engine_dump(engine, &p, "%s\n", engine->name);
1045 }
1046 }
1047
1048 __intel_gt_set_wedged(gt);
1049
mutex_unlock(&gt->reset.mutex);
1051 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1052 }
1053
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
1057 struct intel_timeline *tl;
1058 bool ok;
1059
if (!test_bit(I915_WEDGED, &gt->reset.flags))
1061 return true;
1062
1063 /* Never fully initialised, recovery impossible */
1064 if (intel_gt_has_unrecoverable_error(gt))
1065 return false;
1066
1067 GT_TRACE(gt, "start\n");
1068
1069 /*
1070 * Before unwedging, make sure that all pending operations
1071 * are flushed and errored out - we may have requests waiting upon
1072 * third party fences. We marked all inflight requests as EIO, and
1073 * every execbuf since returned EIO, for consistency we want all
1074 * the currently pending requests to also be marked as EIO, which
1075 * is done inside our nop_submit_request - and so we must wait.
1076 *
1077 * No more can be submitted until we reset the wedged bit.
1078 */
1079 spin_lock(&timelines->lock);
1080 list_for_each_entry(tl, &timelines->active_list, link) {
1081 struct dma_fence *fence;
1082
1083 fence = i915_active_fence_get(&tl->last_request);
1084 if (!fence)
1085 continue;
1086
1087 spin_unlock(&timelines->lock);
1088
1089 /*
1090 * All internal dependencies (i915_requests) will have
1091 * been flushed by the set-wedge, but we may be stuck waiting
1092 * for external fences. These should all be capped to 10s
1093 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
1094 * in the worst case.
1095 */
1096 dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
1097 dma_fence_put(fence);
1098
1099 /* Restart iteration after dropping lock */
1100 spin_lock(&timelines->lock);
1101 tl = list_entry(&timelines->active_list, typeof(*tl), link);
1102 }
1103 spin_unlock(&timelines->lock);
1104
1105 /* We must reset pending GPU events before restoring our submission */
1106 ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
1107 if (!intel_gt_gpu_reset_clobbers_display(gt))
1108 ok = intel_gt_reset_all_engines(gt) == 0;
1109 if (!ok) {
1110 /*
1111 * Warn CI about the unrecoverable wedged condition.
1112 * Time for a reboot.
1113 */
1114 add_taint_for_CI(gt->i915, TAINT_WARN);
1115 return false;
1116 }
1117
1118 /*
1119 * Undo nop_submit_request. We prevent all new i915 requests from
1120 * being queued (by disallowing execbuf whilst wedged) so having
1121 * waited for all active requests above, we know the system is idle
1122 * and do not have to worry about a thread being inside
1123 * engine->submit_request() as we swap over. So unlike installing
1124 * the nop_submit_request on reset, we can do this from normal
1125 * context and do not require stop_machine().
1126 */
1127 intel_engines_reset_default_submission(gt);
1128
1129 GT_TRACE(gt, "end\n");
1130
1131 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &gt->reset.flags);
1133
1134 return true;
1135 }
1136
bool intel_gt_unset_wedged(struct intel_gt *gt)
1138 {
1139 bool result;
1140
mutex_lock(&gt->reset.mutex);
result = __intel_gt_unset_wedged(gt);
mutex_unlock(&gt->reset.mutex);
1144
1145 return result;
1146 }
1147
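/*
 * Attempt the full-chip reset, retrying with an increasing delay if it
 * fails, then rebuild the per-engine and GGTT state via gt_reset().
 */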
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
1149 {
1150 int err, i;
1151
1152 err = intel_gt_reset_all_engines(gt);
1153 for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
1154 msleep(10 * (i + 1));
1155 err = intel_gt_reset_all_engines(gt);
1156 }
1157 if (err)
1158 return err;
1159
1160 return gt_reset(gt, stalled_mask);
1161 }
1162
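/* Restart every engine after the reset so they can accept new requests. */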
static int resume(struct intel_gt *gt)
1164 {
1165 struct intel_engine_cs *engine;
1166 enum intel_engine_id id;
1167 int ret;
1168
1169 for_each_engine(engine, gt, id) {
1170 ret = intel_engine_resume(engine);
1171 if (ret)
1172 return ret;
1173 }
1174
1175 return 0;
1176 }
1177
bool intel_gt_gpu_reset_clobbers_display(struct intel_gt *gt)
1179 {
1180 struct drm_i915_private *i915 = gt->i915;
1181
1182 return INTEL_INFO(i915)->gpu_reset_clobbers_display;
1183 }
1184
1185 /**
1186 * intel_gt_reset - reset chip after a hang
1187 * @gt: #intel_gt to reset
1188 * @stalled_mask: mask of the stalled engines with the guilty requests
1189 * @reason: user error message for why we are resetting
1190 *
1191 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1192 * on failure.
1193 *
1194 * Procedure is fairly simple:
1195 * - reset the chip using the reset reg
1196 * - re-init context state
1197 * - re-init hardware status page
1198 * - re-init ring buffer
1199 * - re-init interrupt state
1200 * - re-init display
1201 */
void intel_gt_reset(struct intel_gt *gt,
1203 intel_engine_mask_t stalled_mask,
1204 const char *reason)
1205 {
1206 struct intel_display *display = gt->i915->display;
1207 intel_engine_mask_t awake;
1208 int ret;
1209
1210 GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
1211
1212 might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1214
1215 /*
1216 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
1217 * critical section like gpu reset.
1218 */
1219 gt_revoke(gt);
1220
mutex_lock(&gt->reset.mutex);
1222
1223 /* Clear any previous failed attempts at recovery. Time to try again. */
1224 if (!__intel_gt_unset_wedged(gt))
1225 goto unlock;
1226
1227 if (reason)
1228 gt_notice(gt, "Resetting chip for %s\n", reason);
atomic_inc(&gt->i915->gpu_error.reset_count);
1230
1231 awake = reset_prepare(gt);
1232
1233 if (!intel_has_gpu_reset(gt)) {
1234 if (gt->i915->params.reset)
1235 gt_err(gt, "GPU reset not supported\n");
1236 else
1237 gt_dbg(gt, "GPU reset disabled\n");
1238 goto error;
1239 }
1240
1241 if (intel_gt_gpu_reset_clobbers_display(gt))
1242 intel_irq_suspend(gt->i915);
1243
1244 if (do_reset(gt, stalled_mask)) {
1245 gt_err(gt, "Failed to reset chip\n");
1246 goto taint;
1247 }
1248
1249 if (intel_gt_gpu_reset_clobbers_display(gt))
1250 intel_irq_resume(gt->i915);
1251
1252 intel_overlay_reset(display);
1253
1254 /* sanitize uC after engine reset */
if (!intel_uc_uses_guc_submission(&gt->uc))
intel_uc_reset_prepare(&gt->uc);
1257 /*
1258 * Next we need to restore the context, but we don't use those
1259 * yet either...
1260 *
1261 * Ring buffer needs to be re-initialized in the KMS case, or if X
1262 * was running at the time of the reset (i.e. we weren't VT
1263 * switched away).
1264 */
1265 ret = intel_gt_init_hw(gt);
1266 if (ret) {
1267 gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret);
1268 goto taint;
1269 }
1270
1271 ret = resume(gt);
1272 if (ret)
1273 goto taint;
1274
1275 finish:
1276 reset_finish(gt, awake);
1277 unlock:
mutex_unlock(&gt->reset.mutex);
1279 return;
1280
1281 taint:
1282 /*
1283 * History tells us that if we cannot reset the GPU now, we
1284 * never will. This then impacts everything that is run
1285 * subsequently. On failing the reset, we mark the driver
1286 * as wedged, preventing further execution on the GPU.
1287 * We also want to go one step further and add a taint to the
1288 * kernel so that any subsequent faults can be traced back to
1289 * this failure. This is important for CI, where if the
1290 * GPU/driver fails we would like to reboot and restart testing
1291 * rather than continue on into oblivion. For everyone else,
1292 * the system should still plod along, but they have been warned!
1293 */
1294 add_taint_for_CI(gt->i915, TAINT_WARN);
1295 error:
1296 __intel_gt_set_wedged(gt);
1297 goto finish;
1298 }
1299
1300 /**
1301 * intel_gt_reset_all_engines() - Reset all engines in the given gt.
1302 * @gt: the GT to reset all engines for.
1303 *
1304 * This function resets all engines within the given gt.
1305 *
1306 * Returns:
1307 * Zero on success, negative error code on failure.
1308 */
int intel_gt_reset_all_engines(struct intel_gt *gt)
1310 {
1311 return __intel_gt_reset(gt, ALL_ENGINES);
1312 }
1313
1314 /**
1315 * intel_gt_reset_engine() - Reset a specific engine within a gt.
1316 * @engine: engine to be reset.
1317 *
1318 * This function resets the specified engine within a gt.
1319 *
1320 * Returns:
1321 * Zero on success, negative error code on failure.
1322 */
int intel_gt_reset_engine(struct intel_engine_cs *engine)
1324 {
1325 return __intel_gt_reset(engine->gt, engine->mask);
1326 }
1327
int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
1329 {
1330 struct intel_gt *gt = engine->gt;
1331 int ret;
1332
1333 ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1335
1336 if (intel_engine_uses_guc(engine))
1337 return -ENODEV;
1338
1339 if (!intel_engine_pm_get_if_awake(engine))
1340 return 0;
1341
1342 reset_prepare_engine(engine);
1343
1344 if (msg)
1345 drm_notice(&engine->i915->drm,
1346 "Resetting %s for %s\n", engine->name, msg);
1347 i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
1348
1349 ret = intel_gt_reset_engine(engine);
1350 if (ret) {
1351 /* If we fail here, we expect to fallback to a global reset */
1352 ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
1353 goto out;
1354 }
1355
1356 /*
1357 * The request that caused the hang is stuck on elsp, we know the
1358 * active request and can drop it, adjust head to skip the offending
1359 * request to resume executing remaining requests in the queue.
1360 */
1361 __intel_engine_reset(engine, true);
1362
1363 /*
1364 * The engine and its registers (and workarounds in case of render)
1365 * have been reset to their default values. Follow the init_ring
1366 * process to program RING_MODE, HWSP and re-enable submission.
1367 */
1368 ret = intel_engine_resume(engine);
1369
1370 out:
1371 intel_engine_cancel_stop_cs(engine);
1372 reset_finish_engine(engine);
1373 intel_engine_pm_put_async(engine);
1374 return ret;
1375 }
1376
1377 /**
1378 * intel_engine_reset - reset GPU engine to recover from a hang
1379 * @engine: engine to reset
1380 * @msg: reason for GPU reset; or NULL for no drm_notice()
1381 *
1382 * Reset a specific GPU engine. Useful if a hang is detected.
1383 * Returns zero on successful reset or otherwise an error code.
1384 *
1385 * Procedure is:
1386 * - identifies the request that caused the hang and it is dropped
1387 * - reset engine (which will force the engine to idle)
1388 * - re-init/configure engine
1389 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1391 {
1392 int err;
1393
1394 local_bh_disable();
1395 err = __intel_engine_reset_bh(engine, msg);
1396 local_bh_enable();
1397
1398 return err;
1399 }
1400
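/* Callback for when the display reset gets stuck: give up and wedge the GT. */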
static void display_reset_modeset_stuck(void *gt)
1402 {
1403 intel_gt_set_wedged(gt);
1404 }
1405
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
{
struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1411 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1412 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1413 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1414 struct intel_wedge_me w;
1415
1416 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1417
1418 GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
1419 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1420
1421 /* Use a watchdog to ensure that our reset completes */
1422 intel_wedge_on_timeout(&w, gt, 60 * HZ) {
1423 struct drm_i915_private *i915 = gt->i915;
1424 struct intel_display *display = i915->display;
1425 bool need_display_reset;
1426 bool reset_display;
1427
1428 need_display_reset = intel_gt_gpu_reset_clobbers_display(gt) &&
1429 intel_has_gpu_reset(gt);
1430
1431 reset_display = intel_display_reset_test(display) ||
1432 need_display_reset;
1433
1434 if (reset_display)
1435 reset_display = intel_display_reset_prepare(display,
1436 display_reset_modeset_stuck,
1437 gt);
1438
1439 intel_gt_reset(gt, engine_mask, reason);
1440
1441 if (reset_display)
1442 intel_display_reset_finish(display, !need_display_reset);
1443 }
1444
if (!test_bit(I915_WEDGED, &gt->reset.flags))
1446 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1447 else
drm_dev_wedged_event(&gt->i915->drm,
1449 DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
1450 NULL);
1451 }
1452
1453 /**
1454 * intel_gt_handle_error - handle a gpu error
1455 * @gt: the intel_gt
1456 * @engine_mask: mask representing engines that are hung
1457 * @flags: control flags
1458 * @fmt: Error message format string
1459 *
1460 * Do some basic checking of register state at error time and
1461 * dump it to the syslog. Also call i915_capture_error_state() to make
1462 * sure we get a record and make it available in debugfs. Fire a uevent
1463 * so userspace knows something bad happened (should trigger collection
1464 * of a ring dump etc.).
1465 */
void intel_gt_handle_error(struct intel_gt *gt,
1467 intel_engine_mask_t engine_mask,
1468 unsigned long flags,
1469 const char *fmt, ...)
1470 {
1471 struct intel_engine_cs *engine;
1472 intel_wakeref_t wakeref;
1473 intel_engine_mask_t tmp;
1474 char error_msg[80];
1475 char *msg = NULL;
1476
1477 if (fmt) {
1478 va_list args;
1479
1480 va_start(args, fmt);
1481 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1482 va_end(args);
1483
1484 msg = error_msg;
1485 }
1486
1487 /*
1488 * In most cases it's guaranteed that we get here with an RPM
1489 * reference held, for example because there is a pending GPU
1490 * request that won't finish until the reset is done. This
1491 * isn't the case at least when we get here by doing a
1492 * simulated reset via debugfs, so get an RPM reference.
1493 */
1494 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1495
1496 engine_mask &= gt->info.engine_mask;
1497
1498 if (flags & I915_ERROR_CAPTURE) {
1499 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
1500 intel_gt_clear_error_registers(gt, engine_mask);
1501 }
1502
1503 /*
1504 * Try engine reset when available. We fall back to full reset if
1505 * single reset fails.
1506 */
if (!intel_uc_uses_guc_submission(&gt->uc) &&
1508 intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1509 local_bh_disable();
1510 for_each_engine_masked(engine, gt, engine_mask, tmp) {
1511 BUILD_BUG_ON(I915_RESET_BACKOFF >= I915_RESET_ENGINE);
1512 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
1514 continue;
1515
1516 if (__intel_engine_reset_bh(engine, msg) == 0)
1517 engine_mask &= ~engine->mask;
1518
1519 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
1521 }
1522 local_bh_enable();
1523 }
1524
1525 if (!engine_mask)
1526 goto out;
1527
1528 /* Full reset needs the mutex, stop any other user trying to do so. */
if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1530 wait_event(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1532 goto out; /* piggy-back on the other reset */
1533 }
1534
1535 /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1536 synchronize_rcu_expedited();
1537
1538 /*
* Prevent any other reset-engine attempt. We don't do this for GuC
* submission, as the GuC owns the per-engine reset, not the i915.
1541 */
if (!intel_uc_uses_guc_submission(&gt->uc)) {
1543 for_each_engine(engine, gt, tmp) {
1544 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags,
1547 I915_RESET_ENGINE + engine->id,
1548 TASK_UNINTERRUPTIBLE);
1549 }
1550 }
1551
1552 /* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1554
1555 intel_gt_reset_global(gt, engine_mask, msg);
1556
if (!intel_uc_uses_guc_submission(&gt->uc)) {
1558 for_each_engine(engine, gt, tmp)
1559 clear_bit_unlock(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
1561 }
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1563 smp_mb__after_atomic();
wake_up_all(&gt->reset.queue);
1565
1566 out:
1567 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1568 }
1569
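/*
 * Enter the reset backoff SRCU read-side section, failing with -EBUSY
 * (or, if @retry is set, sleeping) while a reset is in progress.
 */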
static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
{
might_lock(&gt->reset.backoff_srcu);
1573 if (retry)
1574 might_sleep();
1575
1576 rcu_read_lock();
while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1578 rcu_read_unlock();
1579
1580 if (!retry)
1581 return -EBUSY;
1582
1583 if (wait_event_interruptible(gt->reset.queue,
1584 !test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
1586 return -EINTR;
1587
1588 rcu_read_lock();
1589 }
*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1591 rcu_read_unlock();
1592
1593 return 0;
1594 }
1595
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1597 {
1598 return _intel_gt_reset_lock(gt, srcu, false);
1599 }
1600
int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
1602 {
1603 return _intel_gt_reset_lock(gt, srcu, true);
1604 }
1605
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
1608 {
srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1610 }
1611
int intel_gt_terminally_wedged(struct intel_gt *gt)
1613 {
1614 might_sleep();
1615
1616 if (!intel_gt_is_wedged(gt))
1617 return 0;
1618
1619 if (intel_gt_has_unrecoverable_error(gt))
1620 return -EIO;
1621
1622 /* Reset still in progress? Maybe we will recover? */
1623 if (wait_event_interruptible(gt->reset.queue,
1624 !test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
1626 return -EINTR;
1627
1628 return intel_gt_is_wedged(gt) ? -EIO : 0;
1629 }
1630
void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1632 {
1633 BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1634 I915_WEDGED_ON_INIT);
1635 intel_gt_set_wedged(gt);
1636 i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1638
1639 /* Wedged on init is non-recoverable */
1640 add_taint_for_CI(gt->i915, TAINT_WARN);
1641 }
1642
void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1644 {
1645 intel_gt_set_wedged(gt);
1646 i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1648 intel_gt_retire_requests(gt); /* cleanup any wedged requests */
1649 }
1650
void intel_gt_init_reset(struct intel_gt *gt)
{
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
INIT_WORK(&gt->wedge, set_wedged_work);
1657
1658 /*
1659 * While undesirable to wait inside the shrinker, complain anyway.
1660 *
1661 * If we have to wait during shrinking, we guarantee forward progress
1662 * by forcing the reset. Therefore during the reset we must not
1663 * re-enter the shrinker. By declaring that we take the reset mutex
1664 * within the shrinker, we forbid ourselves from performing any
1665 * fs-reclaim or taking related locks during reset.
1666 */
i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
1668
1669 /* no GPU until we are ready! */
__set_bit(I915_WEDGED, &gt->reset.flags);
1671 }
1672
void intel_gt_fini_reset(struct intel_gt *gt)
{
cleanup_srcu_struct(&gt->reset.backoff_srcu);
1676 }
1677
static void intel_wedge_me(struct work_struct *work)
1679 {
1680 struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1681
1682 gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
1683 set_wedged_work(&w->gt->wedge);
1684 }
1685
void __intel_init_wedge(struct intel_wedge_me *w,
1687 struct intel_gt *gt,
1688 long timeout,
1689 const char *name)
1690 {
1691 w->gt = gt;
1692 w->name = name;
1693
1694 INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1695 queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
1696 }
1697
void __intel_fini_wedge(struct intel_wedge_me *w)
1699 {
1700 cancel_delayed_work_sync(&w->work);
1701 destroy_delayed_work_on_stack(&w->work);
1702 w->gt = NULL;
1703 }
1704
1705 /*
1706 * Wa_22011802037 requires that we (or the GuC) ensure that no command
1707 * streamers are executing MI_FORCE_WAKE while an engine reset is initiated.
1708 */
bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt)
1710 {
1711 if (GRAPHICS_VER(gt->i915) < 11)
1712 return false;
1713
1714 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0))
1715 return true;
1716
1717 if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
1718 return false;
1719
1720 return true;
1721 }
1722
1723 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1724 #include "selftest_reset.c"
1725 #include "selftest_hangcheck.c"
1726 #endif
1727