xref: /linux/drivers/gpu/drm/i915/gt/intel_reset.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2008-2018 Intel Corporation
4  */
5 
6 #include <linux/sched/mm.h>
7 #include <linux/stop_machine.h>
8 #include <linux/string_helpers.h>
9 
10 #include "display/intel_display_reset.h"
11 #include "display/intel_overlay.h"
12 
13 #include "gem/i915_gem_context.h"
14 
15 #include "gt/intel_gt_regs.h"
16 
17 #include "gt/uc/intel_gsc_fw.h"
18 
19 #include "i915_drv.h"
20 #include "i915_file_private.h"
21 #include "i915_gpu_error.h"
22 #include "i915_irq.h"
23 #include "i915_reg.h"
24 #include "intel_breadcrumbs.h"
25 #include "intel_engine_pm.h"
26 #include "intel_engine_regs.h"
27 #include "intel_gt.h"
28 #include "intel_gt_pm.h"
29 #include "intel_gt_print.h"
30 #include "intel_gt_requests.h"
31 #include "intel_mchbar_regs.h"
32 #include "intel_pci_config.h"
33 #include "intel_reset.h"
34 
35 #include "uc/intel_guc.h"
36 
37 #define RESET_MAX_RETRIES 3
38 
client_mark_guilty(struct i915_gem_context * ctx,bool banned)39 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
40 {
41 	struct drm_i915_file_private *file_priv = ctx->file_priv;
42 	unsigned long prev_hang;
43 	unsigned int score;
44 
45 	if (IS_ERR_OR_NULL(file_priv))
46 		return;
47 
48 	score = 0;
49 	if (banned)
50 		score = I915_CLIENT_SCORE_CONTEXT_BAN;
51 
52 	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
53 	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
54 		score += I915_CLIENT_SCORE_HANG_FAST;
55 
56 	if (score) {
57 		atomic_add(score, &file_priv->ban_score);
58 
59 		drm_dbg(&ctx->i915->drm,
60 			"client %s: gained %u ban score, now %u\n",
61 			ctx->name, score,
62 			atomic_read(&file_priv->ban_score));
63 	}
64 }
65 
mark_guilty(struct i915_request * rq)66 static bool mark_guilty(struct i915_request *rq)
67 {
68 	struct i915_gem_context *ctx;
69 	unsigned long prev_hang;
70 	bool banned;
71 	int i;
72 
73 	if (intel_context_is_closed(rq->context))
74 		return true;
75 
76 	rcu_read_lock();
77 	ctx = rcu_dereference(rq->context->gem_context);
78 	if (ctx && !kref_get_unless_zero(&ctx->ref))
79 		ctx = NULL;
80 	rcu_read_unlock();
81 	if (!ctx)
82 		return intel_context_is_banned(rq->context);
83 
84 	atomic_inc(&ctx->guilty_count);
85 
86 	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
87 	if (!i915_gem_context_is_bannable(ctx)) {
88 		banned = false;
89 		goto out;
90 	}
91 
92 	drm_notice(&ctx->i915->drm,
93 		   "%s context reset due to GPU hang\n",
94 		   ctx->name);
95 
96 	/* Record the timestamp for the last N hangs */
97 	prev_hang = ctx->hang_timestamp[0];
98 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
99 		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
100 	ctx->hang_timestamp[i] = jiffies;
101 
102 	/* If we have hung N+1 times in rapid succession, we ban the context! */
103 	banned = !i915_gem_context_is_recoverable(ctx);
104 	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
105 		banned = true;
106 	if (banned)
107 		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
108 			ctx->name, atomic_read(&ctx->guilty_count));
109 
110 	client_mark_guilty(ctx, banned);
111 
112 out:
113 	i915_gem_context_put(ctx);
114 	return banned;
115 }
116 
mark_innocent(struct i915_request * rq)117 static void mark_innocent(struct i915_request *rq)
118 {
119 	struct i915_gem_context *ctx;
120 
121 	rcu_read_lock();
122 	ctx = rcu_dereference(rq->context->gem_context);
123 	if (ctx)
124 		atomic_inc(&ctx->active_count);
125 	rcu_read_unlock();
126 }
127 
__i915_request_reset(struct i915_request * rq,bool guilty)128 void __i915_request_reset(struct i915_request *rq, bool guilty)
129 {
130 	bool banned = false;
131 
132 	RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
133 	GEM_BUG_ON(__i915_request_is_complete(rq));
134 
135 	rcu_read_lock(); /* protect the GEM context */
136 	if (guilty) {
137 		i915_request_set_error_once(rq, -EIO);
138 		__i915_request_skip(rq);
139 		banned = mark_guilty(rq);
140 	} else {
141 		i915_request_set_error_once(rq, -EAGAIN);
142 		mark_innocent(rq);
143 	}
144 	rcu_read_unlock();
145 
146 	if (banned)
147 		intel_context_ban(rq->context, rq);
148 }
149 
i915_in_reset(struct pci_dev * pdev)150 static bool i915_in_reset(struct pci_dev *pdev)
151 {
152 	u8 gdrst;
153 
154 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
155 	return gdrst & GRDOM_RESET_STATUS;
156 }
157 
i915_do_reset(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)158 static int i915_do_reset(struct intel_gt *gt,
159 			 intel_engine_mask_t engine_mask,
160 			 unsigned int retry)
161 {
162 	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
163 	int err;
164 
165 	/* Assert reset for at least 50 usec, and wait for acknowledgement. */
166 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
167 	udelay(50);
168 	err = _wait_for_atomic(i915_in_reset(pdev), 50000, 0);
169 
170 	/* Clear the reset request. */
171 	pci_write_config_byte(pdev, I915_GDRST, 0);
172 	udelay(50);
173 	if (!err)
174 		err = _wait_for_atomic(!i915_in_reset(pdev), 50000, 0);
175 
176 	return err;
177 }
178 
g4x_reset_complete(struct pci_dev * pdev)179 static bool g4x_reset_complete(struct pci_dev *pdev)
180 {
181 	u8 gdrst;
182 
183 	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
184 	return (gdrst & GRDOM_RESET_ENABLE) == 0;
185 }
186 
g33_do_reset(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)187 static int g33_do_reset(struct intel_gt *gt,
188 			intel_engine_mask_t engine_mask,
189 			unsigned int retry)
190 {
191 	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
192 
193 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
194 	return _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
195 }
196 
g4x_do_reset(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)197 static int g4x_do_reset(struct intel_gt *gt,
198 			intel_engine_mask_t engine_mask,
199 			unsigned int retry)
200 {
201 	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
202 	struct intel_uncore *uncore = gt->uncore;
203 	int ret;
204 
205 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
206 	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
207 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
208 
209 	pci_write_config_byte(pdev, I915_GDRST,
210 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
211 	ret =  _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
212 	if (ret) {
213 		GT_TRACE(gt, "Wait for media reset failed\n");
214 		goto out;
215 	}
216 
217 	pci_write_config_byte(pdev, I915_GDRST,
218 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
219 	ret =  _wait_for_atomic(g4x_reset_complete(pdev), 50000, 0);
220 	if (ret) {
221 		GT_TRACE(gt, "Wait for render reset failed\n");
222 		goto out;
223 	}
224 
225 out:
226 	pci_write_config_byte(pdev, I915_GDRST, 0);
227 
228 	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
229 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
230 
231 	return ret;
232 }
233 
ilk_do_reset(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)234 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
235 			unsigned int retry)
236 {
237 	struct intel_uncore *uncore = gt->uncore;
238 	int ret;
239 
240 	intel_uncore_write_fw(uncore, ILK_GDSR,
241 			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
242 	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
243 					   ILK_GRDOM_RESET_ENABLE, 0,
244 					   5000, 0,
245 					   NULL);
246 	if (ret) {
247 		GT_TRACE(gt, "Wait for render reset failed\n");
248 		goto out;
249 	}
250 
251 	intel_uncore_write_fw(uncore, ILK_GDSR,
252 			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
253 	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
254 					   ILK_GRDOM_RESET_ENABLE, 0,
255 					   5000, 0,
256 					   NULL);
257 	if (ret) {
258 		GT_TRACE(gt, "Wait for media reset failed\n");
259 		goto out;
260 	}
261 
262 out:
263 	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
264 	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
265 	return ret;
266 }
267 
268 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
gen6_hw_domain_reset(struct intel_gt * gt,u32 hw_domain_mask)269 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
270 {
271 	struct intel_uncore *uncore = gt->uncore;
272 	int loops;
273 	int err;
274 
275 	/*
276 	 * On some platforms, e.g. Jasperlake, we see that the engine register
277 	 * state is not cleared until shortly after GDRST reports completion,
278 	 * causing a failure as we try to immediately resume while the internal
279 	 * state is still in flux. If we immediately repeat the reset, the
280 	 * second reset appears to serialise with the first, and since it is a
281 	 * no-op, the registers should retain their reset value. However, there
282 	 * is still a concern that upon leaving the second reset, the internal
283 	 * engine state is still in flux and not ready for resuming.
284 	 *
285 	 * Starting on MTL, there are some prep steps that we need to do when
286 	 * resetting some engines that need to be applied every time we write to
287 	 * GEN6_GDRST. As those are time consuming (tens of ms), we don't want
288 	 * to perform that twice, so, since the Jasperlake issue hasn't been
289 	 * observed on MTL, we avoid repeating the reset on newer platforms.
290 	 */
291 	loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
292 
293 	/*
294 	 * GEN6_GDRST is not in the gt power well, no need to check
295 	 * for fifo space for the write or forcewake the chip for
296 	 * the read
297 	 */
298 	do {
299 		intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
300 
301 		/* Wait for the device to ack the reset requests. */
302 		err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
303 						   hw_domain_mask, 0,
304 						   2000, 0,
305 						   NULL);
306 	} while (err == 0 && --loops);
307 	if (err)
308 		GT_TRACE(gt,
309 			 "Wait for 0x%08x engines reset failed\n",
310 			 hw_domain_mask);
311 
312 	/*
313 	 * As we have observed that the engine state is still volatile
314 	 * after GDRST is acked, impose a small delay to let everything settle.
315 	 */
316 	udelay(50);
317 
318 	return err;
319 }
320 
__gen6_reset_engines(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)321 static int __gen6_reset_engines(struct intel_gt *gt,
322 				intel_engine_mask_t engine_mask,
323 				unsigned int retry)
324 {
325 	struct intel_engine_cs *engine;
326 	u32 hw_mask;
327 
328 	if (engine_mask == ALL_ENGINES) {
329 		hw_mask = GEN6_GRDOM_FULL;
330 	} else {
331 		intel_engine_mask_t tmp;
332 
333 		hw_mask = 0;
334 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
335 			hw_mask |= engine->reset_domain;
336 		}
337 	}
338 
339 	return gen6_hw_domain_reset(gt, hw_mask);
340 }
341 
gen6_reset_engines(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)342 static int gen6_reset_engines(struct intel_gt *gt,
343 			      intel_engine_mask_t engine_mask,
344 			      unsigned int retry)
345 {
346 	unsigned long flags;
347 	int ret;
348 
349 	spin_lock_irqsave(&gt->uncore->lock, flags);
350 	ret = __gen6_reset_engines(gt, engine_mask, retry);
351 	spin_unlock_irqrestore(&gt->uncore->lock, flags);
352 
353 	return ret;
354 }
355 
find_sfc_paired_vecs_engine(struct intel_engine_cs * engine)356 static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
357 {
358 	int vecs_id;
359 
360 	GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
361 
362 	vecs_id = _VECS((engine->instance) / 2);
363 
364 	return engine->gt->engine[vecs_id];
365 }
366 
367 struct sfc_lock_data {
368 	i915_reg_t lock_reg;
369 	i915_reg_t ack_reg;
370 	i915_reg_t usage_reg;
371 	u32 lock_bit;
372 	u32 ack_bit;
373 	u32 usage_bit;
374 	u32 reset_bit;
375 };
376 
get_sfc_forced_lock_data(struct intel_engine_cs * engine,struct sfc_lock_data * sfc_lock)377 static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
378 				     struct sfc_lock_data *sfc_lock)
379 {
380 	switch (engine->class) {
381 	default:
382 		MISSING_CASE(engine->class);
383 		fallthrough;
384 	case VIDEO_DECODE_CLASS:
385 		sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
386 		sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
387 
388 		sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
389 		sfc_lock->ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;
390 
391 		sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
392 		sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
393 		sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
394 
395 		break;
396 	case VIDEO_ENHANCEMENT_CLASS:
397 		sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
398 		sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
399 
400 		sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
401 		sfc_lock->ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;
402 
403 		sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
404 		sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
405 		sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
406 
407 		break;
408 	}
409 }
410 
gen11_lock_sfc(struct intel_engine_cs * engine,u32 * reset_mask,u32 * unlock_mask)411 static int gen11_lock_sfc(struct intel_engine_cs *engine,
412 			  u32 *reset_mask,
413 			  u32 *unlock_mask)
414 {
415 	struct intel_uncore *uncore = engine->uncore;
416 	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
417 	struct sfc_lock_data sfc_lock;
418 	bool lock_obtained, lock_to_other = false;
419 	int ret;
420 
421 	switch (engine->class) {
422 	case VIDEO_DECODE_CLASS:
423 		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
424 			return 0;
425 
426 		fallthrough;
427 	case VIDEO_ENHANCEMENT_CLASS:
428 		get_sfc_forced_lock_data(engine, &sfc_lock);
429 
430 		break;
431 	default:
432 		return 0;
433 	}
434 
435 	if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
436 		struct intel_engine_cs *paired_vecs;
437 
438 		if (engine->class != VIDEO_DECODE_CLASS ||
439 		    GRAPHICS_VER(engine->i915) != 12)
440 			return 0;
441 
442 		/*
443 		 * Wa_14010733141
444 		 *
445 		 * If the VCS-MFX isn't using the SFC, we also need to check
446 		 * whether VCS-HCP is using it.  If so, we need to issue a *VE*
447 		 * forced lock on the VE engine that shares the same SFC.
448 		 */
449 		if (!(intel_uncore_read_fw(uncore,
450 					   GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
451 		      GEN12_HCP_SFC_USAGE_BIT))
452 			return 0;
453 
454 		paired_vecs = find_sfc_paired_vecs_engine(engine);
455 		get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
456 		lock_to_other = true;
457 		*unlock_mask |= paired_vecs->mask;
458 	} else {
459 		*unlock_mask |= engine->mask;
460 	}
461 
462 	/*
463 	 * If the engine is using an SFC, tell the engine that a software reset
464 	 * is going to happen. The engine will then try to force lock the SFC.
465 	 * If SFC ends up being locked to the engine we want to reset, we have
466 	 * to reset it as well (we will unlock it once the reset sequence is
467 	 * completed).
468 	 */
469 	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
470 
471 	ret = __intel_wait_for_register_fw(uncore,
472 					   sfc_lock.ack_reg,
473 					   sfc_lock.ack_bit,
474 					   sfc_lock.ack_bit,
475 					   1000, 0, NULL);
476 
477 	/*
478 	 * Was the SFC released while we were trying to lock it?
479 	 *
480 	 * We should reset both the engine and the SFC if:
481 	 *  - We were locking the SFC to this engine and the lock succeeded
482 	 *       OR
483 	 *  - We were locking the SFC to a different engine (Wa_14010733141)
484 	 *    but the SFC was released before the lock was obtained.
485 	 *
486 	 * Otherwise we need only reset the engine by itself and we can
487 	 * leave the SFC alone.
488 	 */
489 	lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
490 			sfc_lock.usage_bit) != 0;
491 	if (lock_obtained == lock_to_other)
492 		return 0;
493 
494 	if (ret) {
495 		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
496 		return ret;
497 	}
498 
499 	*reset_mask |= sfc_lock.reset_bit;
500 	return 0;
501 }
502 
gen11_unlock_sfc(struct intel_engine_cs * engine)503 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
504 {
505 	struct intel_uncore *uncore = engine->uncore;
506 	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
507 	struct sfc_lock_data sfc_lock = {};
508 
509 	if (engine->class != VIDEO_DECODE_CLASS &&
510 	    engine->class != VIDEO_ENHANCEMENT_CLASS)
511 		return;
512 
513 	if (engine->class == VIDEO_DECODE_CLASS &&
514 	    (BIT(engine->instance) & vdbox_sfc_access) == 0)
515 		return;
516 
517 	get_sfc_forced_lock_data(engine, &sfc_lock);
518 
519 	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
520 }
521 
__gen11_reset_engines(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)522 static int __gen11_reset_engines(struct intel_gt *gt,
523 				 intel_engine_mask_t engine_mask,
524 				 unsigned int retry)
525 {
526 	struct intel_engine_cs *engine;
527 	intel_engine_mask_t tmp;
528 	u32 reset_mask, unlock_mask = 0;
529 	int ret;
530 
531 	if (engine_mask == ALL_ENGINES) {
532 		reset_mask = GEN11_GRDOM_FULL;
533 	} else {
534 		reset_mask = 0;
535 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
536 			reset_mask |= engine->reset_domain;
537 			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
538 			if (ret)
539 				goto sfc_unlock;
540 		}
541 	}
542 
543 	ret = gen6_hw_domain_reset(gt, reset_mask);
544 
545 sfc_unlock:
546 	/*
547 	 * We unlock the SFC based on the lock status and not the result of
548 	 * gen11_lock_sfc to make sure that we clean properly if something
549 	 * wrong happened during the lock (e.g. lock acquired after timeout
550 	 * expiration).
551 	 *
552 	 * Due to Wa_14010733141, we may have locked an SFC to an engine that
553 	 * wasn't being reset.  So instead of calling gen11_unlock_sfc()
554 	 * on engine_mask, we instead call it on the mask of engines that our
555 	 * gen11_lock_sfc() calls told us actually had locks attempted.
556 	 */
557 	for_each_engine_masked(engine, gt, unlock_mask, tmp)
558 		gen11_unlock_sfc(engine);
559 
560 	return ret;
561 }
562 
gen8_engine_reset_prepare(struct intel_engine_cs * engine)563 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
564 {
565 	struct intel_uncore *uncore = engine->uncore;
566 	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
567 	u32 request, mask, ack;
568 	int ret;
569 
570 	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
571 		return -ETIMEDOUT;
572 
573 	ack = intel_uncore_read_fw(uncore, reg);
574 	if (ack & RESET_CTL_CAT_ERROR) {
575 		/*
576 		 * For catastrophic errors, ready-for-reset sequence
577 		 * needs to be bypassed: HAS#396813
578 		 */
579 		request = RESET_CTL_CAT_ERROR;
580 		mask = RESET_CTL_CAT_ERROR;
581 
582 		/* Catastrophic errors need to be cleared by HW */
583 		ack = 0;
584 	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
585 		request = RESET_CTL_REQUEST_RESET;
586 		mask = RESET_CTL_READY_TO_RESET;
587 		ack = RESET_CTL_READY_TO_RESET;
588 	} else {
589 		return 0;
590 	}
591 
592 	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
593 	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
594 					   700, 0, NULL);
595 	if (ret)
596 		gt_err(engine->gt,
597 		       "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
598 		       engine->name, request,
599 		       intel_uncore_read_fw(uncore, reg));
600 
601 	return ret;
602 }
603 
gen8_engine_reset_cancel(struct intel_engine_cs * engine)604 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
605 {
606 	intel_uncore_write_fw(engine->uncore,
607 			      RING_RESET_CTL(engine->mmio_base),
608 			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
609 }
610 
gen8_reset_engines(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned int retry)611 static int gen8_reset_engines(struct intel_gt *gt,
612 			      intel_engine_mask_t engine_mask,
613 			      unsigned int retry)
614 {
615 	struct intel_engine_cs *engine;
616 	const bool reset_non_ready = retry >= 1;
617 	intel_engine_mask_t tmp;
618 	unsigned long flags;
619 	int ret;
620 
621 	spin_lock_irqsave(&gt->uncore->lock, flags);
622 
623 	for_each_engine_masked(engine, gt, engine_mask, tmp) {
624 		ret = gen8_engine_reset_prepare(engine);
625 		if (ret && !reset_non_ready)
626 			goto skip_reset;
627 
628 		/*
629 		 * If this is not the first failed attempt to prepare,
630 		 * we decide to proceed anyway.
631 		 *
632 		 * By doing so we risk context corruption and with
633 		 * some gens (kbl), possible system hang if reset
634 		 * happens during active bb execution.
635 		 *
636 		 * We rather take context corruption instead of
637 		 * failed reset with a wedged driver/gpu. And
638 		 * active bb execution case should be covered by
639 		 * stop_engines() we have before the reset.
640 		 */
641 	}
642 
643 	/*
644 	 * Wa_22011100796:dg2, whenever Full soft reset is required,
645 	 * reset all individual engines firstly, and then do a full soft reset.
646 	 *
647 	 * This is best effort, so ignore any error from the initial reset.
648 	 */
649 	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
650 		__gen11_reset_engines(gt, gt->info.engine_mask, 0);
651 
652 	if (GRAPHICS_VER(gt->i915) >= 11)
653 		ret = __gen11_reset_engines(gt, engine_mask, retry);
654 	else
655 		ret = __gen6_reset_engines(gt, engine_mask, retry);
656 
657 skip_reset:
658 	for_each_engine_masked(engine, gt, engine_mask, tmp)
659 		gen8_engine_reset_cancel(engine);
660 
661 	spin_unlock_irqrestore(&gt->uncore->lock, flags);
662 
663 	return ret;
664 }
665 
mock_reset(struct intel_gt * gt,intel_engine_mask_t mask,unsigned int retry)666 static int mock_reset(struct intel_gt *gt,
667 		      intel_engine_mask_t mask,
668 		      unsigned int retry)
669 {
670 	return 0;
671 }
672 
673 typedef int (*reset_func)(struct intel_gt *,
674 			  intel_engine_mask_t engine_mask,
675 			  unsigned int retry);
676 
intel_get_gpu_reset(const struct intel_gt * gt)677 static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
678 {
679 	struct drm_i915_private *i915 = gt->i915;
680 
681 	if (is_mock_gt(gt))
682 		return mock_reset;
683 	else if (GRAPHICS_VER(i915) >= 8)
684 		return gen8_reset_engines;
685 	else if (GRAPHICS_VER(i915) >= 6)
686 		return gen6_reset_engines;
687 	else if (GRAPHICS_VER(i915) >= 5)
688 		return ilk_do_reset;
689 	else if (IS_G4X(i915))
690 		return g4x_do_reset;
691 	else if (IS_G33(i915) || IS_PINEVIEW(i915))
692 		return g33_do_reset;
693 	else if (GRAPHICS_VER(i915) >= 3)
694 		return i915_do_reset;
695 	else
696 		return NULL;
697 }
698 
__reset_guc(struct intel_gt * gt)699 static int __reset_guc(struct intel_gt *gt)
700 {
701 	u32 guc_domain =
702 		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
703 
704 	return gen6_hw_domain_reset(gt, guc_domain);
705 }
706 
needs_wa_14015076503(struct intel_gt * gt,intel_engine_mask_t engine_mask)707 static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
708 {
709 	if (MEDIA_VER_FULL(gt->i915) != IP_VER(13, 0) || !HAS_ENGINE(gt, GSC0))
710 		return false;
711 
712 	if (!__HAS_ENGINE(engine_mask, GSC0))
713 		return false;
714 
715 	return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
716 }
717 
718 static intel_engine_mask_t
wa_14015076503_start(struct intel_gt * gt,intel_engine_mask_t engine_mask,bool first)719 wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
720 {
721 	if (!needs_wa_14015076503(gt, engine_mask))
722 		return engine_mask;
723 
724 	/*
725 	 * wa_14015076503: if the GSC FW is loaded, we need to alert it that
726 	 * we're going to do a GSC engine reset and then wait for 200ms for the
727 	 * FW to get ready for it. However, if this is the first ALL_ENGINES
728 	 * reset attempt and the GSC is not busy, we can try to instead reset
729 	 * the GuC and all the other engines individually to avoid the 200ms
730 	 * wait.
731 	 * Skipping the GSC engine is safe because, differently from other
732 	 * engines, the GSCCS only role is to forward the commands to the GSC
733 	 * FW, so it doesn't have any HW outside of the CS itself and therefore
734 	 * it has no state that we don't explicitly re-init on resume or on
735 	 * context switch LRC or power context). The HW for the GSC uC is
736 	 * managed by the GSC FW so we don't need to care about that.
737 	 */
738 	if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
739 		__reset_guc(gt);
740 		engine_mask = gt->info.engine_mask & ~BIT(GSC0);
741 	} else {
742 		intel_uncore_rmw(gt->uncore,
743 				 HECI_H_GS1(MTL_GSC_HECI2_BASE),
744 				 0, HECI_H_GS1_ER_PREP);
745 
746 		/* make sure the reset bit is clear when writing the CSR reg */
747 		intel_uncore_rmw(gt->uncore,
748 				 HECI_H_CSR(MTL_GSC_HECI2_BASE),
749 				 HECI_H_CSR_RST, HECI_H_CSR_IG);
750 		msleep(200);
751 	}
752 
753 	return engine_mask;
754 }
755 
756 static void
wa_14015076503_end(struct intel_gt * gt,intel_engine_mask_t engine_mask)757 wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
758 {
759 	if (!needs_wa_14015076503(gt, engine_mask))
760 		return;
761 
762 	intel_uncore_rmw(gt->uncore,
763 			 HECI_H_GS1(MTL_GSC_HECI2_BASE),
764 			 HECI_H_GS1_ER_PREP, 0);
765 }
766 
__intel_gt_reset(struct intel_gt * gt,intel_engine_mask_t engine_mask)767 static int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
768 {
769 	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
770 	reset_func reset;
771 	int ret = -ETIMEDOUT;
772 	int retry;
773 
774 	reset = intel_get_gpu_reset(gt);
775 	if (!reset)
776 		return -ENODEV;
777 
778 	/*
779 	 * If the power well sleeps during the reset, the reset
780 	 * request may be dropped and never completes (causing -EIO).
781 	 */
782 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
783 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
784 		intel_engine_mask_t reset_mask;
785 
786 		reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
787 
788 		GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
789 		ret = reset(gt, reset_mask, retry);
790 
791 		wa_14015076503_end(gt, reset_mask);
792 	}
793 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
794 
795 	return ret;
796 }
797 
intel_has_gpu_reset(const struct intel_gt * gt)798 bool intel_has_gpu_reset(const struct intel_gt *gt)
799 {
800 	if (!gt->i915->params.reset)
801 		return NULL;
802 
803 	return intel_get_gpu_reset(gt);
804 }
805 
intel_has_reset_engine(const struct intel_gt * gt)806 bool intel_has_reset_engine(const struct intel_gt *gt)
807 {
808 	if (gt->i915->params.reset < 2)
809 		return false;
810 
811 	return INTEL_INFO(gt->i915)->has_reset_engine;
812 }
813 
intel_reset_guc(struct intel_gt * gt)814 int intel_reset_guc(struct intel_gt *gt)
815 {
816 	int ret;
817 
818 	GEM_BUG_ON(!HAS_GT_UC(gt->i915));
819 
820 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
821 	ret = __reset_guc(gt);
822 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
823 
824 	return ret;
825 }
826 
827 /*
828  * Ensure irq handler finishes, and not run again.
829  * Also return the active request so that we only search for it once.
830  */
reset_prepare_engine(struct intel_engine_cs * engine)831 static void reset_prepare_engine(struct intel_engine_cs *engine)
832 {
833 	/*
834 	 * During the reset sequence, we must prevent the engine from
835 	 * entering RC6. As the context state is undefined until we restart
836 	 * the engine, if it does enter RC6 during the reset, the state
837 	 * written to the powercontext is undefined and so we may lose
838 	 * GPU state upon resume, i.e. fail to restart after a reset.
839 	 */
840 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
841 	if (engine->reset.prepare)
842 		engine->reset.prepare(engine);
843 }
844 
revoke_mmaps(struct intel_gt * gt)845 static void revoke_mmaps(struct intel_gt *gt)
846 {
847 	int i;
848 
849 	for (i = 0; i < gt->ggtt->num_fences; i++) {
850 		struct drm_vma_offset_node *node;
851 		struct i915_vma *vma;
852 		u64 vma_offset;
853 
854 		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
855 		if (!vma)
856 			continue;
857 
858 		if (!i915_vma_has_userfault(vma))
859 			continue;
860 
861 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
862 
863 		if (!vma->mmo)
864 			continue;
865 
866 		node = &vma->mmo->vma_node;
867 		vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
868 
869 		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
870 				    drm_vma_node_offset_addr(node) + vma_offset,
871 				    vma->size,
872 				    1);
873 	}
874 }
875 
reset_prepare(struct intel_gt * gt)876 static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
877 {
878 	struct intel_engine_cs *engine;
879 	intel_engine_mask_t awake = 0;
880 	enum intel_engine_id id;
881 
882 	/**
883 	 * For GuC mode with submission enabled, ensure submission
884 	 * is disabled before stopping ring.
885 	 *
886 	 * For GuC mode with submission disabled, ensure that GuC is not
887 	 * sanitized, do that after engine reset. reset_prepare()
888 	 * is followed by engine reset which in this mode requires GuC to
889 	 * process any CSB FIFO entries generated by the resets.
890 	 */
891 	if (intel_uc_uses_guc_submission(&gt->uc))
892 		intel_uc_reset_prepare(&gt->uc);
893 
894 	for_each_engine(engine, gt, id) {
895 		if (intel_engine_pm_get_if_awake(engine))
896 			awake |= engine->mask;
897 		reset_prepare_engine(engine);
898 	}
899 
900 	return awake;
901 }
902 
gt_revoke(struct intel_gt * gt)903 static void gt_revoke(struct intel_gt *gt)
904 {
905 	revoke_mmaps(gt);
906 }
907 
gt_reset(struct intel_gt * gt,intel_engine_mask_t stalled_mask)908 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
909 {
910 	struct intel_engine_cs *engine;
911 	enum intel_engine_id id;
912 	int err;
913 
914 	/*
915 	 * Everything depends on having the GTT running, so we need to start
916 	 * there.
917 	 */
918 	err = i915_ggtt_enable_hw(gt->i915);
919 	if (err)
920 		return err;
921 
922 	local_bh_disable();
923 	for_each_engine(engine, gt, id)
924 		__intel_engine_reset(engine, stalled_mask & engine->mask);
925 	local_bh_enable();
926 
927 	intel_uc_reset(&gt->uc, ALL_ENGINES);
928 
929 	intel_ggtt_restore_fences(gt->ggtt);
930 
931 	return err;
932 }
933 
reset_finish_engine(struct intel_engine_cs * engine)934 static void reset_finish_engine(struct intel_engine_cs *engine)
935 {
936 	if (engine->reset.finish)
937 		engine->reset.finish(engine);
938 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
939 
940 	intel_engine_signal_breadcrumbs(engine);
941 }
942 
reset_finish(struct intel_gt * gt,intel_engine_mask_t awake)943 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
944 {
945 	struct intel_engine_cs *engine;
946 	enum intel_engine_id id;
947 
948 	for_each_engine(engine, gt, id) {
949 		reset_finish_engine(engine);
950 		if (awake & engine->mask)
951 			intel_engine_pm_put(engine);
952 	}
953 
954 	intel_uc_reset_finish(&gt->uc);
955 }
956 
nop_submit_request(struct i915_request * request)957 static void nop_submit_request(struct i915_request *request)
958 {
959 	RQ_TRACE(request, "-EIO\n");
960 
961 	request = i915_request_mark_eio(request);
962 	if (request) {
963 		i915_request_submit(request);
964 		intel_engine_signal_breadcrumbs(request->engine);
965 
966 		i915_request_put(request);
967 	}
968 }
969 
__intel_gt_set_wedged(struct intel_gt * gt)970 static void __intel_gt_set_wedged(struct intel_gt *gt)
971 {
972 	struct intel_engine_cs *engine;
973 	intel_engine_mask_t awake;
974 	enum intel_engine_id id;
975 
976 	if (test_bit(I915_WEDGED, &gt->reset.flags))
977 		return;
978 
979 	GT_TRACE(gt, "start\n");
980 
981 	/*
982 	 * First, stop submission to hw, but do not yet complete requests by
983 	 * rolling the global seqno forward (since this would complete requests
984 	 * for which we haven't set the fence error to EIO yet).
985 	 */
986 	awake = reset_prepare(gt);
987 
988 	/* Even if the GPU reset fails, it should still stop the engines */
989 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
990 		intel_gt_reset_all_engines(gt);
991 
992 	for_each_engine(engine, gt, id)
993 		engine->submit_request = nop_submit_request;
994 
995 	/*
996 	 * Make sure no request can slip through without getting completed by
997 	 * either this call here to intel_engine_write_global_seqno, or the one
998 	 * in nop_submit_request.
999 	 */
1000 	synchronize_rcu_expedited();
1001 	set_bit(I915_WEDGED, &gt->reset.flags);
1002 
1003 	/* Mark all executing requests as skipped */
1004 	local_bh_disable();
1005 	for_each_engine(engine, gt, id)
1006 		if (engine->reset.cancel)
1007 			engine->reset.cancel(engine);
1008 	intel_uc_cancel_requests(&gt->uc);
1009 	local_bh_enable();
1010 
1011 	reset_finish(gt, awake);
1012 
1013 	GT_TRACE(gt, "end\n");
1014 }
1015 
set_wedged_work(struct work_struct * w)1016 static void set_wedged_work(struct work_struct *w)
1017 {
1018 	struct intel_gt *gt = container_of(w, struct intel_gt, wedge);
1019 	intel_wakeref_t wf;
1020 
1021 	with_intel_runtime_pm(gt->uncore->rpm, wf)
1022 		__intel_gt_set_wedged(gt);
1023 }
1024 
intel_gt_set_wedged(struct intel_gt * gt)1025 void intel_gt_set_wedged(struct intel_gt *gt)
1026 {
1027 	intel_wakeref_t wakeref;
1028 
1029 	if (test_bit(I915_WEDGED, &gt->reset.flags))
1030 		return;
1031 
1032 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1033 	mutex_lock(&gt->reset.mutex);
1034 
1035 	if (GEM_SHOW_DEBUG()) {
1036 		struct drm_printer p = drm_dbg_printer(&gt->i915->drm,
1037 						       DRM_UT_DRIVER, NULL);
1038 		struct intel_engine_cs *engine;
1039 		enum intel_engine_id id;
1040 
1041 		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
1042 		for_each_engine(engine, gt, id) {
1043 			if (intel_engine_is_idle(engine))
1044 				continue;
1045 
1046 			intel_engine_dump(engine, &p, "%s\n", engine->name);
1047 		}
1048 	}
1049 
1050 	__intel_gt_set_wedged(gt);
1051 
1052 	mutex_unlock(&gt->reset.mutex);
1053 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1054 }
1055 
__intel_gt_unset_wedged(struct intel_gt * gt)1056 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
1057 {
1058 	struct intel_gt_timelines *timelines = &gt->timelines;
1059 	struct intel_timeline *tl;
1060 	bool ok;
1061 
1062 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
1063 		return true;
1064 
1065 	/* Never fully initialised, recovery impossible */
1066 	if (intel_gt_has_unrecoverable_error(gt))
1067 		return false;
1068 
1069 	GT_TRACE(gt, "start\n");
1070 
1071 	/*
1072 	 * Before unwedging, make sure that all pending operations
1073 	 * are flushed and errored out - we may have requests waiting upon
1074 	 * third party fences. We marked all inflight requests as EIO, and
1075 	 * every execbuf since returned EIO, for consistency we want all
1076 	 * the currently pending requests to also be marked as EIO, which
1077 	 * is done inside our nop_submit_request - and so we must wait.
1078 	 *
1079 	 * No more can be submitted until we reset the wedged bit.
1080 	 */
1081 	spin_lock(&timelines->lock);
1082 	list_for_each_entry(tl, &timelines->active_list, link) {
1083 		struct dma_fence *fence;
1084 
1085 		fence = i915_active_fence_get(&tl->last_request);
1086 		if (!fence)
1087 			continue;
1088 
1089 		spin_unlock(&timelines->lock);
1090 
1091 		/*
1092 		 * All internal dependencies (i915_requests) will have
1093 		 * been flushed by the set-wedge, but we may be stuck waiting
1094 		 * for external fences. These should all be capped to 10s
1095 		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
1096 		 * in the worst case.
1097 		 */
1098 		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
1099 		dma_fence_put(fence);
1100 
1101 		/* Restart iteration after droping lock */
1102 		spin_lock(&timelines->lock);
1103 		tl = list_entry(&timelines->active_list, typeof(*tl), link);
1104 	}
1105 	spin_unlock(&timelines->lock);
1106 
1107 	/* We must reset pending GPU events before restoring our submission */
1108 	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
1109 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1110 		ok = intel_gt_reset_all_engines(gt) == 0;
1111 	if (!ok) {
1112 		/*
1113 		 * Warn CI about the unrecoverable wedged condition.
1114 		 * Time for a reboot.
1115 		 */
1116 		add_taint_for_CI(gt->i915, TAINT_WARN);
1117 		return false;
1118 	}
1119 
1120 	/*
1121 	 * Undo nop_submit_request. We prevent all new i915 requests from
1122 	 * being queued (by disallowing execbuf whilst wedged) so having
1123 	 * waited for all active requests above, we know the system is idle
1124 	 * and do not have to worry about a thread being inside
1125 	 * engine->submit_request() as we swap over. So unlike installing
1126 	 * the nop_submit_request on reset, we can do this from normal
1127 	 * context and do not require stop_machine().
1128 	 */
1129 	intel_engines_reset_default_submission(gt);
1130 
1131 	GT_TRACE(gt, "end\n");
1132 
1133 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
1134 	clear_bit(I915_WEDGED, &gt->reset.flags);
1135 
1136 	return true;
1137 }
1138 
intel_gt_unset_wedged(struct intel_gt * gt)1139 bool intel_gt_unset_wedged(struct intel_gt *gt)
1140 {
1141 	bool result;
1142 
1143 	mutex_lock(&gt->reset.mutex);
1144 	result = __intel_gt_unset_wedged(gt);
1145 	mutex_unlock(&gt->reset.mutex);
1146 
1147 	return result;
1148 }
1149 
do_reset(struct intel_gt * gt,intel_engine_mask_t stalled_mask)1150 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
1151 {
1152 	int err, i;
1153 
1154 	err = intel_gt_reset_all_engines(gt);
1155 	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
1156 		msleep(10 * (i + 1));
1157 		err = intel_gt_reset_all_engines(gt);
1158 	}
1159 	if (err)
1160 		return err;
1161 
1162 	return gt_reset(gt, stalled_mask);
1163 }
1164 
resume(struct intel_gt * gt)1165 static int resume(struct intel_gt *gt)
1166 {
1167 	struct intel_engine_cs *engine;
1168 	enum intel_engine_id id;
1169 	int ret;
1170 
1171 	for_each_engine(engine, gt, id) {
1172 		ret = intel_engine_resume(engine);
1173 		if (ret)
1174 			return ret;
1175 	}
1176 
1177 	return 0;
1178 }
1179 
1180 /**
1181  * intel_gt_reset - reset chip after a hang
1182  * @gt: #intel_gt to reset
1183  * @stalled_mask: mask of the stalled engines with the guilty requests
1184  * @reason: user error message for why we are resetting
1185  *
1186  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
1187  * on failure.
1188  *
1189  * Procedure is fairly simple:
1190  *   - reset the chip using the reset reg
1191  *   - re-init context state
1192  *   - re-init hardware status page
1193  *   - re-init ring buffer
1194  *   - re-init interrupt state
1195  *   - re-init display
1196  */
intel_gt_reset(struct intel_gt * gt,intel_engine_mask_t stalled_mask,const char * reason)1197 void intel_gt_reset(struct intel_gt *gt,
1198 		    intel_engine_mask_t stalled_mask,
1199 		    const char *reason)
1200 {
1201 	intel_engine_mask_t awake;
1202 	int ret;
1203 
1204 	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
1205 
1206 	might_sleep();
1207 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1208 
1209 	/*
1210 	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
1211 	 * critical section like gpu reset.
1212 	 */
1213 	gt_revoke(gt);
1214 
1215 	mutex_lock(&gt->reset.mutex);
1216 
1217 	/* Clear any previous failed attempts at recovery. Time to try again. */
1218 	if (!__intel_gt_unset_wedged(gt))
1219 		goto unlock;
1220 
1221 	if (reason)
1222 		gt_notice(gt, "Resetting chip for %s\n", reason);
1223 	atomic_inc(&gt->i915->gpu_error.reset_count);
1224 
1225 	awake = reset_prepare(gt);
1226 
1227 	if (!intel_has_gpu_reset(gt)) {
1228 		if (gt->i915->params.reset)
1229 			gt_err(gt, "GPU reset not supported\n");
1230 		else
1231 			gt_dbg(gt, "GPU reset disabled\n");
1232 		goto error;
1233 	}
1234 
1235 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1236 		intel_runtime_pm_disable_interrupts(gt->i915);
1237 
1238 	if (do_reset(gt, stalled_mask)) {
1239 		gt_err(gt, "Failed to reset chip\n");
1240 		goto taint;
1241 	}
1242 
1243 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1244 		intel_runtime_pm_enable_interrupts(gt->i915);
1245 
1246 	intel_overlay_reset(gt->i915);
1247 
1248 	/* sanitize uC after engine reset */
1249 	if (!intel_uc_uses_guc_submission(&gt->uc))
1250 		intel_uc_reset_prepare(&gt->uc);
1251 	/*
1252 	 * Next we need to restore the context, but we don't use those
1253 	 * yet either...
1254 	 *
1255 	 * Ring buffer needs to be re-initialized in the KMS case, or if X
1256 	 * was running at the time of the reset (i.e. we weren't VT
1257 	 * switched away).
1258 	 */
1259 	ret = intel_gt_init_hw(gt);
1260 	if (ret) {
1261 		gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret);
1262 		goto taint;
1263 	}
1264 
1265 	ret = resume(gt);
1266 	if (ret)
1267 		goto taint;
1268 
1269 finish:
1270 	reset_finish(gt, awake);
1271 unlock:
1272 	mutex_unlock(&gt->reset.mutex);
1273 	return;
1274 
1275 taint:
1276 	/*
1277 	 * History tells us that if we cannot reset the GPU now, we
1278 	 * never will. This then impacts everything that is run
1279 	 * subsequently. On failing the reset, we mark the driver
1280 	 * as wedged, preventing further execution on the GPU.
1281 	 * We also want to go one step further and add a taint to the
1282 	 * kernel so that any subsequent faults can be traced back to
1283 	 * this failure. This is important for CI, where if the
1284 	 * GPU/driver fails we would like to reboot and restart testing
1285 	 * rather than continue on into oblivion. For everyone else,
1286 	 * the system should still plod along, but they have been warned!
1287 	 */
1288 	add_taint_for_CI(gt->i915, TAINT_WARN);
1289 error:
1290 	__intel_gt_set_wedged(gt);
1291 	goto finish;
1292 }
1293 
1294 /**
1295  * intel_gt_reset_all_engines() - Reset all engines in the given gt.
1296  * @gt: the GT to reset all engines for.
1297  *
1298  * This function resets all engines within the given gt.
1299  *
1300  * Returns:
1301  * Zero on success, negative error code on failure.
1302  */
intel_gt_reset_all_engines(struct intel_gt * gt)1303 int intel_gt_reset_all_engines(struct intel_gt *gt)
1304 {
1305 	return __intel_gt_reset(gt, ALL_ENGINES);
1306 }
1307 
1308 /**
1309  * intel_gt_reset_engine() - Reset a specific engine within a gt.
1310  * @engine: engine to be reset.
1311  *
1312  * This function resets the specified engine within a gt.
1313  *
1314  * Returns:
1315  * Zero on success, negative error code on failure.
1316  */
intel_gt_reset_engine(struct intel_engine_cs * engine)1317 int intel_gt_reset_engine(struct intel_engine_cs *engine)
1318 {
1319 	return __intel_gt_reset(engine->gt, engine->mask);
1320 }
1321 
__intel_engine_reset_bh(struct intel_engine_cs * engine,const char * msg)1322 int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
1323 {
1324 	struct intel_gt *gt = engine->gt;
1325 	int ret;
1326 
1327 	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
1328 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1329 
1330 	if (intel_engine_uses_guc(engine))
1331 		return -ENODEV;
1332 
1333 	if (!intel_engine_pm_get_if_awake(engine))
1334 		return 0;
1335 
1336 	reset_prepare_engine(engine);
1337 
1338 	if (msg)
1339 		drm_notice(&engine->i915->drm,
1340 			   "Resetting %s for %s\n", engine->name, msg);
1341 	i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
1342 
1343 	ret = intel_gt_reset_engine(engine);
1344 	if (ret) {
1345 		/* If we fail here, we expect to fallback to a global reset */
1346 		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
1347 		goto out;
1348 	}
1349 
1350 	/*
1351 	 * The request that caused the hang is stuck on elsp, we know the
1352 	 * active request and can drop it, adjust head to skip the offending
1353 	 * request to resume executing remaining requests in the queue.
1354 	 */
1355 	__intel_engine_reset(engine, true);
1356 
1357 	/*
1358 	 * The engine and its registers (and workarounds in case of render)
1359 	 * have been reset to their default values. Follow the init_ring
1360 	 * process to program RING_MODE, HWSP and re-enable submission.
1361 	 */
1362 	ret = intel_engine_resume(engine);
1363 
1364 out:
1365 	intel_engine_cancel_stop_cs(engine);
1366 	reset_finish_engine(engine);
1367 	intel_engine_pm_put_async(engine);
1368 	return ret;
1369 }
1370 
1371 /**
1372  * intel_engine_reset - reset GPU engine to recover from a hang
1373  * @engine: engine to reset
1374  * @msg: reason for GPU reset; or NULL for no drm_notice()
1375  *
1376  * Reset a specific GPU engine. Useful if a hang is detected.
1377  * Returns zero on successful reset or otherwise an error code.
1378  *
1379  * Procedure is:
1380  *  - identifies the request that caused the hang and it is dropped
1381  *  - reset engine (which will force the engine to idle)
1382  *  - re-init/configure engine
1383  */
intel_engine_reset(struct intel_engine_cs * engine,const char * msg)1384 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1385 {
1386 	int err;
1387 
1388 	local_bh_disable();
1389 	err = __intel_engine_reset_bh(engine, msg);
1390 	local_bh_enable();
1391 
1392 	return err;
1393 }
1394 
intel_gt_reset_global(struct intel_gt * gt,u32 engine_mask,const char * reason)1395 static void intel_gt_reset_global(struct intel_gt *gt,
1396 				  u32 engine_mask,
1397 				  const char *reason)
1398 {
1399 	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1400 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1401 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1402 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1403 	struct intel_wedge_me w;
1404 
1405 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1406 
1407 	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
1408 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1409 
1410 	/* Use a watchdog to ensure that our reset completes */
1411 	intel_wedge_on_timeout(&w, gt, 60 * HZ) {
1412 		intel_display_reset_prepare(gt->i915);
1413 
1414 		intel_gt_reset(gt, engine_mask, reason);
1415 
1416 		intel_display_reset_finish(gt->i915);
1417 	}
1418 
1419 	if (!test_bit(I915_WEDGED, &gt->reset.flags))
1420 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1421 }
1422 
1423 /**
1424  * intel_gt_handle_error - handle a gpu error
1425  * @gt: the intel_gt
1426  * @engine_mask: mask representing engines that are hung
1427  * @flags: control flags
1428  * @fmt: Error message format string
1429  *
1430  * Do some basic checking of register state at error time and
1431  * dump it to the syslog.  Also call i915_capture_error_state() to make
1432  * sure we get a record and make it available in debugfs.  Fire a uevent
1433  * so userspace knows something bad happened (should trigger collection
1434  * of a ring dump etc.).
1435  */
intel_gt_handle_error(struct intel_gt * gt,intel_engine_mask_t engine_mask,unsigned long flags,const char * fmt,...)1436 void intel_gt_handle_error(struct intel_gt *gt,
1437 			   intel_engine_mask_t engine_mask,
1438 			   unsigned long flags,
1439 			   const char *fmt, ...)
1440 {
1441 	struct intel_engine_cs *engine;
1442 	intel_wakeref_t wakeref;
1443 	intel_engine_mask_t tmp;
1444 	char error_msg[80];
1445 	char *msg = NULL;
1446 
1447 	if (fmt) {
1448 		va_list args;
1449 
1450 		va_start(args, fmt);
1451 		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1452 		va_end(args);
1453 
1454 		msg = error_msg;
1455 	}
1456 
1457 	/*
1458 	 * In most cases it's guaranteed that we get here with an RPM
1459 	 * reference held, for example because there is a pending GPU
1460 	 * request that won't finish until the reset is done. This
1461 	 * isn't the case at least when we get here by doing a
1462 	 * simulated reset via debugfs, so get an RPM reference.
1463 	 */
1464 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1465 
1466 	engine_mask &= gt->info.engine_mask;
1467 
1468 	if (flags & I915_ERROR_CAPTURE) {
1469 		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
1470 		intel_gt_clear_error_registers(gt, engine_mask);
1471 	}
1472 
1473 	/*
1474 	 * Try engine reset when available. We fall back to full reset if
1475 	 * single reset fails.
1476 	 */
1477 	if (!intel_uc_uses_guc_submission(&gt->uc) &&
1478 	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1479 		local_bh_disable();
1480 		for_each_engine_masked(engine, gt, engine_mask, tmp) {
1481 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1482 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1483 					     &gt->reset.flags))
1484 				continue;
1485 
1486 			if (__intel_engine_reset_bh(engine, msg) == 0)
1487 				engine_mask &= ~engine->mask;
1488 
1489 			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1490 					      &gt->reset.flags);
1491 		}
1492 		local_bh_enable();
1493 	}
1494 
1495 	if (!engine_mask)
1496 		goto out;
1497 
1498 	/* Full reset needs the mutex, stop any other user trying to do so. */
1499 	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1500 		wait_event(gt->reset.queue,
1501 			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1502 		goto out; /* piggy-back on the other reset */
1503 	}
1504 
1505 	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1506 	synchronize_rcu_expedited();
1507 
1508 	/*
1509 	 * Prevent any other reset-engine attempt. We don't do this for GuC
1510 	 * submission the GuC owns the per-engine reset, not the i915.
1511 	 */
1512 	if (!intel_uc_uses_guc_submission(&gt->uc)) {
1513 		for_each_engine(engine, gt, tmp) {
1514 			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1515 						&gt->reset.flags))
1516 				wait_on_bit(&gt->reset.flags,
1517 					    I915_RESET_ENGINE + engine->id,
1518 					    TASK_UNINTERRUPTIBLE);
1519 		}
1520 	}
1521 
1522 	/* Flush everyone using a resource about to be clobbered */
1523 	synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1524 
1525 	intel_gt_reset_global(gt, engine_mask, msg);
1526 
1527 	if (!intel_uc_uses_guc_submission(&gt->uc)) {
1528 		for_each_engine(engine, gt, tmp)
1529 			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1530 					 &gt->reset.flags);
1531 	}
1532 	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1533 	smp_mb__after_atomic();
1534 	wake_up_all(&gt->reset.queue);
1535 
1536 out:
1537 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1538 }
1539 
_intel_gt_reset_lock(struct intel_gt * gt,int * srcu,bool retry)1540 static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
1541 {
1542 	might_lock(&gt->reset.backoff_srcu);
1543 	if (retry)
1544 		might_sleep();
1545 
1546 	rcu_read_lock();
1547 	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1548 		rcu_read_unlock();
1549 
1550 		if (!retry)
1551 			return -EBUSY;
1552 
1553 		if (wait_event_interruptible(gt->reset.queue,
1554 					     !test_bit(I915_RESET_BACKOFF,
1555 						       &gt->reset.flags)))
1556 			return -EINTR;
1557 
1558 		rcu_read_lock();
1559 	}
1560 	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1561 	rcu_read_unlock();
1562 
1563 	return 0;
1564 }
1565 
intel_gt_reset_trylock(struct intel_gt * gt,int * srcu)1566 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1567 {
1568 	return _intel_gt_reset_lock(gt, srcu, false);
1569 }
1570 
intel_gt_reset_lock_interruptible(struct intel_gt * gt,int * srcu)1571 int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
1572 {
1573 	return _intel_gt_reset_lock(gt, srcu, true);
1574 }
1575 
intel_gt_reset_unlock(struct intel_gt * gt,int tag)1576 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1577 __releases(&gt->reset.backoff_srcu)
1578 {
1579 	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1580 }
1581 
intel_gt_terminally_wedged(struct intel_gt * gt)1582 int intel_gt_terminally_wedged(struct intel_gt *gt)
1583 {
1584 	might_sleep();
1585 
1586 	if (!intel_gt_is_wedged(gt))
1587 		return 0;
1588 
1589 	if (intel_gt_has_unrecoverable_error(gt))
1590 		return -EIO;
1591 
1592 	/* Reset still in progress? Maybe we will recover? */
1593 	if (wait_event_interruptible(gt->reset.queue,
1594 				     !test_bit(I915_RESET_BACKOFF,
1595 					       &gt->reset.flags)))
1596 		return -EINTR;
1597 
1598 	return intel_gt_is_wedged(gt) ? -EIO : 0;
1599 }
1600 
intel_gt_set_wedged_on_init(struct intel_gt * gt)1601 void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1602 {
1603 	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1604 		     I915_WEDGED_ON_INIT);
1605 	intel_gt_set_wedged(gt);
1606 	i915_disable_error_state(gt->i915, -ENODEV);
1607 	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1608 
1609 	/* Wedged on init is non-recoverable */
1610 	add_taint_for_CI(gt->i915, TAINT_WARN);
1611 }
1612 
intel_gt_set_wedged_on_fini(struct intel_gt * gt)1613 void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1614 {
1615 	intel_gt_set_wedged(gt);
1616 	i915_disable_error_state(gt->i915, -ENODEV);
1617 	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1618 	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
1619 }
1620 
intel_gt_init_reset(struct intel_gt * gt)1621 void intel_gt_init_reset(struct intel_gt *gt)
1622 {
1623 	init_waitqueue_head(&gt->reset.queue);
1624 	mutex_init(&gt->reset.mutex);
1625 	init_srcu_struct(&gt->reset.backoff_srcu);
1626 	INIT_WORK(&gt->wedge, set_wedged_work);
1627 
1628 	/*
1629 	 * While undesirable to wait inside the shrinker, complain anyway.
1630 	 *
1631 	 * If we have to wait during shrinking, we guarantee forward progress
1632 	 * by forcing the reset. Therefore during the reset we must not
1633 	 * re-enter the shrinker. By declaring that we take the reset mutex
1634 	 * within the shrinker, we forbid ourselves from performing any
1635 	 * fs-reclaim or taking related locks during reset.
1636 	 */
1637 	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
1638 
1639 	/* no GPU until we are ready! */
1640 	__set_bit(I915_WEDGED, &gt->reset.flags);
1641 }
1642 
intel_gt_fini_reset(struct intel_gt * gt)1643 void intel_gt_fini_reset(struct intel_gt *gt)
1644 {
1645 	cleanup_srcu_struct(&gt->reset.backoff_srcu);
1646 }
1647 
intel_wedge_me(struct work_struct * work)1648 static void intel_wedge_me(struct work_struct *work)
1649 {
1650 	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1651 
1652 	gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
1653 	set_wedged_work(&w->gt->wedge);
1654 }
1655 
__intel_init_wedge(struct intel_wedge_me * w,struct intel_gt * gt,long timeout,const char * name)1656 void __intel_init_wedge(struct intel_wedge_me *w,
1657 			struct intel_gt *gt,
1658 			long timeout,
1659 			const char *name)
1660 {
1661 	w->gt = gt;
1662 	w->name = name;
1663 
1664 	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1665 	queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
1666 }
1667 
__intel_fini_wedge(struct intel_wedge_me * w)1668 void __intel_fini_wedge(struct intel_wedge_me *w)
1669 {
1670 	cancel_delayed_work_sync(&w->work);
1671 	destroy_delayed_work_on_stack(&w->work);
1672 	w->gt = NULL;
1673 }
1674 
1675 /*
1676  * Wa_22011802037 requires that we (or the GuC) ensure that no command
1677  * streamers are executing MI_FORCE_WAKE while an engine reset is initiated.
1678  */
intel_engine_reset_needs_wa_22011802037(struct intel_gt * gt)1679 bool intel_engine_reset_needs_wa_22011802037(struct intel_gt *gt)
1680 {
1681 	if (GRAPHICS_VER(gt->i915) < 11)
1682 		return false;
1683 
1684 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0))
1685 		return true;
1686 
1687 	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
1688 		return false;
1689 
1690 	return true;
1691 }
1692 
1693 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1694 #include "selftest_reset.c"
1695 #include "selftest_hangcheck.c"
1696 #endif
1697