// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel/intel-gtt.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"

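/*
 * Common early setup shared by the root GT and any extra GTs; callers
 * must have assigned gt->i915 and gt->irq_lock beforehand (see
 * intel_root_gt_init_early() and intel_gt_tile_setup()).
 */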
void intel_gt_common_init_early(struct intel_gt *gt)
{
	spin_lock_init(gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_init_tlb(gt);
	intel_gt_pm_init_early(gt);

	intel_wopcm_init_early(&gt->wopcm);
	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

/* Preliminary initialization of Tile 0 */
int intel_root_gt_init_early(struct drm_i915_private *i915)
{
	struct intel_gt *gt;

	gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return -ENOMEM;

	i915->gt[0] = gt;

	gt->i915 = i915;
	gt->uncore = &i915->uncore;
	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;

	intel_gt_common_init_early(gt);

	return 0;
}

static int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int instance = gt->info.id;
	int id = INTEL_REGION_LMEM_0 + instance;
	struct intel_memory_region *mem;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
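		/*
		 * -ENODEV simply means this platform/tile has no local
		 * memory; that is not a probe failure.
		 */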
		if (err == -ENODEV)
			return 0;

		gt_err(gt, "Failed to setup region(%d) type=%d\n",
		       err, INTEL_MEMORY_LOCAL);
		return err;
	}

	mem->id = id;
	mem->instance = instance;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	/* Media GT shares primary GT's GGTT */
	if (gt->type == GT_MEDIA) {
		gt->ggtt = to_gt(gt->i915)->ggtt;
	} else {
		gt->ggtt = i915_ggtt_create(gt->i915);
		if (IS_ERR(gt->ggtt))
			return PTR_ERR(gt->ggtt);
	}

	list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);

	return 0;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);
	intel_gt_mcr_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   HSW_MI_PREDICATE_RESULT_2,
				   INTEL_INFO(i915)->gt == 3 ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
	/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
	if (GRAPHICS_VER(gt->i915) < 11)
		return INVALID_MMIO_REG;

	return gt->type == GT_MEDIA ?
		MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		intel_uncore_write(uncore, PGTBL_ER, 0);

	if (GRAPHICS_VER(i915) < 4)
		intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
	else
		intel_uncore_write(uncore, IPEIR_I965, 0);

	intel_uncore_write(uncore, EIR, 0);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * Some errors might have become stuck;
		 * mask them.
		 */
		gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
		intel_uncore_rmw(uncore, EMR, 0, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	/*
	 * For the media GT, this ring fault register is not replicated,
	 * so don't do multicast/replicated register read/write operation on it.
	 */
	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
		intel_uncore_rmw(uncore, XELPMP_RING_FAULT_REG,
				 RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore,
					  XELPMP_RING_FAULT_REG);

	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
					   RING_FAULT_VALID, 0);
		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);

	} else if (GRAPHICS_VER(i915) >= 12) {
		intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		u32 fault;

		fault = GEN6_RING_FAULT_REG_READ(engine);

		if (fault & RING_FAULT_VALID) {
			gt_dbg(gt, "Unexpected fault\n"
			       "\tAddr: 0x%08x\n"
			       "\tAddress space: %s\n"
			       "\tSource ID: %d\n"
			       "\tType: %d\n",
			       fault & RING_FAULT_VADDR_MASK,
			       fault & RING_FAULT_GTTSEL_MASK ?
			       "GGTT" : "PPGTT",
			       REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
			       REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
		}
	}
}

static void gen8_report_fault(struct intel_gt *gt, u32 fault,
			      u32 fault_data0, u32 fault_data1)
{
	u64 fault_addr;

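	/*
	 * fault_data0 supplies bits [43:12] of the faulting address; the
	 * bits above that come from the FAULT_VA_HIGH_BITS field of
	 * fault_data1, shifted into place.
	 */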
	fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
		((u64)fault_data0 << 12);

	gt_dbg(gt, "Unexpected fault\n"
	       "\tAddr: 0x%08x_%08x\n"
	       "\tAddress space: %s\n"
	       "\tEngine ID: %d\n"
	       "\tSource ID: %d\n"
	       "\tType: %d\n",
	       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
	       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
	       REG_FIELD_GET(RING_FAULT_ENGINE_ID_MASK, fault),
	       REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
	       REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
}

static void xehp_check_faults(struct intel_gt *gt)
{
	u32 fault;

	/*
	 * Although the fault register now lives in an MCR register range,
	 * the GAM registers are special and we only truly need to read
	 * the "primary" GAM instance rather than handling each instance
	 * individually.  intel_gt_mcr_read_any() will automatically steer
	 * toward the primary instance.
	 */
	fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	if (fault & RING_FAULT_VALID)
		gen8_report_fault(gt, fault,
				  intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0),
				  intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1));
}

static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID)
		gen8_report_fault(gt, fault,
				  intel_uncore_read(uncore, fault_data0_reg),
				  intel_uncore_read(uncore, fault_data1_reg));
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
		xehp_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_TAIL(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

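/*
 * Make CPU writes globally visible; on GMCH platforms (graphics version
 * < 6) the chipset additionally needs an explicit GTT flush.
 */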
void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_ggtt_gmch_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_gsc_init(&gt->gsc, gt->i915);

	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
	intel_gt_sysfs_register(gt);
}

static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

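	/*
	 * Fallback order: device-local memory first, then stolen memory
	 * (skipped on MTL: Wa_22018444074), finally internal system pages.
	 */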
	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		gt_err(gt, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

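/*
 * Address space used for GT-internal (kernel) work: a full PPGTT where
 * the hardware supports more than aliasing PPGTT, otherwise a reference
 * on the global GTT.
 */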
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the GPU during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

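		/*
		 * Deliberate fallthrough below: even on error the request is
		 * still recorded and added, so it gets flushed and released
		 * by the common exit path.
		 */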
err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

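/*
 * Wedge the GT and quiesce it as for suspend; used both on the init
 * error path and on driver removal.
 */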
static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

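	/* A negative value here is an error propagated from retirement */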
	if (timeout)
		return timeout;

	if (remaining_timeout < 0)
		remaining_timeout = 0;

	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}

int intel_gt_init(struct intel_gt *gt)
{
	int err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = intel_gt_init_hwconfig(gt);
	if (err)
		gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	intel_migrate_init(&gt->migrate, gt);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}
ALLOW_ERROR_INJECTION(intel_gt_init, ERRNO);

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_gt_sysfs_unregister(gt);
	intel_rps_driver_unregister(&gt->rps);
	intel_gsc_fini(&gt->gsc);

	/*
	 * If we unload the driver and wedge before the GSC worker is complete,
	 * the worker will hit an error on its submission to the GSC engine and
	 * then exit. This is hard to hit for a user, but it is reproducible
	 * when skipping selftests. The error is handled gracefully by the
	 * worker, so there are no functional issues, but we still end up with
	 * an error message in dmesg, which is something we want to avoid as
	 * this is a supported scenario. We could modify the worker to better
	 * handle a wedging occurring during its execution, but that gets
	 * complicated for a couple of reasons:
	 * - We do want the error on runtime wedging, because there are
	 *   implications for subsystems outside of GT (i.e., PXP, HDCP), it's
	 *   only the error on driver unload that we want to silence.
	 * - The worker is responsible for multiple submissions (GSC FW load,
	 *   HuC auth, SW proxy), so all of those will have to be adapted to
	 *   handle the wedged_on_fini scenario.
	 * Therefore, it's much simpler to just wait for the worker to be done
	 * before wedging on driver removal, also considering that the worker
	 * will likely already be idle in the great majority of non-selftest
	 * scenarios.
	 */
	intel_gsc_uc_flush_work(&gt->uc.gsc);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		intel_gt_reset_all_engines(gt);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
	intel_gt_fini_hwconfig(gt);
}

void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	for_each_gt(gt, i915, id) {
		intel_uc_driver_late_release(&gt->uc);
		intel_gt_fini_requests(gt);
		intel_gt_fini_reset(gt);
		intel_gt_fini_timelines(gt);
		intel_gt_fini_tlb(gt);
		intel_engines_free(gt);
	}
}

static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
	int ret;

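	/*
	 * The root GT's uncore and irq_lock were already set up in
	 * intel_root_gt_init_early(); extra GTs allocate their own here.
	 */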
	if (!gt_is_root(gt)) {
		struct intel_uncore *uncore;
		spinlock_t *irq_lock;

		uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
		if (!uncore)
			return -ENOMEM;

		irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
		if (!irq_lock)
			return -ENOMEM;

		gt->uncore = uncore;
		gt->irq_lock = irq_lock;

		intel_gt_common_init_early(gt);
	}

	intel_uncore_init_early(gt->uncore, gt);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
		return ret;

	gt->phys_addr = phys_addr;

	return 0;
}

int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_gt *gt = to_gt(i915);
	const struct intel_gt_definition *gtdef;
	phys_addr_t phys_addr;
	unsigned int mmio_bar;
	unsigned int i;
	int ret;

	mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
	phys_addr = pci_resource_start(pdev, mmio_bar);

	/*
	 * We always have at least one primary GT on any device
	 * and it has already been initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	gt_dbg(gt, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

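	/* The extra_gt_list is terminated by an entry with a NULL name */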
	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->type = gtdef->type;
		gt->info.engine_mask = gtdef->engine_mask;
		gt->info.id = i;

		gt_dbg(gt, "Setting up %s\n", gt->name);
		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
						  gtdef->mapping_base,
						  SZ_16M,
						  pci_resource_len(pdev, mmio_bar)))) {
			ret = -ENODEV;
			goto err;
		}

		switch (gtdef->type) {
		case GT_TILE:
			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
			break;

		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
						     gtdef->gsi_offset);
			break;

		case GT_PRIMARY:
			/* Primary GT should not appear in extra GT list */
		default:
			MISSING_CASE(gtdef->type);
			ret = -ENODEV;
		}

		if (ret)
			goto err;

		i915->gt[i] = gt;
	}

	return 0;

err:
	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
	return ret;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;
	int ret;

	for_each_gt(gt, i915, id) {
		ret = intel_gt_probe_lmem(gt);
		if (ret)
			return ret;
	}

	return 0;
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent)
{
	/*
	 * Wa_22016122933: always return I915_MAP_WC for Media
	 * version 13.0 when the object is on the Media GT
	 */
	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
		return I915_MAP_WC;
	if (HAS_LLC(gt->i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}

bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
{
	/* Wa_16018031267, Wa_16018063123 */
	return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
}

bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
	return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
}

static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
{
	struct intel_engine_cs *engine = gt->engine[BCS0];

	if (engine && engine->bind_context)
		engine->bind_context_ready = ready;
}

/**
 * intel_gt_bind_context_set_ready - Set the context binding as ready
 *
 * @gt: GT structure
 *
 * This function marks the binder context as ready.
 */
void intel_gt_bind_context_set_ready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, true);
}

/**
 * intel_gt_bind_context_set_unready - Set the context binding as not ready
 * @gt: GT structure
 *
 * This function marks the binder context as not ready.
 */
void intel_gt_bind_context_set_unready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, false);
}

/**
 * intel_gt_is_bind_context_ready - Check if context binding is ready
 *
 * @gt: GT structure
 *
 * This function returns binder context's ready status.
 */
bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
{
	struct intel_engine_cs *engine = gt->engine[BCS0];

	if (engine)
		return engine->bind_context_ready;

	return false;
}
1067