// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/bsearch.h>

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_guc_print.h"
#include "intel_uc.h"
#include "i915_drv.h"

/*
 * The Additional Data Struct (ADS) holds pointers to the various buffers used
 * by the GuC. A single GEM object contains the ADS struct itself (guc_ads)
 * and all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |   ...                                 |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[];
} __packed;

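/*
 * The ADS blob can live in device local memory as well as in system memory
 * (see intel_guc_ads_create() below, which picks the iomem flavour of the
 * mapping for lmem objects), so all accesses go through iosys_map helpers
 * rather than plain pointer dereferences. A minimal usage sketch of the
 * accessors defined below (the field is real, the values illustrative):
 *
 *	ads_blob_write(guc, policies.is_valid, 1);
 *	valid = ads_blob_read(guc, policies.is_valid);
 */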
#define ads_blob_read(guc_, field_)					\
	iosys_map_rd_field(&(guc_)->ads_map, 0, struct __guc_ads_blob, field_)

#define ads_blob_write(guc_, field_, val_)				\
	iosys_map_wr_field(&(guc_)->ads_map, 0, struct __guc_ads_blob,	\
			   field_, val_)

#define info_map_write(map_, field_, val_) \
	iosys_map_wr_field(map_, 0, struct guc_gt_system_info, field_, val_)

#define info_map_read(map_, field_) \
	iosys_map_rd_field(map_, 0, struct guc_gt_system_info, field_)

static u32 guc_ads_regset_size(struct intel_guc *guc)
{
	GEM_BUG_ON(!guc->ads_regset_size);
	return guc->ads_regset_size;
}

static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}

static u32 guc_ads_capture_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->ads_capture_size);
}

static u32 guc_ads_private_data_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->fw.private_data_size);
}

static u32 guc_ads_regset_offset(struct intel_guc *guc)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_regset_offset(guc) +
		 guc_ads_regset_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_capture_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_golden_ctxt_offset(guc) +
		 guc_ads_golden_ctxt_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_capture_offset(guc) +
		 guc_ads_capture_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_blob_size(struct intel_guc *guc)
{
	return guc_ads_private_data_offset(guc) +
	       guc_ads_private_data_size(guc);
}
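
/*
 * A worked example of the offset arithmetic above, with purely illustrative
 * numbers: if the static part plus the regset ends at offset 0x1234, the
 * golden contexts start at PAGE_ALIGN(0x1234) = 0x2000; with 0x3000 bytes of
 * (already page-aligned) golden contexts, the capture lists then start at
 * PAGE_ALIGN(0x5000) = 0x5000, and so on. Every section after the static
 * part is therefore 4K aligned, matching the diagram at the top of the file.
 */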

static void guc_policies_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	u32 global_flags = 0;

	ads_blob_write(guc, policies.dpc_promote_time,
		       GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
	ads_blob_write(guc, policies.max_num_work_items,
		       GLOBAL_POLICY_MAX_NUM_WI);

	if (i915->params.reset < 2)
		global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	ads_blob_write(guc, policies.global_flags, global_flags);
	ads_blob_write(guc, policies.is_valid, 1);
}

void intel_guc_ads_print_policy_info(struct intel_guc *guc,
				     struct drm_printer *dp)
{
	if (unlikely(iosys_map_is_null(&guc->ads_map)))
		return;

	drm_printf(dp, "Global scheduling policies:\n");
	drm_printf(dp, "  DPC promote time   = %u\n",
		   ads_blob_read(guc, policies.dpc_promote_time));
	drm_printf(dp, "  Max num work items = %u\n",
		   ads_blob_read(guc, policies.max_num_work_items));
	drm_printf(dp, "  Flags              = %u\n",
		   ads_blob_read(guc, policies.global_flags));
}

static int guc_action_policies_update(struct intel_guc *guc, u32 policy_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}

int intel_guc_global_policies_update(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 scheduler_policies;
	intel_wakeref_t wakeref;
	int ret;

	if (iosys_map_is_null(&guc->ads_map))
		return -EOPNOTSUPP;

	scheduler_policies = ads_blob_read(guc, ads.scheduler_policies);
	GEM_BUG_ON(!scheduler_policies);

	guc_policies_init(guc);

	if (!intel_guc_is_ready(guc))
		return 0;

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		ret = guc_action_policies_update(guc, scheduler_policies);

	return ret;
}

static void guc_mapping_table_init(struct intel_gt *gt,
				   struct iosys_map *info_map)
{
	unsigned int i, j;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			info_map_write(info_map, mapping_table[i][j],
				       GUC_MAX_INSTANCES_PER_CLASS);

	for_each_engine(engine, gt, id) {
		u8 guc_class = engine_class_to_guc_class(engine->class);

		info_map_write(info_map, mapping_table[guc_class][ilog2(engine->logical_mask)],
			       engine->instance);
	}
}
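
/*
 * Example of the resulting table, for a hypothetical part where the logical
 * video instances 0 and 1 map to physical instances 0 and 2 (instance 1
 * fused off): mapping_table[GUC_VIDEO_CLASS][0] == 0,
 * mapping_table[GUC_VIDEO_CLASS][1] == 2, and every other slot holds
 * GUC_MAX_INSTANCES_PER_CLASS to mark it invalid.
 */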

/*
 * The save/restore register list must be pre-calculated to a temporary
 * buffer before it can be copied inside the ADS.
 */
struct temp_regset {
	/*
	 * ptr to the section of the storage for the engine currently being
	 * worked on
	 */
	struct guc_mmio_reg *registers;
	/* ptr to the base of the allocated storage for all engines */
	struct guc_mmio_reg *storage;
	u32 storage_used;
	u32 storage_max;
};
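
/*
 * Within each engine's section of the storage, entries are kept sorted by
 * mmio offset (see guc_mmio_reg_add() below), so that duplicates can be
 * found with a bsearch() before anything new is appended.
 */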

static int guc_mmio_reg_cmp(const void *a, const void *b)
{
	const struct guc_mmio_reg *ra = a;
	const struct guc_mmio_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct guc_mmio_reg * __must_check
__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
{
	u32 pos = regset->storage_used;
	struct guc_mmio_reg *slot;

	if (pos >= regset->storage_max) {
		size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
		struct guc_mmio_reg *r = krealloc(regset->storage,
						  size, GFP_KERNEL);
		if (!r) {
			WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
				  -ENOMEM);
			return ERR_PTR(-ENOMEM);
		}

		regset->registers = r + (regset->registers - regset->storage);
		regset->storage = r;
		regset->storage_max = size / sizeof(*slot);
	}

	slot = &regset->storage[pos];
	regset->storage_used++;
	*slot = *reg;

	return slot;
}

static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
					  struct temp_regset *regset,
					  u32 offset, u32 flags)
{
	u32 count = regset->storage_used - (regset->registers - regset->storage);
	struct guc_mmio_reg entry = {
		.offset = offset,
		.flags = flags,
	};
	struct guc_mmio_reg *slot;

	/*
	 * The mmio list is built using separate lists within the driver.
	 * It's possible that at some point we may attempt to add the same
	 * register more than once. Do not consider this an error; silently
	 * move on if the register is already in the list.
	 */
	if (bsearch(&entry, regset->registers, count,
		    sizeof(entry), guc_mmio_reg_cmp))
		return 0;

	slot = __mmio_reg_add(regset, &entry);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

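	/*
	 * Insertion sort: swap the new entry towards the front until the
	 * engine's sub-list is ordered by offset again, preserving the
	 * invariant the bsearch() above relies on.
	 */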
	while (slot-- > regset->registers) {
		GEM_BUG_ON(slot[0].offset == slot[1].offset);
		if (slot[1].offset > slot[0].offset)
			break;

		swap(slot[1], slot[0]);
	}

	return 0;
}

#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
	guc_mmio_reg_add(gt, \
			 regset, \
			 i915_mmio_reg_offset(reg), \
			 (masked) ? GUC_REGSET_MASKED : 0)

#define GUC_REGSET_STEERING(group, instance) ( \
	FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
	FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
	GUC_REGSET_NEEDS_STEERING \
)

static long __must_check guc_mcr_reg_add(struct intel_gt *gt,
					 struct temp_regset *regset,
					 i915_mcr_reg_t reg, u32 flags)
{
	u8 group, inst;

	/*
	 * The GuC doesn't have a default steering, so we need to explicitly
	 * steer all registers that need steering. However, we do not keep track
	 * of all the steering ranges, only of those that have a chance of using
	 * a non-default steering from the i915 pov. Instead of adding such
	 * tracking, it is easier to just program the default steering for all
	 * regs that don't need a non-default one.
	 */
	intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
	flags |= GUC_REGSET_STEERING(group, inst);

	return guc_mmio_reg_add(gt, regset, i915_mmio_reg_offset(reg), flags);
}

#define GUC_MCR_REG_ADD(gt, regset, reg, masked) \
	guc_mcr_reg_add(gt, \
			regset, \
			(reg), \
			(masked) ? GUC_REGSET_MASKED : 0)

static int guc_mmio_regset_init(struct temp_regset *regset,
				struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;
	const u32 base = engine->mmio_base;
	struct i915_wa_list *wal = &engine->wa_list;
	struct i915_wa *wa;
	unsigned int i;
	int ret = 0;

	/*
	 * Each engine's register list starts at a fresh offset within the
	 * shared storage
	 */
	regset->registers = regset->storage + regset->storage_used;

	ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
	ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
	ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);

	if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
	    CCS_MASK(engine->gt))
		ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);

	/* Be extra paranoid and include all whitelist registers. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
		ret |= GUC_MMIO_REG_ADD(gt, regset,
					RING_FORCE_TO_NONPRIV(base, i),
					false);

	/* Add in local MOCS registers */
	for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
			ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
		else
			ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);

	if (GRAPHICS_VER(engine->i915) >= 12) {
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false);
	}

	return ret ? -1 : 0;
}

static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct temp_regset temp_set = {};
	long total = 0;
	long ret;

	for_each_engine(engine, gt, id) {
		u32 used = temp_set.storage_used;

		ret = guc_mmio_regset_init(&temp_set, engine);
		if (ret < 0)
			goto fail_regset_init;

		guc->ads_regset_count[id] = temp_set.storage_used - used;
		total += guc->ads_regset_count[id];
	}

	guc->ads_regset = temp_set.storage;

	guc_dbg(guc, "Used %zu KB for temporary ADS regset\n",
		(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);

	return total * sizeof(struct guc_mmio_reg);

fail_regset_init:
	kfree(temp_set.storage);
	return ret;
}

static void guc_mmio_reg_state_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 addr_ggtt, offset;

	offset = guc_ads_regset_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;

	iosys_map_memcpy_to(&guc->ads_map, offset, guc->ads_regset,
			    guc->ads_regset_size);

	for_each_engine(engine, gt, id) {
		u32 count = guc->ads_regset_count[id];
		u8 guc_class;

		/* Class index is checked in class converter */
		GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);

		guc_class = engine_class_to_guc_class(engine->class);

		if (!count) {
			ads_blob_write(guc,
				       ads.reg_state_list[guc_class][engine->instance].address,
				       0);
			ads_blob_write(guc,
				       ads.reg_state_list[guc_class][engine->instance].count,
				       0);
			continue;
		}

		ads_blob_write(guc,
			       ads.reg_state_list[guc_class][engine->instance].address,
			       addr_ggtt);
		ads_blob_write(guc,
			       ads.reg_state_list[guc_class][engine->instance].count,
			       count);

		addr_ggtt += count * sizeof(struct guc_mmio_reg);
	}
}

static void fill_engine_enable_masks(struct intel_gt *gt,
				     struct iosys_map *info_map)
{
	info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));

	/* The GSC engine is an instance (6) of OTHER_CLASS */
	if (gt->engine[GSC0])
		info_map_write(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
			       BIT(gt->engine[GSC0]->instance));
}

#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
				    XEHP_LR_HW_CONTEXT_SIZE : \
				    LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
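
/*
 * Note that guc_prep_golden_context() runs twice: first from
 * intel_guc_ads_create() with a still-NULL ads_map, purely to size the
 * reservation, and then again from __guc_ads_init() to write the GGTT
 * addresses into the blob. For a feel of LRC_SKIP_SIZE, assuming
 * LRC_PPHWSP_SZ is a single page, the skipped prefix works out to
 * 4096 + 80 * 4 = 4416 bytes on pre-Xe_HP platforms and
 * 4096 + 96 * 4 = 4480 bytes on Xe_HP and later.
 */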
static int guc_prep_golden_context(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 addr_ggtt, offset;
	u32 total_size = 0, alloc_size, real_size;
	u8 engine_class, guc_class;
	struct guc_gt_system_info local_info;
	struct iosys_map info_map;

	/*
	 * Reserve the memory for the golden contexts and point GuC at it but
	 * leave it empty for now. The context data will be filled in later
	 * once there is something available to put there.
	 *
	 * Note that the HWSP and ring context are not included.
	 *
	 * Note also that the storage must be pinned in the GGTT, so that the
	 * address won't change after GuC has been told where to find it. The
	 * GuC will also validate that the LRC base + size fall within the
	 * allowed GGTT range.
	 */
	if (!iosys_map_is_null(&guc->ads_map)) {
		offset = guc_ads_golden_ctxt_offset(guc);
		addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
		info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
						 offsetof(struct __guc_ads_blob, system_info));
	} else {
		memset(&local_info, 0, sizeof(local_info));
		iosys_map_set_vaddr(&info_map, &local_info);
		fill_engine_enable_masks(gt, &info_map);
	}

	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		guc_class = engine_class_to_guc_class(engine_class);

		if (!info_map_read(&info_map, engine_enabled_masks[guc_class]))
			continue;

		real_size = intel_engine_context_size(gt, engine_class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		if (iosys_map_is_null(&guc->ads_map))
			continue;

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists context. This is required to
		 * allow the GuC to restore just the engine state when a
		 * watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
		ads_blob_write(guc, ads.eng_state_size[guc_class],
			       real_size - LRC_SKIP_SIZE(gt->i915));
		ads_blob_write(guc, ads.golden_context_lrca[guc_class],
			       addr_ggtt);

		addr_ggtt += alloc_size;
	}

	/* Make sure current size matches what we calculated previously */
	if (guc->ads_golden_ctxt_size)
		GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);

	return total_size;
}

static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->class != engine_class)
			continue;

		if (!engine->default_state)
			continue;

		return engine;
	}

	return NULL;
}

static void guc_init_golden_context(struct intel_guc *guc)
{
	struct intel_engine_cs *engine;
	struct intel_gt *gt = guc_to_gt(guc);
	unsigned long offset;
	u32 addr_ggtt, total_size = 0, alloc_size, real_size;
	u8 engine_class, guc_class;

	if (!intel_uc_uses_guc_submission(&gt->uc))
		return;

	GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));

	/*
	 * Go back and fill in the golden context data now that it is
	 * available.
	 */
	offset = guc_ads_golden_ctxt_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;

	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		guc_class = engine_class_to_guc_class(engine_class);
		if (!ads_blob_read(guc, system_info.engine_enabled_masks[guc_class]))
			continue;

		real_size = intel_engine_context_size(gt, engine_class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		engine = find_engine_state(gt, engine_class);
		if (!engine) {
			guc_err(guc, "No engine state recorded for class %d!\n",
				engine_class);
			ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
			ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0);
			continue;
		}

		GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
			   real_size - LRC_SKIP_SIZE(gt->i915));
		GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);

		addr_ggtt += alloc_size;

		shmem_read_to_iosys_map(engine->default_state, 0, &guc->ads_map,
					offset, real_size);
		offset += alloc_size;
	}

	GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}

static u32 guc_get_capture_engine_mask(struct iosys_map *info_map, u32 capture_class)
{
	u32 mask;

	switch (capture_class) {
	case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
		mask = info_map_read(info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
		mask |= info_map_read(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
		break;

	case GUC_CAPTURE_LIST_CLASS_VIDEO:
		mask = info_map_read(info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
		break;

	case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
		mask = info_map_read(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
		break;

	case GUC_CAPTURE_LIST_CLASS_BLITTER:
		mask = info_map_read(info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
		break;

	case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
		mask = info_map_read(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
		break;

	default:
		mask = 0;
	}

	return mask;
}

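/*
 * Like guc_prep_golden_context(), this function is called both before the
 * ADS blob exists (from intel_guc_ads_create(), to compute
 * guc->ads_capture_size) and after (from __guc_ads_init(), to populate the
 * capture list pointers); ads_is_mapped keys the two modes apart.
 */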
static int
guc_capture_prep_lists(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
	struct guc_gt_system_info local_info;
	struct iosys_map info_map;
	bool ads_is_mapped;
	size_t size = 0;
	void *ptr;
	int i, j;

	ads_is_mapped = !iosys_map_is_null(&guc->ads_map);
	if (ads_is_mapped) {
		capture_offset = guc_ads_capture_offset(guc);
		ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma);
		info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
						 offsetof(struct __guc_ads_blob, system_info));
	} else {
		memset(&local_info, 0, sizeof(local_info));
		iosys_map_set_vaddr(&info_map, &local_info);
		fill_engine_enable_masks(gt, &info_map);
	}

	/* first, set aside the first page for a capture_list with zero descriptors */
	total_size = PAGE_SIZE;
	if (ads_is_mapped) {
		if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
			iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
		null_ggtt = ads_ggtt + capture_offset;
		capture_offset += PAGE_SIZE;
	}

	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
			u32 engine_mask = guc_get_capture_engine_mask(&info_map, j);

			/* null list if we don't have said engine or list */
			if (!engine_mask) {
				if (ads_is_mapped) {
					ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
					ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
				}
				continue;
			}
			if (intel_guc_capture_getlistsize(guc, i,
							  GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
							  j, &size)) {
				if (ads_is_mapped)
					ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
				goto engine_instance_list;
			}
			total_size += size;
			if (ads_is_mapped) {
				if (total_size > guc->ads_capture_size ||
				    intel_guc_capture_getlist(guc, i,
							      GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
							      j, &ptr)) {
					ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
					continue;
				}
				ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
					       capture_offset);
				iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
				capture_offset += size;
			}
engine_instance_list:
			if (intel_guc_capture_getlistsize(guc, i,
							  GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
							  j, &size)) {
				if (ads_is_mapped)
					ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
				continue;
			}
			total_size += size;
			if (ads_is_mapped) {
				if (total_size > guc->ads_capture_size ||
				    intel_guc_capture_getlist(guc, i,
							      GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
							      j, &ptr)) {
					ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
					continue;
				}
				ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
					       capture_offset);
				iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
				capture_offset += size;
			}
		}
		if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) {
			if (ads_is_mapped)
				ads_blob_write(guc, ads.capture_global[i], null_ggtt);
			continue;
		}
		total_size += size;
		if (ads_is_mapped) {
			if (total_size > guc->ads_capture_size ||
			    intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
						      &ptr)) {
				ads_blob_write(guc, ads.capture_global[i], null_ggtt);
				continue;
			}
			ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
			iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
			capture_offset += size;
		}
	}

	if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
		guc_warn(guc, "ADS capture alloc size changed from %d to %d\n",
			 guc->ads_capture_size, PAGE_ALIGN(total_size));

	return PAGE_ALIGN(total_size);
}

static void __guc_ads_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
			offsetof(struct __guc_ads_blob, system_info));
	u32 base;

	/* GuC scheduling policies */
	guc_policies_init(guc);

	/* System info */
	fill_engine_enable_masks(gt, &info_map);

	ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED],
		       hweight8(gt->info.sseu.slice_mask));
	ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK],
		       gt->info.vdbox_sfc_access);

	if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
		u32 distdbreg = intel_uncore_read(gt->uncore,
						  GEN12_DIST_DBS_POPULATED);
		ads_blob_write(guc,
			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
			       ((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT)
				& GEN12_DOORBELLS_PER_SQIDI) + 1);
	}

	/* Golden contexts for re-initialising after a watchdog reset */
	guc_prep_golden_context(guc);

	guc_mapping_table_init(guc_to_gt(guc), &info_map);

	base = intel_guc_ggtt_offset(guc, guc->ads_vma);

	/* Lists for error capture debug */
	guc_capture_prep_lists(guc);

	/* ADS */
	ads_blob_write(guc, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(guc, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));

	/* MMIO save/restore list */
	guc_mmio_reg_state_init(guc);

	/* Private Data */
	ads_blob_write(guc, ads.private_data, base +
		       guc_ads_private_data_offset(guc));

	i915_gem_object_flush_map(guc->ads_vma->obj);
}

/**
 * intel_guc_ads_create() - allocates and initializes GuC ADS.
 * @guc: intel_guc struct
 *
 * GuC needs a memory block (the Additional Data Struct), where it will store
 * some data. Allocate and initialize such a memory block for GuC use.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	void *ads_blob;
	u32 size;
	int ret;

	GEM_BUG_ON(guc->ads_vma);

	/*
	 * The reg state list is built dynamically in system memory, to be
	 * copied into the final ADS blob on gt init/reset
	 */
	ret = guc_mmio_reg_state_create(guc);
	if (ret < 0)
		return ret;
	guc->ads_regset_size = ret;

	/* Likewise the golden contexts: */
	ret = guc_prep_golden_context(guc);
	if (ret < 0)
		return ret;
	guc->ads_golden_ctxt_size = ret;

	/* Likewise the capture lists: */
	ret = guc_capture_prep_lists(guc);
	if (ret < 0)
		return ret;
	guc->ads_capture_size = ret;

	/* Now the total size can be determined: */
	size = guc_ads_blob_size(guc);

	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
					     &ads_blob);
	if (ret)
		return ret;

	if (i915_gem_object_is_lmem(guc->ads_vma->obj))
		iosys_map_set_vaddr_iomem(&guc->ads_map, (void __iomem *)ads_blob);
	else
		iosys_map_set_vaddr(&guc->ads_map, ads_blob);

	__guc_ads_init(guc);

	return 0;
}

void intel_guc_ads_init_late(struct intel_guc *guc)
{
	/*
	 * The golden context setup requires the saved engine state from
	 * __engines_record_defaults(). However, that requires engines to be
	 * operational which means the ADS must already have been configured.
	 * Fortunately, the golden context state is not needed until a hang
	 * occurs, so it can be filled in during this late init phase.
	 */
	guc_init_golden_context(guc);
}

void intel_guc_ads_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
	iosys_map_clear(&guc->ads_map);
	kfree(guc->ads_regset);
}

static void guc_ads_private_data_reset(struct intel_guc *guc)
{
	u32 size;

	size = guc_ads_private_data_size(guc);
	if (!size)
		return;

	iosys_map_memset(&guc->ads_map, guc_ads_private_data_offset(guc),
			 0, size);
}

/**
 * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
 * @guc: intel_guc struct
 *
 * GuC stores some data in the ADS, which might be stale after a reset.
 * Reinitialize the whole ADS in case any part of it was corrupted during
 * the previous GuC run.
 */
void intel_guc_ads_reset(struct intel_guc *guc)
{
	if (!guc->ads_vma)
		return;

	__guc_ads_init(guc);

	guc_ads_private_data_reset(guc);
}

u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
{
	return intel_guc_ggtt_offset(guc, guc->ads_vma) +
		offsetof(struct __guc_ads_blob, engine_usage);
}

struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	u8 guc_class = engine_class_to_guc_class(engine->class);
	size_t offset = offsetof(struct __guc_ads_blob,
				 engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);

	return IOSYS_MAP_INIT_OFFSET(&guc->ads_map, offset);
}
966