// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/bsearch.h>

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_uc.h"
#include "i915_drv.h"

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
 * all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |   ...                                 |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[];
} __packed;

#define ads_blob_read(guc_, field_)					\
	iosys_map_rd_field(&(guc_)->ads_map, 0, struct __guc_ads_blob, field_)

#define ads_blob_write(guc_, field_, val_)				\
	iosys_map_wr_field(&(guc_)->ads_map, 0, struct __guc_ads_blob,	\
			   field_, val_)

#define info_map_write(map_, field_, val_) \
	iosys_map_wr_field(map_, 0, struct guc_gt_system_info, field_, val_)

#define info_map_read(map_, field_) \
	iosys_map_rd_field(map_, 0, struct guc_gt_system_info, field_)
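
/*
 * The ADS blob may live in device-local memory behind an I/O mapping, so all
 * field accesses go through the iosys_map helpers above rather than plain
 * pointer dereferences. Illustrative use (the first line is taken from
 * guc_policies_init() below, the second is a hypothetical read-back):
 *
 *	ads_blob_write(guc, policies.is_valid, 1);
 *	valid = ads_blob_read(guc, policies.is_valid);
 *
 * The info_map_* variants operate on a struct guc_gt_system_info that is
 * either embedded in the blob or a temporary on-stack copy (see
 * guc_prep_golden_context()).
 */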

static u32 guc_ads_regset_size(struct intel_guc *guc)
{
	GEM_BUG_ON(!guc->ads_regset_size);
	return guc->ads_regset_size;
}

static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}

static u32 guc_ads_capture_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->ads_capture_size);
}

static u32 guc_ads_private_data_size(struct intel_guc *guc)
{
	return PAGE_ALIGN(guc->fw.private_data_size);
}

static u32 guc_ads_regset_offset(struct intel_guc *guc)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_regset_offset(guc) +
		 guc_ads_regset_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_capture_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_golden_ctxt_offset(guc) +
		 guc_ads_golden_ctxt_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
	u32 offset;

	offset = guc_ads_capture_offset(guc) +
		 guc_ads_capture_size(guc);

	return PAGE_ALIGN(offset);
}

static u32 guc_ads_blob_size(struct intel_guc *guc)
{
	return guc_ads_private_data_offset(guc) +
	       guc_ads_private_data_size(guc);
}
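
/*
 * Each dynamic section starts at the page-aligned end of the previous one, so
 * the helpers above mirror the layout diagram at the top of this file. As a
 * purely hypothetical example: a 3 KiB regset would place the golden contexts
 * at PAGE_ALIGN(offsetof(struct __guc_ads_blob, regset) + 3K), the capture
 * lists at the page-aligned end of the golden contexts, and so on, with the
 * private data last and the total blob size itself page aligned.
 */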

static void guc_policies_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	u32 global_flags = 0;

	ads_blob_write(guc, policies.dpc_promote_time,
		       GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
	ads_blob_write(guc, policies.max_num_work_items,
		       GLOBAL_POLICY_MAX_NUM_WI);

	if (i915->params.reset < 2)
		global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	ads_blob_write(guc, policies.global_flags, global_flags);
	ads_blob_write(guc, policies.is_valid, 1);
}

void intel_guc_ads_print_policy_info(struct intel_guc *guc,
				     struct drm_printer *dp)
{
	if (unlikely(iosys_map_is_null(&guc->ads_map)))
		return;

	drm_printf(dp, "Global scheduling policies:\n");
	drm_printf(dp, "  DPC promote time   = %u\n",
		   ads_blob_read(guc, policies.dpc_promote_time));
	drm_printf(dp, "  Max num work items = %u\n",
		   ads_blob_read(guc, policies.max_num_work_items));
	drm_printf(dp, "  Flags              = %u\n",
		   ads_blob_read(guc, policies.global_flags));
}

static int guc_action_policies_update(struct intel_guc *guc, u32 policy_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}

int intel_guc_global_policies_update(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 scheduler_policies;
	intel_wakeref_t wakeref;
	int ret;

	if (iosys_map_is_null(&guc->ads_map))
		return -EOPNOTSUPP;

	scheduler_policies = ads_blob_read(guc, ads.scheduler_policies);
	GEM_BUG_ON(!scheduler_policies);

	guc_policies_init(guc);

	if (!intel_guc_is_ready(guc))
		return 0;

	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		ret = guc_action_policies_update(guc, scheduler_policies);

	return ret;
}

static void guc_mapping_table_init(struct intel_gt *gt,
				   struct iosys_map *info_map)
{
	unsigned int i, j;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			info_map_write(info_map, mapping_table[i][j],
				       GUC_MAX_INSTANCES_PER_CLASS);

	for_each_engine(engine, gt, id) {
		u8 guc_class = engine_class_to_guc_class(engine->class);

		info_map_write(info_map, mapping_table[guc_class][ilog2(engine->logical_mask)],
			       engine->instance);
	}
}
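
/*
 * The table filled in above maps a (GuC engine class, logical instance) pair
 * to the hardware instance number; slots for engines that are not present are
 * left at GUC_MAX_INSTANCES_PER_CLASS, which the GuC treats as invalid.
 */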

/*
 * The save/restore register list must be pre-calculated to a temporary
 * buffer before it can be copied inside the ADS.
 */
struct temp_regset {
	/*
	 * ptr to the section of the storage for the engine currently being
	 * worked on
	 */
	struct guc_mmio_reg *registers;
	/* ptr to the base of the allocated storage for all engines */
	struct guc_mmio_reg *storage;
	u32 storage_used;
	u32 storage_max;
};

static int guc_mmio_reg_cmp(const void *a, const void *b)
{
	const struct guc_mmio_reg *ra = a;
	const struct guc_mmio_reg *rb = b;

	return (int)ra->offset - (int)rb->offset;
}

static struct guc_mmio_reg * __must_check
__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
{
	u32 pos = regset->storage_used;
	struct guc_mmio_reg *slot;

	if (pos >= regset->storage_max) {
		size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
		struct guc_mmio_reg *r = krealloc(regset->storage,
						  size, GFP_KERNEL);
		if (!r) {
			WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
				  -ENOMEM);
			return ERR_PTR(-ENOMEM);
		}

		regset->registers = r + (regset->registers - regset->storage);
		regset->storage = r;
		regset->storage_max = size / sizeof(*slot);
	}

	slot = &regset->storage[pos];
	regset->storage_used++;
	*slot = *reg;

	return slot;
}
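
/*
 * Note that the storage above grows in whole-page steps via krealloc(), and
 * because krealloc() may move the allocation, the per-engine 'registers'
 * pointer is re-based against the new storage address before anything else
 * uses it.
 */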

static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
					  struct temp_regset *regset,
					  u32 offset, u32 flags)
{
	u32 count = regset->storage_used - (regset->registers - regset->storage);
	struct guc_mmio_reg entry = {
		.offset = offset,
		.flags = flags,
	};
	struct guc_mmio_reg *slot;

	/*
	 * The mmio list is built using separate lists within the driver.
	 * It's possible that at some point we may attempt to add the same
	 * register more than once. Do not consider this an error; silently
	 * move on if the register is already in the list.
	 */
	if (bsearch(&entry, regset->registers, count,
		    sizeof(entry), guc_mmio_reg_cmp))
		return 0;

	slot = __mmio_reg_add(regset, &entry);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	while (slot-- > regset->registers) {
		GEM_BUG_ON(slot[0].offset == slot[1].offset);
		if (slot[1].offset > slot[0].offset)
			break;

		swap(slot[1], slot[0]);
	}

	return 0;
}
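
/*
 * The swap loop above is a simple insertion-sort step: the new entry is
 * bubbled backwards until the current engine's sub-list is ordered by mmio
 * offset again. Keeping the sub-list sorted is what makes the bsearch()-based
 * duplicate check at the top of guc_mmio_reg_add() valid.
 */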

#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
	guc_mmio_reg_add(gt, \
			 regset, \
			 i915_mmio_reg_offset(reg), \
			 (masked) ? GUC_REGSET_MASKED : 0)

#define GUC_REGSET_STEERING(group, instance) ( \
	FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
	FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
	GUC_REGSET_NEEDS_STEERING \
)

static long __must_check guc_mcr_reg_add(struct intel_gt *gt,
					 struct temp_regset *regset,
					 i915_mcr_reg_t reg, u32 flags)
{
	u8 group, inst;

	/*
	 * The GuC doesn't have a default steering, so we need to explicitly
	 * steer all registers that need steering. However, we do not keep track
	 * of all the steering ranges, only of those that have a chance of using
	 * a non-default steering from the i915 pov. Instead of adding such
	 * tracking, it is easier to just program the default steering for all
	 * regs that don't need a non-default one.
	 */
	intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
	flags |= GUC_REGSET_STEERING(group, inst);

	return guc_mmio_reg_add(gt, regset, i915_mmio_reg_offset(reg), flags);
}

#define GUC_MCR_REG_ADD(gt, regset, reg, masked) \
	guc_mcr_reg_add(gt, \
			 regset, \
			 (reg), \
			 (masked) ? GUC_REGSET_MASKED : 0)

static int guc_mmio_regset_init(struct temp_regset *regset,
				struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;
	const u32 base = engine->mmio_base;
	struct i915_wa_list *wal = &engine->wa_list;
	struct i915_wa *wa;
	unsigned int i;
	int ret = 0;

	/*
	 * Each engine's registers point to a new start relative to
	 * storage
	 */
	regset->registers = regset->storage + regset->storage_used;

	ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
	ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
	ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);

	if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
	    CCS_MASK(engine->gt))
		ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);

	/* Be extra paranoid and include all whitelist registers. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
		ret |= GUC_MMIO_REG_ADD(gt, regset,
					RING_FORCE_TO_NONPRIV(base, i),
					false);

	/* add in local MOCS registers */
	for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
		if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
			ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
		else
			ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);

	if (GRAPHICS_VER(engine->i915) >= 12) {
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false);
		ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false);
	}

	return ret ? -1 : 0;
}
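
/*
 * An individual GUC_MMIO_REG_ADD() failure is OR-ed into 'ret' rather than
 * aborting the walk, so guc_mmio_regset_init() still visits every register
 * (the error path in __mmio_reg_add() has already warned about the incomplete
 * list) and then reports a single -1 to the caller.
 */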

static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct temp_regset temp_set = {};
	long total = 0;
	long ret;

	for_each_engine(engine, gt, id) {
		u32 used = temp_set.storage_used;

		ret = guc_mmio_regset_init(&temp_set, engine);
		if (ret < 0)
			goto fail_regset_init;

		guc->ads_regset_count[id] = temp_set.storage_used - used;
		total += guc->ads_regset_count[id];
	}

	guc->ads_regset = temp_set.storage;

	drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
		(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);

	return total * sizeof(struct guc_mmio_reg);

fail_regset_init:
	kfree(temp_set.storage);
	return ret;
}
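
/*
 * Register state handling is split in two: guc_mmio_reg_state_create() runs
 * once at ADS creation time, building the whole save/restore list in regular
 * kernel memory and returning its size in bytes (stored in ads_regset_size),
 * while guc_mmio_reg_state_init() below copies that prebuilt list into the
 * blob and fixes up the per-engine GGTT addresses on every ADS (re)init.
 */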

static void guc_mmio_reg_state_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 addr_ggtt, offset;

	offset = guc_ads_regset_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;

	iosys_map_memcpy_to(&guc->ads_map, offset, guc->ads_regset,
			    guc->ads_regset_size);

	for_each_engine(engine, gt, id) {
		u32 count = guc->ads_regset_count[id];
		u8 guc_class;

		/* Class index is checked in class converter */
		GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);

		guc_class = engine_class_to_guc_class(engine->class);

		if (!count) {
			ads_blob_write(guc,
				       ads.reg_state_list[guc_class][engine->instance].address,
				       0);
			ads_blob_write(guc,
				       ads.reg_state_list[guc_class][engine->instance].count,
				       0);
			continue;
		}

		ads_blob_write(guc,
			       ads.reg_state_list[guc_class][engine->instance].address,
			       addr_ggtt);
		ads_blob_write(guc,
			       ads.reg_state_list[guc_class][engine->instance].count,
			       count);

		addr_ggtt += count * sizeof(struct guc_mmio_reg);
	}
}
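
/*
 * The engines are walked in the same order as in guc_mmio_reg_state_create(),
 * so advancing addr_ggtt by count * sizeof(struct guc_mmio_reg) per engine
 * points each reg_state_list[class][instance] entry at that engine's slice of
 * the copied regset.
 */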

static void fill_engine_enable_masks(struct intel_gt *gt,
				     struct iosys_map *info_map)
{
	info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
	info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));

	/* The GSC engine is an instance (6) of OTHER_CLASS */
	if (gt->engine[GSC0])
		info_map_write(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
			       BIT(gt->engine[GSC0]->instance));
}

#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
				    XEHP_LR_HW_CONTEXT_SIZE : \
				    LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
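
/*
 * LRC_SKIP_SIZE() is the part of the context image that sits before the
 * engine state proper: the per-process HWSP page(s) plus the execlists
 * register state (80 dwords, or 96 dwords on Xe_HP and later). The GuC only
 * needs the engine state for a watchdog reset, so this amount is subtracted
 * from the full context size when eng_state_size is reported below.
 */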
static int guc_prep_golden_context(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	u32 addr_ggtt, offset;
	u32 total_size = 0, alloc_size, real_size;
	u8 engine_class, guc_class;
	struct guc_gt_system_info local_info;
	struct iosys_map info_map;

	/*
	 * Reserve the memory for the golden contexts and point GuC at it but
	 * leave it empty for now. The context data will be filled in later
	 * once there is something available to put there.
	 *
	 * Note that the HWSP and ring context are not included.
	 *
	 * Note also that the storage must be pinned in the GGTT, so that the
	 * address won't change after GuC has been told where to find it. The
	 * GuC will also validate that the LRC base + size fall within the
	 * allowed GGTT range.
	 */
	if (!iosys_map_is_null(&guc->ads_map)) {
		offset = guc_ads_golden_ctxt_offset(guc);
		addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
		info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
						 offsetof(struct __guc_ads_blob, system_info));
	} else {
		memset(&local_info, 0, sizeof(local_info));
		iosys_map_set_vaddr(&info_map, &local_info);
		fill_engine_enable_masks(gt, &info_map);
	}

	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		guc_class = engine_class_to_guc_class(engine_class);

		if (!info_map_read(&info_map, engine_enabled_masks[guc_class]))
			continue;

		real_size = intel_engine_context_size(gt, engine_class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		if (iosys_map_is_null(&guc->ads_map))
			continue;

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists context. This is required to
		 * allow the GuC to restore just the engine state when a
		 * watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
		ads_blob_write(guc, ads.eng_state_size[guc_class],
			       real_size - LRC_SKIP_SIZE(gt->i915));
		ads_blob_write(guc, ads.golden_context_lrca[guc_class],
			       addr_ggtt);

		addr_ggtt += alloc_size;
	}

	/* Make sure current size matches what we calculated previously */
	if (guc->ads_golden_ctxt_size)
		GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);

	return total_size;
}
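
/*
 * guc_prep_golden_context() is deliberately callable before the ADS blob
 * exists: intel_guc_ads_create() runs it with a NULL ads_map just to learn
 * the total size to reserve (using a temporary on-stack guc_gt_system_info),
 * and __guc_ads_init() runs it again once the blob is mapped to actually
 * write the per-class sizes and GGTT addresses.
 */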

static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->class != engine_class)
			continue;

		if (!engine->default_state)
			continue;

		return engine;
	}

	return NULL;
}

static void guc_init_golden_context(struct intel_guc *guc)
{
	struct intel_engine_cs *engine;
	struct intel_gt *gt = guc_to_gt(guc);
	unsigned long offset;
	u32 addr_ggtt, total_size = 0, alloc_size, real_size;
	u8 engine_class, guc_class;

	if (!intel_uc_uses_guc_submission(&gt->uc))
		return;

	GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));

	/*
	 * Go back and fill in the golden context data now that it is
	 * available.
	 */
	offset = guc_ads_golden_ctxt_offset(guc);
	addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;

	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		guc_class = engine_class_to_guc_class(engine_class);
		if (!ads_blob_read(guc, system_info.engine_enabled_masks[guc_class]))
			continue;

		real_size = intel_engine_context_size(gt, engine_class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		engine = find_engine_state(gt, engine_class);
		if (!engine) {
			drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n",
				engine_class);
			ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
			ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0);
			continue;
		}

		GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
			   real_size - LRC_SKIP_SIZE(gt->i915));
		GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);

		addr_ggtt += alloc_size;

		shmem_read_to_iosys_map(engine->default_state, 0, &guc->ads_map,
					offset, real_size);
		offset += alloc_size;
	}

	GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}

static int
guc_capture_prep_lists(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
	u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
	struct guc_gt_system_info local_info;
	struct iosys_map info_map;
	bool ads_is_mapped;
	size_t size = 0;
	void *ptr;
	int i, j;

	ads_is_mapped = !iosys_map_is_null(&guc->ads_map);
	if (ads_is_mapped) {
		capture_offset = guc_ads_capture_offset(guc);
		ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma);
		info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
						 offsetof(struct __guc_ads_blob, system_info));
	} else {
		memset(&local_info, 0, sizeof(local_info));
		iosys_map_set_vaddr(&info_map, &local_info);
		fill_engine_enable_masks(gt, &info_map);
	}

	/* first, set aside the first page for a capture_list with zero descriptors */
	total_size = PAGE_SIZE;
	if (ads_is_mapped) {
		if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
			iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
		null_ggtt = ads_ggtt + capture_offset;
		capture_offset += PAGE_SIZE;
	}

	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {

			/* null list if we don't have said engine or list */
			if (!info_map_read(&info_map, engine_enabled_masks[j])) {
				if (ads_is_mapped) {
					ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
					ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
				}
				continue;
			}
			if (intel_guc_capture_getlistsize(guc, i,
							  GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
							  j, &size)) {
				if (ads_is_mapped)
					ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
				goto engine_instance_list;
			}
			total_size += size;
			if (ads_is_mapped) {
				if (total_size > guc->ads_capture_size ||
				    intel_guc_capture_getlist(guc, i,
							      GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
							      j, &ptr)) {
					ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
					continue;
				}
				ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
					       capture_offset);
				iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
				capture_offset += size;
			}
engine_instance_list:
			if (intel_guc_capture_getlistsize(guc, i,
							  GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
							  j, &size)) {
				if (ads_is_mapped)
					ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
				continue;
			}
			total_size += size;
			if (ads_is_mapped) {
				if (total_size > guc->ads_capture_size ||
				    intel_guc_capture_getlist(guc, i,
							      GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
							      j, &ptr)) {
					ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
					continue;
				}
				ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
					       capture_offset);
				iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
				capture_offset += size;
			}
		}
		if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) {
			if (ads_is_mapped)
				ads_blob_write(guc, ads.capture_global[i], null_ggtt);
			continue;
		}
		total_size += size;
		if (ads_is_mapped) {
			if (total_size > guc->ads_capture_size ||
			    intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
						      &ptr)) {
				ads_blob_write(guc, ads.capture_global[i], null_ggtt);
				continue;
			}
			ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
			iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
			capture_offset += size;
		}
	}

	if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
		drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n",
			 guc->ads_capture_size, PAGE_ALIGN(total_size));

	return PAGE_ALIGN(total_size);
}
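
/*
 * Like guc_prep_golden_context(), guc_capture_prep_lists() serves double
 * duty: with ads_map unset it only totals up how much space the capture
 * lists will need, and with the blob mapped it also copies the lists in and
 * records their GGTT addresses. The first page is reserved as a "null" list
 * with zero descriptors, which every missing engine/class entry points at.
 */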

static void __guc_ads_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
			offsetof(struct __guc_ads_blob, system_info));
	u32 base;

	/* GuC scheduling policies */
	guc_policies_init(guc);

	/* System info */
	fill_engine_enable_masks(gt, &info_map);

	ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED],
		       hweight8(gt->info.sseu.slice_mask));
	ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK],
		       gt->info.vdbox_sfc_access);

	if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
		u32 distdbreg = intel_uncore_read(gt->uncore,
						  GEN12_DIST_DBS_POPULATED);
		ads_blob_write(guc,
			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
			       ((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT)
				& GEN12_DOORBELLS_PER_SQIDI) + 1);
	}

	/* Golden contexts for re-initialising after a watchdog reset */
	guc_prep_golden_context(guc);

	guc_mapping_table_init(guc_to_gt(guc), &info_map);

	base = intel_guc_ggtt_offset(guc, guc->ads_vma);

	/* Lists for error capture debug */
	guc_capture_prep_lists(guc);

	/* ADS */
	ads_blob_write(guc, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(guc, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));

	/* MMIO save/restore list */
	guc_mmio_reg_state_init(guc);

	/* Private Data */
	ads_blob_write(guc, ads.private_data, base +
		       guc_ads_private_data_offset(guc));

	i915_gem_object_flush_map(guc->ads_vma->obj);
}

/**
 * intel_guc_ads_create() - allocates and initializes GuC ADS.
 * @guc: intel_guc struct
 *
 * GuC needs a memory block (Additional Data Struct), where it will store
 * some data. Allocate and initialize such a memory block for GuC use.
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	void *ads_blob;
	u32 size;
	int ret;

	GEM_BUG_ON(guc->ads_vma);

	/*
	 * Create the reg state buffer dynamically in system memory, to be
	 * copied to the final ADS blob on gt init/reset.
	 */
	ret = guc_mmio_reg_state_create(guc);
	if (ret < 0)
		return ret;
	guc->ads_regset_size = ret;

	/* Likewise the golden contexts: */
	ret = guc_prep_golden_context(guc);
	if (ret < 0)
		return ret;
	guc->ads_golden_ctxt_size = ret;

	/* Likewise the capture lists: */
	ret = guc_capture_prep_lists(guc);
	if (ret < 0)
		return ret;
	guc->ads_capture_size = ret;

	/* Now the total size can be determined: */
	size = guc_ads_blob_size(guc);

	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
					     &ads_blob);
	if (ret)
		return ret;

	if (i915_gem_object_is_lmem(guc->ads_vma->obj))
		iosys_map_set_vaddr_iomem(&guc->ads_map, (void __iomem *)ads_blob);
	else
		iosys_map_set_vaddr(&guc->ads_map, ads_blob);

	__guc_ads_init(guc);

	return 0;
}

void intel_guc_ads_init_late(struct intel_guc *guc)
{
	/*
	 * The golden context setup requires the saved engine state from
	 * __engines_record_defaults(). However, that requires engines to be
	 * operational which means the ADS must already have been configured.
	 * Fortunately, the golden context state is not needed until a hang
	 * occurs, so it can be filled in during this late init phase.
	 */
	guc_init_golden_context(guc);
}

void intel_guc_ads_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
	iosys_map_clear(&guc->ads_map);
	kfree(guc->ads_regset);
}

static void guc_ads_private_data_reset(struct intel_guc *guc)
{
	u32 size;

	size = guc_ads_private_data_size(guc);
	if (!size)
		return;

	iosys_map_memset(&guc->ads_map, guc_ads_private_data_offset(guc),
			 0, size);
}

/**
 * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
 * @guc: intel_guc struct
 *
 * GuC stores some data in the ADS, which might be stale after a reset.
 * Reinitialize the whole ADS in case any part of it was corrupted during
 * the previous GuC run.
 */
void intel_guc_ads_reset(struct intel_guc *guc)
{
	if (!guc->ads_vma)
		return;

	__guc_ads_init(guc);

	guc_ads_private_data_reset(guc);
}

u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
{
	return intel_guc_ggtt_offset(guc, guc->ads_vma) +
		offsetof(struct __guc_ads_blob, engine_usage);
}

struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	u8 guc_class = engine_class_to_guc_class(engine->class);
	size_t offset = offsetof(struct __guc_ads_blob,
				 engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);

	return IOSYS_MAP_INIT_OFFSET(&guc->ads_map, offset);
}
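
/*
 * Engine usage records live in the blob's guc_engine_usage section, indexed
 * by GuC class and logical instance just like the mapping table above. The
 * GuC firmware updates these records; the driver side is expected only to
 * read them through the iosys_map returned here (the GuC busyness tracking).
 */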