// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ads.h"

#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_wa.h"

/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX	8

static struct xe_guc *
ads_to_guc(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_guc, ads);
}

static struct xe_gt *
ads_to_gt(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_gt, uc.guc.ads);
}

static struct xe_device *
ads_to_xe(struct xe_guc_ads *ads)
{
	return gt_to_xe(ads_to_gt(ads));
}

static struct iosys_map *
ads_to_map(struct xe_guc_ads *ads)
{
	return &ads->bo->vmap;
}

/* UM Queue parameters: */
#define GUC_UM_QUEUE_SIZE       (SZ_64K)
#define GUC_PAGE_RES_TIMEOUT_US (-1)

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
 * all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+
 *      | guc_um_init_params                    |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |   ...                                 |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | w/a KLVs                              |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | UM queues                             |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	struct guc_um_init_params um_init_params;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[];
} __packed;

#define ads_blob_read(ads_, field_) \
	xe_map_rd_field(ads_to_xe(ads_), ads_to_map(ads_), 0, \
			struct __guc_ads_blob, field_)

#define ads_blob_write(ads_, field_, val_)			\
	xe_map_wr_field(ads_to_xe(ads_), ads_to_map(ads_), 0,	\
			struct __guc_ads_blob, field_, val_)

#define info_map_write(xe_, map_, field_, val_) \
	xe_map_wr_field(xe_, map_, 0, struct guc_gt_system_info, field_, val_)

#define info_map_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_gt_system_info, field_)
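
/*
 * Usage sketch (illustrative only, not part of the driver): since the ADS BO
 * may be CPU-mapped through IO memory, fields are never dereferenced
 * directly; every access goes through the xe_map helpers wrapped by the
 * macros above, e.g.:
 *
 *	u32 flags = ads_blob_read(ads, policies.global_flags);
 *
 *	flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
 *	ads_blob_write(ads, policies.global_flags, flags);
 */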

static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	xe_assert(xe, ads->regset_size);

	return ads->regset_size;
}

static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->golden_lrc_size);
}

static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->ads_waklv_size);
}

static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
	/* FIXME: Allocate a proper capture list */
	return PAGE_ALIGN(PAGE_SIZE);
}

static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	if (!xe->info.has_usm)
		return 0;

	return GUC_UM_QUEUE_SIZE * GUC_UM_HW_QUEUE_MAX;
}

static size_t guc_ads_private_data_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads_to_guc(ads)->fw.private_data_size);
}

static size_t guc_ads_regset_offset(struct xe_guc_ads *ads)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_regset_offset(ads) +
		guc_ads_regset_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_golden_lrc_offset(ads) +
		 guc_ads_golden_lrc_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_waklv_offset(ads) +
		 guc_ads_waklv_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_um_queues_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_capture_offset(ads) +
		 guc_ads_capture_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_private_data_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_um_queues_offset(ads) +
		guc_ads_um_queues_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_size(struct xe_guc_ads *ads)
{
	return guc_ads_private_data_offset(ads) +
		guc_ads_private_data_size(ads);
}
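
/*
 * Layout sketch (hypothetical debug helper, not part of the driver): the
 * offset helpers above chain into each other, so every sub-block starts at
 * the page-aligned end of the previous one and guc_ads_size() is simply the
 * end of the last block. Something like this would make the cascade visible:
 *
 *	static void guc_ads_dump_layout(struct xe_guc_ads *ads)
 *	{
 *		struct xe_gt *gt = ads_to_gt(ads);
 *
 *		xe_gt_dbg(gt, "regset:       %zx\n", guc_ads_regset_offset(ads));
 *		xe_gt_dbg(gt, "golden LRCs:  %zx\n", guc_ads_golden_lrc_offset(ads));
 *		xe_gt_dbg(gt, "w/a KLVs:     %zx\n", guc_ads_waklv_offset(ads));
 *		xe_gt_dbg(gt, "capture:      %zx\n", guc_ads_capture_offset(ads));
 *		xe_gt_dbg(gt, "UM queues:    %zx\n", guc_ads_um_queues_offset(ads));
 *		xe_gt_dbg(gt, "private data: %zx\n", guc_ads_private_data_offset(ads));
 *		xe_gt_dbg(gt, "total:        %zx\n", guc_ads_size(ads));
 *	}
 */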

static bool needs_wa_1607983814(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) < 1250;
}

static size_t calculate_regset_size(struct xe_gt *gt)
{
	struct xe_reg_sr_entry *sr_entry;
	unsigned long sr_idx;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	unsigned int count = 0;

	for_each_hw_engine(hwe, gt, id)
		xa_for_each(&hwe->reg_sr.xa, sr_idx, sr_entry)
			count++;

	count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;

	if (needs_wa_1607983814(gt_to_xe(gt)))
		count += LNCFCMOCS_REG_COUNT;

	return count * sizeof(struct guc_mmio_reg);
}
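
/*
 * Worked example (illustrative numbers): a GT whose engines carry 20
 * save/restore entries in total reserves those 20 slots, plus
 * ADS_REGSET_EXTRA_MAX slots for each of the XE_NUM_HW_ENGINES possible
 * engines, plus LNCFCMOCS_REG_COUNT slots on platforms needing
 * Wa_1607983814, all multiplied by sizeof(struct guc_mmio_reg). The
 * estimate is deliberately a worst case: slack is reserved even for
 * engines that end up fused off.
 */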

static u32 engine_enable_mask(struct xe_gt *gt, enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 mask = 0;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class)
			mask |= BIT(hwe->instance);

	return mask;
}
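
/*
 * Example (illustrative): on a GT exposing video decode engines with
 * hardware instances 0 and 2 (instance 1 fused off),
 * engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_DECODE) returns
 * BIT(0) | BIT(2) == 0x5; fused-off engines never show up in the
 * for_each_hw_engine() walk, so they contribute nothing to the mask.
 */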

static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	size_t total_size = 0, alloc_size, real_size;
	int class;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		if (!engine_enable_mask(gt, class))
			continue;

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;
	}

	return total_size;
}

static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
				      enum xe_guc_klv_ids klv_id,
				      u32 value,
				      u32 *offset, u32 *remain)
{
	u32 size;
	u32 klv_entry[] = {
		/* 16:16 key/length */
		FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
		/* 1 dword data */
	};

	size = sizeof(klv_entry);

	if (*remain < size) {
		drm_warn(&ads_to_xe(ads)->drm,
			 "w/a klv buffer too small to add klv id %d\n", klv_id);
	} else {
		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
				 klv_entry, size);
		*offset += size;
		*remain -= size;
	}
}
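
/*
 * Worked example (hypothetical klv_id, for illustration only): a one-dword
 * KLV occupies two dwords in the buffer. For klv_id 0x1234 and value 0xC40
 * the helper above emits:
 *
 *	dword 0: 0x12340001	(key 0x1234 in GUC_KLV_0_KEY, length 1 in GUC_KLV_0_LEN)
 *	dword 1: 0x00000C40	(the single dword of payload)
 */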

static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
				    enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
{
	u32 klv_entry[] = {
		/* 16:16 key/length */
		FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		FIELD_PREP(GUC_KLV_0_LEN, 0),
		/* 0 dwords data */
	};
	u32 size;

	size = sizeof(klv_entry);

	if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
		       "w/a klv buffer too small to add klv id %d\n", klv_id))
		return;

	xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
			 klv_entry, size);
	*offset += size;
	*remain -= size;
}

static void guc_waklv_init(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u64 addr_ggtt;
	u32 offset, remain, size;

	offset = guc_ads_waklv_offset(ads);
	remain = guc_ads_waklv_size(ads);

	if (XE_WA(gt, 14019882105))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
					&offset, &remain);
	if (XE_WA(gt, 18024947630))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
					&offset, &remain);
	if (XE_WA(gt, 16022287689))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
					&offset, &remain);

	/*
	 * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
	 * the default value for this register is determined to be 0xC40. This could change in the
	 * future, so GuC depends on KMD to send it the correct value.
	 */
	if (XE_WA(gt, 13011645652))
		guc_waklv_enable_one_word(ads,
					  GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
					  0xC40,
					  &offset, &remain);

	size = guc_ads_waklv_size(ads) - remain;
	if (!size)
		return;

	offset = guc_ads_waklv_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_size, size);
}

static int calculate_waklv_size(struct xe_guc_ads *ads)
{
	/*
	 * A single page is both the minimum size possible and
	 * large enough for all current platforms.
	 */
	return SZ_4K;
}

#define MAX_GOLDEN_LRC_SIZE	(SZ_4K * 64)

int xe_guc_ads_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	ads->regset_size = calculate_regset_size(gt);
	ads->ads_waklv_size = calculate_waklv_size(ads);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ads->bo = bo;

	return 0;
}

/**
 * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
 * @ads: Additional data structures object
 *
 * Recalculate golden_lrc_size & regset_size as the number of hardware engines
 * may have changed after the hwconfig was loaded. Also verify the new sizes
 * fit in the already allocated ADS buffer object.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u32 prev_regset_size = ads->regset_size;

	xe_gt_assert(gt, ads->bo);

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	ads->regset_size = calculate_regset_size(gt);

	xe_gt_assert(gt, ads->golden_lrc_size +
		     (ads->regset_size - prev_regset_size) <=
		     MAX_GOLDEN_LRC_SIZE);

	return 0;
}

static void guc_policies_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	u32 global_flags = 0;

	ads_blob_write(ads, policies.dpc_promote_time,
		       GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
	ads_blob_write(ads, policies.max_num_work_items,
		       GLOBAL_POLICY_MAX_NUM_WI);

	if (xe->wedged.mode == 2)
		global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	ads_blob_write(ads, policies.global_flags, global_flags);
	ads_blob_write(ads, policies.is_valid, 1);
}

static void fill_engine_enable_masks(struct xe_gt *gt,
				     struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);

	info_map_write(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_RENDER));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COPY));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_DECODE));
	info_map_write(xe, info_map,
		       engine_enabled_masks[GUC_VIDEOENHANCE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COMPUTE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}

static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u8 guc_class;

	for (guc_class = 0; guc_class < GUC_MAX_ENGINE_CLASSES; ++guc_class) {
		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		ads_blob_write(ads, ads.eng_state_size[guc_class],
			       guc_ads_golden_lrc_size(ads) -
			       xe_lrc_skip_size(xe));
		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
			       xe_bo_ggtt_addr(ads->bo) +
			       guc_ads_golden_lrc_offset(ads));
	}
}

static void guc_mapping_table_init_invalid(struct xe_gt *gt,
					   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			info_map_write(xe, info_map, mapping_table[i][j],
				       GUC_MAX_INSTANCES_PER_CLASS);
}

static void guc_mapping_table_init(struct xe_gt *gt,
				   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	guc_mapping_table_init_invalid(gt, info_map);

	for_each_hw_engine(hwe, gt, id) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(hwe->class);
		info_map_write(xe, info_map,
			       mapping_table[guc_class][hwe->logical_instance],
			       hwe->instance);
	}
}
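
/*
 * Example (illustrative): with physical video decode instance 1 fused off,
 * the remaining VCS0 and VCS2 get logical instances 0 and 1, so the loop
 * above fills:
 *
 *	mapping_table[GUC_VIDEO_CLASS][0] = 0	(logical 0 -> physical VCS0)
 *	mapping_table[GUC_VIDEO_CLASS][1] = 2	(logical 1 -> physical VCS2)
 *
 * while every untouched slot keeps GUC_MAX_INSTANCES_PER_CLASS, which the
 * GuC treats as invalid.
 */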

static void guc_capture_list_init(struct xe_guc_ads *ads)
{
	int i, j;
	u32 addr = xe_bo_ggtt_addr(ads->bo) + guc_ads_capture_offset(ads);

	/* FIXME: Populate a proper capture list */
	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
			ads_blob_write(ads, ads.capture_instance[i][j], addr);
			ads_blob_write(ads, ads.capture_class[i][j], addr);
		}

		ads_blob_write(ads, ads.capture_global[i], addr);
	}
}

static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
				      struct iosys_map *regset_map,
				      struct xe_reg reg,
				      unsigned int n_entry)
{
	struct guc_mmio_reg entry = {
		.offset = reg.addr,
		.flags = reg.masked ? GUC_REGSET_MASKED : 0,
	};

	xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
			 &entry, sizeof(entry));
}
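
/*
 * Example (illustrative): entries are packed back to back, so entry n lands
 * at byte offset n * sizeof(struct guc_mmio_reg) within regset_map. For a
 * register declared with reg.masked set, GUC_REGSET_MASKED tells the GuC
 * that the upper 16 bits act as a write-enable mask when the saved value is
 * re-applied.
 */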

static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
					  struct iosys_map *regset_map,
					  struct xe_hw_engine *hwe)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_hw_engine *hwe_rcs_reset_domain =
		xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	unsigned int count = 0;
	const struct {
		struct xe_reg reg;
		bool skip;
	} *e, extra_regs[] = {
		{ .reg = RING_MODE(hwe->mmio_base),			},
		{ .reg = RING_HWS_PGA(hwe->mmio_base),			},
		{ .reg = RING_IMR(hwe->mmio_base),			},
		{ .reg = RCU_MODE, .skip = hwe != hwe_rcs_reset_domain	},
		{ .reg = CCS_MODE,
		  .skip = hwe != hwe_rcs_reset_domain || !xe_gt_ccs_mode_enabled(hwe->gt) },
	};
	u32 i;

	BUILD_BUG_ON(ARRAY_SIZE(extra_regs) > ADS_REGSET_EXTRA_MAX);

	xa_for_each(&hwe->reg_sr.xa, idx, entry)
		guc_mmio_regset_write_one(ads, regset_map, entry->reg, count++);

	for (e = extra_regs; e < extra_regs + ARRAY_SIZE(extra_regs); e++) {
		if (e->skip)
			continue;

		guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
	}

	/* Wa_1607983814 */
	if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
		for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
			guc_mmio_regset_write_one(ads, regset_map,
						  XELP_LNCFCMOCS(i), count++);
		}
	}

	return count;
}

static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
{
	size_t regset_offset = guc_ads_regset_offset(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 addr = xe_bo_ggtt_addr(ads->bo) + regset_offset;
	struct iosys_map regset_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
							    regset_offset);
	unsigned int regset_used = 0;

	for_each_hw_engine(hwe, gt, id) {
		unsigned int count;
		u8 gc;

		/*
		 * 1. Write all MMIO entries for this engine to the table. No
		 * need to worry about fused-off engines or engines with no
		 * entries in the regset: the reg_state_list has been zeroed
		 * by xe_guc_ads_populate()
		 */
		count = guc_mmio_regset_write(ads, &regset_map, hwe);
		if (!count)
			continue;

		/*
		 * 2. Record in the header (ads.reg_state_list) the address
		 * location and number of entries
		 */
		gc = xe_engine_class_to_guc_class(hwe->class);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].address, addr);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].count, count);

		addr += count * sizeof(struct guc_mmio_reg);
		iosys_map_incr(&regset_map, count * sizeof(struct guc_mmio_reg));

		regset_used += count * sizeof(struct guc_mmio_reg);
	}

	xe_gt_assert(gt, regset_used <= ads->regset_size);
}

static void guc_um_init_params(struct xe_guc_ads *ads)
{
	u32 um_queue_offset = guc_ads_um_queues_offset(ads);
	u64 base_dpa;
	u32 base_ggtt;
	int i;

	base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
	base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;

	for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
		ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
			       base_dpa + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
			       base_ggtt + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
			       GUC_UM_QUEUE_SIZE);
	}

	ads_blob_write(ads, um_init_params.page_response_timeout_in_us,
		       GUC_PAGE_RES_TIMEOUT_US);
}
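
/*
 * Worked example (illustrative): the UM queues are carved out of the same BO
 * back to back, so with GUC_UM_QUEUE_SIZE == SZ_64K queue i starts at
 * um_queue_offset + i * SZ_64K in both views handed to the GuC:
 *
 *	GGTT: base_ggtt + 0x00000, base_ggtt + 0x10000, ...
 *	DPA:  base_dpa  + 0x00000, base_dpa  + 0x10000, ...
 *
 * GUC_PAGE_RES_TIMEOUT_US of -1 maps to the largest possible u32 value,
 * i.e. an effectively unlimited page response timeout.
 */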

static void guc_doorbell_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);

	if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
		u32 distdbreg =
			xe_mmio_read32(gt, DIST_DBS_POPULATED);

		ads_blob_write(ads,
			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
			       REG_FIELD_GET(DOORBELLS_PER_SQIDI_MASK, distdbreg) + 1);
	}
}

/**
 * xe_guc_ads_populate_minimal - populate minimal ADS
 * @ads: Additional data structures object
 *
 * This function populates a minimal ADS that does not support submissions but
 * is enough for the GuC to load and the hwconfig table to be read.
 */
void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
	guc_policies_init(ads);
	guc_prep_golden_lrc_null(ads);
	guc_mapping_table_init_invalid(gt, &info_map);
	guc_doorbell_init(ads);

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

void xe_guc_ads_populate(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
	guc_policies_init(ads);
	fill_engine_enable_masks(gt, &info_map);
	guc_mmio_reg_state_init(ads);
	guc_prep_golden_lrc_null(ads);
	guc_mapping_table_init(gt, &info_map);
	guc_capture_list_init(ads);
	guc_doorbell_init(ads);
	guc_waklv_init(ads);

	if (xe->info.has_usm) {
		guc_um_init_params(ads);
		ads_blob_write(ads, ads.um_init_data, base +
			       offsetof(struct __guc_ads_blob, um_init_params));
	}

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	size_t total_size = 0, alloc_size, real_size;
	u32 addr_ggtt, offset;
	int class;

	offset = guc_ads_golden_lrc_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(class);

		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		xe_gt_assert(gt, gt->default_lrc[class]);

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists LRC registers. This is
		 * required to allow the GuC to restore just the engine state
		 * when a watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
		ads_blob_write(ads, ads.eng_state_size[guc_class],
			       real_size - xe_lrc_skip_size(xe));
		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
			       addr_ggtt);

		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
				 gt->default_lrc[class], real_size);

		addr_ggtt += alloc_size;
		offset += alloc_size;
	}

	xe_gt_assert(gt, total_size == ads->golden_lrc_size);
}
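
/*
 * Worked example (illustrative numbers): if xe_gt_lrc_size() for a class is
 * 0x10f80 bytes and xe_lrc_skip_size() is 0x2000, the GuC is told
 * eng_state_size = 0xef80 while golden_context_lrca still points at the
 * start of the full image; PAGE_ALIGN() rounds the copy's footprint up to
 * 0x11000 so the next class starts page aligned.
 */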

void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
	guc_populate_golden_lrc(ads);
}

static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
{
	struct xe_guc_ct *ct = &ads_to_guc(ads)->ct;
	u32 action[] = {
		XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
}

/**
 * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
 * @ads: Additional data structures object
 *
 * This function updates the GuC's engine reset policy based on wedged.mode.
 *
 * Return: 0 on success, and negative error code otherwise.
 */
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_tile *tile = gt_to_tile(gt);
	struct guc_policies *policies;
	struct xe_bo *bo;
	int ret = 0;

	policies = kzalloc(sizeof(*policies), GFP_KERNEL);
	if (!policies)
		return -ENOMEM;

	policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
	policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
	/* Seed the flags from the blob so unrelated bits are never clobbered */
	policies->global_flags = ads_blob_read(ads, policies.global_flags);
	policies->is_valid = 1;
	if (xe->wedged.mode == 2)
		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
	else
		policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
					    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					    XE_BO_FLAG_GGTT);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto out;
	}

	ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
out:
	kfree(policies);
	return ret;
}
865