// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ads.h"

#include <linux/fault-inject.h>

#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"

/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX	8

static struct xe_guc *
ads_to_guc(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_guc, ads);
}

static struct xe_gt *
ads_to_gt(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_gt, uc.guc.ads);
}

static struct xe_device *
ads_to_xe(struct xe_guc_ads *ads)
{
	return gt_to_xe(ads_to_gt(ads));
}

static struct iosys_map *
ads_to_map(struct xe_guc_ads *ads)
{
	return &ads->bo->vmap;
}

/* UM Queue parameters: */
#define GUC_UM_QUEUE_SIZE       (SZ_64K)
#define GUC_PAGE_RES_TIMEOUT_US (-1)

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
 * all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+
 *      | guc_um_init_params                    |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |   ...                                 |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | w/a KLVs                              |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | UM queues                             |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	struct guc_um_init_params um_init_params;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[];
} __packed;

#define ads_blob_read(ads_, field_) \
	xe_map_rd_field(ads_to_xe(ads_), ads_to_map(ads_), 0, \
			struct __guc_ads_blob, field_)

#define ads_blob_write(ads_, field_, val_)			\
	xe_map_wr_field(ads_to_xe(ads_), ads_to_map(ads_), 0,	\
			struct __guc_ads_blob, field_, val_)

#define info_map_write(xe_, map_, field_, val_) \
	xe_map_wr_field(xe_, map_, 0, struct guc_gt_system_info, field_, val_)

#define info_map_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_gt_system_info, field_)

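/*
 * Illustrative sketch only, not used by the driver: the accessors above let
 * the rest of this file treat the iosys_map-backed blob like a plain struct.
 * The function name below is hypothetical; writing and reading back the
 * policies valid bit would look like this:
 */
static void __maybe_unused guc_ads_accessor_example(struct xe_guc_ads *ads)
{
	/* Expands to xe_map_wr_field() at the offset of policies.is_valid */
	ads_blob_write(ads, policies.is_valid, 1);

	/* The matching read returns the value just written */
	xe_gt_assert(ads_to_gt(ads), ads_blob_read(ads, policies.is_valid) == 1);
}
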
static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	xe_assert(xe, ads->regset_size);

	return ads->regset_size;
}

static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->golden_lrc_size);
}

static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->ads_waklv_size);
}

static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->capture_size);
}

static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	if (!xe->info.has_usm)
		return 0;

	return GUC_UM_QUEUE_SIZE * GUC_UM_HW_QUEUE_MAX;
}

static size_t guc_ads_private_data_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads_to_guc(ads)->fw.private_data_size);
}

static size_t guc_ads_regset_offset(struct xe_guc_ads *ads)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_regset_offset(ads) +
		guc_ads_regset_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_golden_lrc_offset(ads) +
		 guc_ads_golden_lrc_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_waklv_offset(ads) +
		 guc_ads_waklv_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_um_queues_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_capture_offset(ads) +
		 guc_ads_capture_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_private_data_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_um_queues_offset(ads) +
		guc_ads_um_queues_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_size(struct xe_guc_ads *ads)
{
	return guc_ads_private_data_offset(ads) +
		guc_ads_private_data_size(ads);
}

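/*
 * Worked example with illustrative numbers only: on a 4K-page system with a
 * 0x600-byte regset and a 0x5000-byte golden LRC area,
 * guc_ads_golden_lrc_offset() is PAGE_ALIGN(offsetof(regset) + 0x600) and
 * guc_ads_waklv_offset() is PAGE_ALIGN(golden_lrc_offset + 0x5000): each
 * section begins at the page-aligned end of the one before it, matching the
 * layout diagram at the top of this file.
 */
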
static size_t calculate_regset_size(struct xe_gt *gt)
{
	struct xe_reg_sr_entry *sr_entry;
	unsigned long sr_idx;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	unsigned int count = 0;

	for_each_hw_engine(hwe, gt, id)
		xa_for_each(&hwe->reg_sr.xa, sr_idx, sr_entry)
			count++;

	count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;

	if (XE_WA(gt, 1607983814))
		count += LNCFCMOCS_REG_COUNT;

	return count * sizeof(struct guc_mmio_reg);
}

static u32 engine_enable_mask(struct xe_gt *gt, enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 mask = 0;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class)
			mask |= BIT(hwe->instance);

	return mask;
}

static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	size_t total_size = 0, alloc_size, real_size;
	int class;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		if (!engine_enable_mask(gt, class))
			continue;

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;
	}

	return total_size;
}

static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
				      enum xe_guc_klv_ids klv_id,
				      u32 value,
				      u32 *offset, u32 *remain)
{
	u32 size;
	u32 klv_entry[] = {
		/* 16:16 key/length */
		FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		FIELD_PREP(GUC_KLV_0_LEN, 1),
		value,
		/* 1 dword data */
	};

	size = sizeof(klv_entry);

	if (*remain < size) {
		drm_warn(&ads_to_xe(ads)->drm,
			 "w/a klv buffer too small to add klv id %d\n", klv_id);
	} else {
		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
				 klv_entry, size);
		*offset += size;
		*remain -= size;
	}
}

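/*
 * Worked example (key value illustrative): GUC_KLV_0_KEY selects the upper
 * 16 bits of the header dword and GUC_KLV_0_LEN the lower 16, so a one-word
 * KLV with key 0x9002 and value 0xC40 is emitted as the two dwords
 * { 0x90020001, 0x00000C40 }, while a "simple" KLV below is just a single
 * header dword with a zero length field.
 */
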
static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
				    enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
{
	u32 klv_entry[] = {
		/* 16:16 key/length */
		FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		FIELD_PREP(GUC_KLV_0_LEN, 0),
		/* 0 dwords data */
	};
	u32 size;

	size = sizeof(klv_entry);

	if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
		       "w/a klv buffer too small to add klv id %d\n", klv_id))
		return;

	xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
			 klv_entry, size);
	*offset += size;
	*remain -= size;
}

static void guc_waklv_init(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u64 addr_ggtt;
	u32 offset, remain, size;

	offset = guc_ads_waklv_offset(ads);
	remain = guc_ads_waklv_size(ads);

	if (XE_WA(gt, 14019882105))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
					&offset, &remain);
	if (XE_WA(gt, 18024947630))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
					&offset, &remain);
	if (XE_WA(gt, 16022287689))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
					&offset, &remain);

	if (XE_WA(gt, 14022866841))
		guc_waklv_enable_simple(ads,
					GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
					&offset, &remain);

	/*
	 * On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
	 * the default value for this register is determined to be 0xC40. This could change in the
	 * future, so GuC depends on KMD to send it the correct value.
	 */
	if (XE_WA(gt, 13011645652))
		guc_waklv_enable_one_word(ads,
					  GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
					  0xC40,
					  &offset, &remain);

	if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
		guc_waklv_enable_simple(ads,
					GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
					&offset, &remain);

	size = guc_ads_waklv_size(ads) - remain;
	if (!size)
		return;

	offset = guc_ads_waklv_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_size, size);
}

static int calculate_waklv_size(struct xe_guc_ads *ads)
{
	/*
	 * A single page is both the minimum size possible and sufficiently
	 * large for all current platforms: a simple KLV occupies just one
	 * 4-byte header dword and a one-word KLV eight bytes, so even a few
	 * dozen enabled workarounds fit comfortably in 4K.
	 */
	return SZ_4K;
}

#define MAX_GOLDEN_LRC_SIZE	(SZ_4K * 64)

int xe_guc_ads_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
	ads->regset_size = calculate_regset_size(gt);
	ads->ads_waklv_size = calculate_waklv_size(ads);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ads->bo = bo;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */

/**
 * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
 * @ads: Additional data structures object
 *
 * Recalculate golden_lrc_size, capture_size and regset_size as the number of
 * hardware engines may have changed after the hwconfig was loaded. Also verify
 * the new sizes fit in the already allocated ADS buffer object.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u32 prev_regset_size = ads->regset_size;

	xe_gt_assert(gt, ads->bo);

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	/* Calculate capture size with the worst-case size */
	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
	ads->regset_size = calculate_regset_size(gt);

	xe_gt_assert(gt, ads->golden_lrc_size +
		     (ads->regset_size - prev_regset_size) <=
		     MAX_GOLDEN_LRC_SIZE);

	return 0;
}

static void guc_policies_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	u32 global_flags = 0;

	ads_blob_write(ads, policies.dpc_promote_time,
		       GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
	ads_blob_write(ads, policies.max_num_work_items,
		       GLOBAL_POLICY_MAX_NUM_WI);

	if (xe->wedged.mode == 2)
		global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	ads_blob_write(ads, policies.global_flags, global_flags);
	ads_blob_write(ads, policies.is_valid, 1);
}

static void fill_engine_enable_masks(struct xe_gt *gt,
				     struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);

	info_map_write(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_RENDER));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COPY));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_DECODE));
	info_map_write(xe, info_map,
		       engine_enabled_masks[GUC_VIDEOENHANCE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COMPUTE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}

static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u8 guc_class;

	for (guc_class = 0; guc_class < GUC_MAX_ENGINE_CLASSES; ++guc_class) {
		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		ads_blob_write(ads, ads.eng_state_size[guc_class],
			       guc_ads_golden_lrc_size(ads) -
			       xe_lrc_skip_size(xe));
		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
			       xe_bo_ggtt_addr(ads->bo) +
			       guc_ads_golden_lrc_offset(ads));
	}
}

static void guc_mapping_table_init_invalid(struct xe_gt *gt,
					   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			info_map_write(xe, info_map, mapping_table[i][j],
				       GUC_MAX_INSTANCES_PER_CLASS);
}

static void guc_mapping_table_init(struct xe_gt *gt,
				   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	guc_mapping_table_init_invalid(gt, info_map);

	for_each_hw_engine(hwe, gt, id) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(hwe->class);
		info_map_write(xe, info_map,
			       mapping_table[guc_class][hwe->logical_instance],
			       hwe->instance);
	}
}

static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map,
				       enum guc_capture_list_class_type capture_class)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 mask;

	switch (capture_class) {
	case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
		mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_VIDEO:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_BLITTER:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
		break;
	default:
		mask = 0;
	}

	return mask;
}

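/*
 * Returns true when the caller should fall back to the null (empty) capture
 * list for this owner/type/class combination, either because no list exists
 * or because copying it would overflow the space reserved in the ADS;
 * returns false when *size and *pptr describe a valid list to copy.
 */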
static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt,
				    int owner, int type, int class, u32 *total_size, size_t *size,
				    void **pptr)
{
	*size = 0;

	if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) {
		if (*total_size + *size > ads->capture_size)
			xe_gt_dbg(gt, "Capture size overflow: %zu vs %d\n",
				  *total_size + *size, ads->capture_size);
		else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr))
			return false;
	}

	return true;
}

static int guc_capture_prep_lists(struct xe_guc_ads *ads)
{
	struct xe_guc *guc = ads_to_guc(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
	struct iosys_map info_map;
	size_t size = 0;
	void *ptr;
	int i, j;

	/*
	 * GuC Capture's steered reg-list needs to be allocated and initialized
	 * after the GuC-hwconfig is available, which is guaranteed from here.
	 */
	xe_guc_capture_steered_list_init(ads_to_guc(ads));

	capture_offset = guc_ads_capture_offset(ads);
	ads_ggtt = xe_bo_ggtt_addr(ads->bo);
	info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
					 offsetof(struct __guc_ads_blob, system_info));

	/* First, set aside the first page for a capture_list with zero descriptors */
	total_size = PAGE_SIZE;
	if (!xe_guc_capture_getnullheader(guc, &ptr, &size))
		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size);

	null_ggtt = ads_ggtt + capture_offset;
	capture_offset += PAGE_SIZE;

	/*
	 * Populate the capture list: at this point the ADS is already
	 * allocated and mapped to its worst-case size.
	 */
	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		bool write_empty_list;

		for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
			u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j);
			/* Null list if we don't have said engine or list */
			if (!engine_mask) {
				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
				continue;
			}

			/* Engine exists: start with engine-class registers */
			write_empty_list = get_capture_list(ads, guc, gt, i,
							    GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
							    j, &total_size, &size, &ptr);
			if (!write_empty_list) {
				ads_blob_write(ads, ads.capture_class[i][j],
					       ads_ggtt + capture_offset);
				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
						 ptr, size);
				total_size += size;
				capture_offset += size;
			} else {
				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
			}

			/* Engine exists: next, engine-instance registers */
			write_empty_list = get_capture_list(ads, guc, gt, i,
							    GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
							    j, &total_size, &size, &ptr);
			if (!write_empty_list) {
				ads_blob_write(ads, ads.capture_instance[i][j],
					       ads_ggtt + capture_offset);
				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
						 ptr, size);
				total_size += size;
				capture_offset += size;
			} else {
				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
			}
		}

		/* The global registers list comes last in our PF/VF loops */
		write_empty_list = get_capture_list(ads, guc, gt, i,
						    GUC_STATE_CAPTURE_TYPE_GLOBAL,
						    0, &total_size, &size, &ptr);
		if (!write_empty_list) {
			ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset);
			xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr,
					 size);
			total_size += size;
			capture_offset += size;
		} else {
			ads_blob_write(ads, ads.capture_global[i], null_ggtt);
		}
	}

	if (ads->capture_size != PAGE_ALIGN(total_size))
		xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
			  ads->capture_size, PAGE_ALIGN(total_size));
	return PAGE_ALIGN(total_size);
}

static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
				      struct iosys_map *regset_map,
				      struct xe_reg reg,
				      unsigned int n_entry)
{
	struct guc_mmio_reg entry = {
		.offset = reg.addr,
		.flags = reg.masked ? GUC_REGSET_MASKED : 0,
	};

	if (reg.mcr) {
		struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
		u8 group, instance;

		bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
								  &group, &instance);

		if (steer) {
			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
			entry.flags |= GUC_REGSET_STEERING_NEEDED;
		}
	}

	xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
			 &entry, sizeof(entry));
}

static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
					  struct iosys_map *regset_map,
					  struct xe_hw_engine *hwe)
{
	struct xe_hw_engine *hwe_rcs_reset_domain =
		xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	unsigned int count = 0;
	const struct {
		struct xe_reg reg;
		bool skip;
	} *e, extra_regs[] = {
		{ .reg = RING_MODE(hwe->mmio_base),			},
		{ .reg = RING_HWS_PGA(hwe->mmio_base),			},
		{ .reg = RING_IMR(hwe->mmio_base),			},
		{ .reg = RCU_MODE, .skip = hwe != hwe_rcs_reset_domain	},
		{ .reg = CCS_MODE,
		  .skip = hwe != hwe_rcs_reset_domain || !xe_gt_ccs_mode_enabled(hwe->gt) },
	};
	u32 i;

	BUILD_BUG_ON(ARRAY_SIZE(extra_regs) > ADS_REGSET_EXTRA_MAX);

	xa_for_each(&hwe->reg_sr.xa, idx, entry)
		guc_mmio_regset_write_one(ads, regset_map, entry->reg, count++);

	for (e = extra_regs; e < extra_regs + ARRAY_SIZE(extra_regs); e++) {
		if (e->skip)
			continue;

		guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
	}

	if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
		for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
			guc_mmio_regset_write_one(ads, regset_map,
						  XELP_LNCFCMOCS(i), count++);
		}
	}

	return count;
}

static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
{
	size_t regset_offset = guc_ads_regset_offset(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 addr = xe_bo_ggtt_addr(ads->bo) + regset_offset;
	struct iosys_map regset_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
							    regset_offset);
	unsigned int regset_used = 0;

	for_each_hw_engine(hwe, gt, id) {
		unsigned int count;
		u8 gc;

		/*
		 * 1. Write all MMIO entries for this engine to the table. No
		 * need to worry about fused-off engines and when there are
		 * entries in the regset: the reg_state_list has been zeroed
		 * by xe_guc_ads_populate()
		 */
		count = guc_mmio_regset_write(ads, &regset_map, hwe);
		if (!count)
			continue;

		/*
		 * 2. Record in the header (ads.reg_state_list) the address
		 * location and number of entries
		 */
		gc = xe_engine_class_to_guc_class(hwe->class);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].address, addr);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].count, count);

		addr += count * sizeof(struct guc_mmio_reg);
		iosys_map_incr(&regset_map, count * sizeof(struct guc_mmio_reg));

		regset_used += count * sizeof(struct guc_mmio_reg);
	}

	xe_gt_assert(gt, regset_used <= ads->regset_size);
}

static void guc_um_init_params(struct xe_guc_ads *ads)
{
	u32 um_queue_offset = guc_ads_um_queues_offset(ads);
	u64 base_dpa;
	u32 base_ggtt;
	int i;

	base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
	base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;

	for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
		ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
			       base_dpa + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
			       base_ggtt + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
			       GUC_UM_QUEUE_SIZE);
	}

	ads_blob_write(ads, um_init_params.page_response_timeout_in_us,
		       GUC_PAGE_RES_TIMEOUT_US);
}

static void guc_doorbell_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);

	if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
		u32 distdbreg =
			xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED);

		ads_blob_write(ads,
			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
			       REG_FIELD_GET(DOORBELLS_PER_SQIDI_MASK, distdbreg) + 1);
	}
}

/**
 * xe_guc_ads_populate_minimal - populate minimal ADS
 * @ads: Additional data structures object
 *
 * This function populates a minimal ADS that does not support submissions but
 * is enough for the GuC to load and the hwconfig table to be read.
 */
void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
	guc_policies_init(ads);
	guc_prep_golden_lrc_null(ads);
	guc_mapping_table_init_invalid(gt, &info_map);
	guc_doorbell_init(ads);

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

void xe_guc_ads_populate(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
	guc_policies_init(ads);
	fill_engine_enable_masks(gt, &info_map);
	guc_mmio_reg_state_init(ads);
	guc_prep_golden_lrc_null(ads);
	guc_mapping_table_init(gt, &info_map);
	guc_capture_prep_lists(ads);
	guc_doorbell_init(ads);
	guc_waklv_init(ads);

	if (xe->info.has_usm) {
		guc_um_init_params(ads);
		ads_blob_write(ads, ads.um_init_data, base +
			       offsetof(struct __guc_ads_blob, um_init_params));
	}

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	size_t total_size = 0, alloc_size, real_size;
	u32 addr_ggtt, offset;
	int class;

	offset = guc_ads_golden_lrc_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(class);

		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		xe_gt_assert(gt, gt->default_lrc[class]);

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists LRC registers. This is
		 * required to allow the GuC to restore just the engine state
		 * when a watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
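		/*
		 * Worked example with illustrative numbers only: if the full
		 * context image were 0x11000 bytes and xe_lrc_skip_size()
		 * returned 0x1000, eng_state_size would be reported as
		 * 0x10000 while golden_context_lrca still points at the base
		 * of the image.
		 */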
		ads_blob_write(ads, ads.eng_state_size[guc_class],
			       real_size - xe_lrc_skip_size(xe));
		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
			       addr_ggtt);

		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
				 gt->default_lrc[class], real_size);

		addr_ggtt += alloc_size;
		offset += alloc_size;
	}

	xe_gt_assert(gt, total_size == ads->golden_lrc_size);
}

void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
	guc_populate_golden_lrc(ads);
}

static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
{
	struct xe_guc_ct *ct = &ads_to_guc(ads)->ct;
	u32 action[] = {
		XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
}

/**
 * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
 * @ads: Additional data structures object
 *
 * This function updates the GuC's engine reset policy based on wedged.mode.
 *
 * Return: 0 on success, and negative error code otherwise.
 */
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_tile *tile = gt_to_tile(gt);
	struct guc_policies *policies;
	struct xe_bo *bo;
	int ret = 0;

	/* Zero-initialize so global_flags below starts from a known state */
	policies = kzalloc(sizeof(*policies), GFP_KERNEL);
	if (!policies)
		return -ENOMEM;

	policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
	policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
	policies->is_valid = 1;
	if (xe->wedged.mode == 2)
		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
	else
		policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
					    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					    XE_BO_FLAG_GGTT);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto out;
	}

	ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
out:
	kfree(policies);
	return ret;
}
1018