// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ads.h"

#include <linux/fault-inject.h>

#include <drm/drm_managed.h>

#include <generated/xe_wa_oob.h>

#include "abi/guc_actions_abi.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_wa.h"

/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX	8

static struct xe_guc *
ads_to_guc(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_guc, ads);
}

static struct xe_gt *
ads_to_gt(struct xe_guc_ads *ads)
{
	return container_of(ads, struct xe_gt, uc.guc.ads);
}

static struct xe_device *
ads_to_xe(struct xe_guc_ads *ads)
{
	return gt_to_xe(ads_to_gt(ads));
}

static struct iosys_map *
ads_to_map(struct xe_guc_ads *ads)
{
	return &ads->bo->vmap;
}

/* UM Queue parameters: */
#define GUC_UM_QUEUE_SIZE	(SZ_64K)
#define GUC_PAGE_RES_TIMEOUT_US	(-1)

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. One single gem object contains the ADS struct itself (guc_ads) and
 * all the extra buffers indirectly linked via the ADS struct's entries.
 *
 * Layout of the ADS blob allocated for the GuC:
 *
 *      +---------------------------------------+ <== base
 *      | guc_ads                               |
 *      +---------------------------------------+
 *      | guc_policies                          |
 *      +---------------------------------------+
 *      | guc_gt_system_info                    |
 *      +---------------------------------------+
 *      | guc_engine_usage                      |
 *      +---------------------------------------+
 *      | guc_um_init_params                    |
 *      +---------------------------------------+ <== static
 *      | guc_mmio_reg[countA] (engine 0.0)     |
 *      | guc_mmio_reg[countB] (engine 0.1)     |
 *      | guc_mmio_reg[countC] (engine 1.0)     |
 *      |         ...                           |
 *      +---------------------------------------+ <== dynamic
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | golden contexts                       |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | w/a KLVs                              |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | capture lists                         |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | UM queues                             |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 *      | private data                          |
 *      +---------------------------------------+
 *      | padding                               |
 *      +---------------------------------------+ <== 4K aligned
 */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_gt_system_info system_info;
	struct guc_engine_usage engine_usage;
	struct guc_um_init_params um_init_params;
	/* From here on, location is dynamic! Refer to above diagram. */
	struct guc_mmio_reg regset[];
} __packed;

#define ads_blob_read(ads_, field_)				\
	xe_map_rd_field(ads_to_xe(ads_), ads_to_map(ads_), 0,	\
			struct __guc_ads_blob, field_)

#define ads_blob_write(ads_, field_, val_)			\
	xe_map_wr_field(ads_to_xe(ads_), ads_to_map(ads_), 0,	\
			struct __guc_ads_blob, field_, val_)

#define info_map_write(xe_, map_, field_, val_) \
	xe_map_wr_field(xe_, map_, 0, struct guc_gt_system_info, field_, val_)

#define info_map_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, map_, 0, struct guc_gt_system_info, field_)

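/*
 * Illustrative usage of the accessors above (both patterns are taken from
 * later in this file): fields of the blob are always accessed through the
 * xe_map helpers, never via a plain pointer dereference, e.g.:
 *
 *	ads_blob_write(ads, policies.is_valid, 1);
 *	dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
 */
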
static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	xe_assert(xe, ads->regset_size);

	return ads->regset_size;
}

static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->golden_lrc_size);
}

static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->ads_waklv_size);
}

static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads->capture_size);
}

static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);

	if (!xe->info.has_usm)
		return 0;

	return GUC_UM_QUEUE_SIZE * GUC_UM_HW_QUEUE_MAX;
}

static size_t guc_ads_private_data_size(struct xe_guc_ads *ads)
{
	return PAGE_ALIGN(ads_to_guc(ads)->fw.private_data_size);
}

static size_t guc_ads_regset_offset(struct xe_guc_ads *ads)
{
	return offsetof(struct __guc_ads_blob, regset);
}

static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_regset_offset(ads) +
		 guc_ads_regset_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_waklv_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_golden_lrc_offset(ads) +
		 guc_ads_golden_lrc_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_waklv_offset(ads) +
		 guc_ads_waklv_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_um_queues_offset(struct xe_guc_ads *ads)
{
	u32 offset;

	offset = guc_ads_capture_offset(ads) +
		 guc_ads_capture_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_private_data_offset(struct xe_guc_ads *ads)
{
	size_t offset;

	offset = guc_ads_um_queues_offset(ads) +
		 guc_ads_um_queues_size(ads);

	return PAGE_ALIGN(offset);
}

static size_t guc_ads_size(struct xe_guc_ads *ads)
{
	return guc_ads_private_data_offset(ads) +
	       guc_ads_private_data_size(ads);
}

static size_t calculate_regset_size(struct xe_gt *gt)
{
	struct xe_reg_sr_entry *sr_entry;
	unsigned long sr_idx;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	unsigned int count = 0;

	for_each_hw_engine(hwe, gt, id)
		xa_for_each(&hwe->reg_sr.xa, sr_idx, sr_entry)
			count++;

	count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;

	if (XE_GT_WA(gt, 1607983814))
		count += LNCFCMOCS_REG_COUNT;

	return count * sizeof(struct guc_mmio_reg);
}

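/*
 * Sizing note (descriptive): the count above is a worst case: every
 * save/restore entry of every present engine, plus ADS_REGSET_EXTRA_MAX
 * slack entries for each of the XE_NUM_HW_ENGINES possible engines.
 * guc_mmio_reg_state_init() later asserts that the space actually used
 * stays within this bound.
 */
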
static u32 engine_enable_mask(struct xe_gt *gt, enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 mask = 0;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class)
			mask |= BIT(hwe->instance);

	return mask;
}

static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	size_t total_size = 0, alloc_size, real_size;
	int class;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		if (!engine_enable_mask(gt, class))
			continue;

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;
	}

	return total_size;
}

static void guc_waklv_enable(struct xe_guc_ads *ads,
			     u32 data[], u32 data_len_dw,
			     u32 *offset, u32 *remain,
			     enum xe_guc_klv_ids klv_id)
{
	size_t size = sizeof(u32) * (1 + data_len_dw);

	if (*remain < size) {
		drm_warn(&ads_to_xe(ads)->drm,
			 "w/a klv buffer too small to add klv id 0x%04X\n", klv_id);
		return;
	}

	/* 16:16 key/length */
	xe_map_wr(ads_to_xe(ads), ads_to_map(ads), *offset, u32,
		  FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
		  FIELD_PREP(GUC_KLV_0_LEN, data_len_dw));
	/* data_len_dw dwords of data */
	xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads),
			 *offset + sizeof(u32), data, data_len_dw * sizeof(u32));

	*offset += size;
	*remain -= size;
}

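/*
 * Illustrative KLV wire format, as emitted by guc_waklv_enable() above
 * (one header dword followed by data_len_dw payload dwords):
 *
 *	dw0:       [31:16] key (klv_id) | [15:0] length in dwords
 *	dw1..dwN:  payload (absent when data_len_dw == 0)
 */
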
static void guc_waklv_init(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u64 addr_ggtt;
	u32 offset, remain, size;

	offset = guc_ads_waklv_offset(ads);
	remain = guc_ads_waklv_size(ads);

	if (XE_GT_WA(gt, 16021333562))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
	if (XE_GT_WA(gt, 18024947630))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING);
	if (XE_GT_WA(gt, 16022287689))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE);

	if (XE_GT_WA(gt, 14022866841))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO);

	/*
	 * On RC6 exit, GuC will write register 0xB04 with the default value
	 * provided. As of now, the default value for this register is
	 * determined to be 0xC40. This could change in the future, so GuC
	 * depends on KMD to send it the correct value.
	 */
	if (XE_GT_WA(gt, 13011645652)) {
		u32 data = 0xC40;

		guc_waklv_enable(ads, &data, 1, &offset, &remain,
				 GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE);
	}

	if (XE_GT_WA(gt, 14022293748) || XE_GT_WA(gt, 22019794406))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET);

	if (GUC_FIRMWARE_VER_AT_LEAST(&gt->uc.guc, 70, 44) && XE_GT_WA(gt, 16026508708))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH);
	if (GUC_FIRMWARE_VER_AT_LEAST(&gt->uc.guc, 70, 47) && XE_GT_WA(gt, 16026007364)) {
		u32 data[] = {
			0x0,
			0xF,
		};
		guc_waklv_enable(ads, data, ARRAY_SIZE(data), &offset, &remain,
				 GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG);
	}

	if (XE_GT_WA(gt, 14020001231))
		guc_waklv_enable(ads, NULL, 0, &offset, &remain,
				 GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT);

	size = guc_ads_waklv_size(ads) - remain;
	if (!size)
		return;

	offset = guc_ads_waklv_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	ads_blob_write(ads, ads.wa_klv_addr_lo, lower_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_addr_hi, upper_32_bits(addr_ggtt));
	ads_blob_write(ads, ads.wa_klv_size, size);
}

static int calculate_waklv_size(struct xe_guc_ads *ads)
{
	/*
	 * A single page is both the minimum size possible and sufficiently
	 * large for all current platforms.
	 */
	return SZ_4K;
}

#define MAX_GOLDEN_LRC_SIZE	(SZ_4K * 64)

int xe_guc_ads_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_bo *bo;

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
	ads->regset_size = calculate_regset_size(gt);
	ads->ads_waklv_size = calculate_waklv_size(ads);

	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ads->bo = bo;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */

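/*
 * Sizing scheme (descriptive): at init time the golden LRC sizes are not yet
 * known, so the BO is allocated with MAX_GOLDEN_LRC_SIZE of worst-case
 * headroom on top of guc_ads_size(). xe_guc_ads_init_post_hwconfig() below
 * recalculates the real sizes once the hwconfig is available and asserts
 * that any regset growth plus the golden LRCs still fit in that headroom.
 */
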
/**
 * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
 * @ads: Additional data structures object
 *
 * Recalculate golden_lrc_size, capture_size and regset_size as the number of
 * hardware engines may have changed after the hwconfig was loaded. Also verify
 * the new sizes fit in the already allocated ADS buffer object.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	u32 prev_regset_size = ads->regset_size;

	xe_gt_assert(gt, ads->bo);

	ads->golden_lrc_size = calculate_golden_lrc_size(ads);
	/* Calculate the capture size with the worst-case size */
	ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
	ads->regset_size = calculate_regset_size(gt);

	xe_gt_assert(gt, ads->golden_lrc_size +
		     (ads->regset_size - prev_regset_size) <=
		     MAX_GOLDEN_LRC_SIZE);

	return 0;
}

static void guc_policies_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	u32 global_flags = 0;

	ads_blob_write(ads, policies.dpc_promote_time,
		       GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
	ads_blob_write(ads, policies.max_num_work_items,
		       GLOBAL_POLICY_MAX_NUM_WI);

	if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET)
		global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	ads_blob_write(ads, policies.global_flags, global_flags);
	ads_blob_write(ads, policies.is_valid, 1);
}

static void fill_engine_enable_masks(struct xe_gt *gt,
				     struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);

	info_map_write(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_RENDER));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COPY));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_DECODE));
	info_map_write(xe, info_map,
		       engine_enabled_masks[GUC_VIDEOENHANCE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_COMPUTE));
	info_map_write(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
		       engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}

/*
 * Write the offsets corresponding to the golden LRCs. The actual data is
 * populated later by guc_golden_lrc_populate().
 */
static void guc_golden_lrc_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	size_t alloc_size, real_size;
	u32 addr_ggtt, offset;
	int class;

	offset = guc_ads_golden_lrc_offset(ads);
	addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(class);

		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);

		/*
		 * This interface is slightly confusing. We need to pass the
		 * base address of the full golden context and the size of just
		 * the engine state, which is the section of the context image
		 * that starts after the execlists LRC registers. This is
		 * required to allow the GuC to restore just the engine state
		 * when a watchdog reset occurs.
		 * We calculate the engine state size by removing the size of
		 * what comes before it in the context image (which is identical
		 * on all engines).
		 */
		ads_blob_write(ads, ads.eng_state_size[guc_class],
			       real_size - xe_lrc_skip_size(xe));
		ads_blob_write(ads, ads.golden_context_lrca[guc_class],
			       addr_ggtt);

		addr_ggtt += alloc_size;
	}
}

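/*
 * Worked example with illustrative numbers (real sizes are platform
 * dependent): if xe_gt_lrc_size() returns 0x11000 for a class and
 * xe_lrc_skip_size() is 0x1000, the GuC is given eng_state_size = 0x10000
 * and golden_context_lrca = the GGTT address of the full 0x11000-byte
 * image; addr_ggtt then advances by the PAGE_ALIGN()ed allocation size
 * before the next enabled class is processed.
 */
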
static void guc_mapping_table_init_invalid(struct xe_gt *gt,
					   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int i, j;

	/* Table must be set to invalid values for entries not used */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
			info_map_write(xe, info_map, mapping_table[i][j],
				       GUC_MAX_INSTANCES_PER_CLASS);
}

static void guc_mapping_table_init(struct xe_gt *gt,
				   struct iosys_map *info_map)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	guc_mapping_table_init_invalid(gt, info_map);

	for_each_hw_engine(hwe, gt, id) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(hwe->class);
		info_map_write(xe, info_map,
			       mapping_table[guc_class][hwe->logical_instance],
			       hwe->instance);
	}
}

static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map,
				       enum guc_capture_list_class_type capture_class)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 mask;

	switch (capture_class) {
	case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
		mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_VIDEO:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_BLITTER:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
		break;
	case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
		mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
		break;
	default:
		mask = 0;
	}

	return mask;
}

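/*
 * Descriptive note on the inverted return convention below:
 * get_capture_list() returns true when the caller should fall back to the
 * null (empty) list, either because no register list exists for this
 * owner/type/class or because copying it would overflow the pre-sized
 * capture region; it returns false when *pptr was populated with a real
 * list.
 */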
static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt,
				    int owner, int type, int class, u32 *total_size, size_t *size,
				    void **pptr)
{
	*size = 0;

	if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) {
		if (*total_size + *size > ads->capture_size)
			xe_gt_dbg(gt, "Capture size overflow: %zu vs %d\n",
				  *total_size + *size, ads->capture_size);
		else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr))
			return false;
	}

	return true;
}

static int guc_capture_prep_lists(struct xe_guc_ads *ads)
{
	struct xe_guc *guc = ads_to_guc(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
	struct iosys_map info_map;
	size_t size = 0;
	void *ptr;
	int i, j;

	/*
	 * GuC Capture's steered reg-list needs to be allocated and initialized
	 * after the GuC-hwconfig is available, which is guaranteed at this
	 * point.
	 */
	xe_guc_capture_steered_list_init(ads_to_guc(ads));

	capture_offset = guc_ads_capture_offset(ads);
	ads_ggtt = xe_bo_ggtt_addr(ads->bo);
	info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
					 offsetof(struct __guc_ads_blob, system_info));

	/* first, set aside the first page for a capture_list with zero descriptors */
	total_size = PAGE_SIZE;
	if (!xe_guc_capture_getnullheader(guc, &ptr, &size))
		xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size);

	null_ggtt = ads_ggtt + capture_offset;
	capture_offset += PAGE_SIZE;

	/*
	 * Populate the capture list: at this point the ADS is already
	 * allocated and mapped to its worst-case size
	 */
	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
		bool write_empty_list;

		for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
			u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j);
			/* null list if we don't have said engine or list */
			if (!engine_mask) {
				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
				continue;
			}

			/* engine exists: start with engine-class registers */
			write_empty_list = get_capture_list(ads, guc, gt, i,
							    GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
							    j, &total_size, &size, &ptr);
			if (!write_empty_list) {
				ads_blob_write(ads, ads.capture_class[i][j],
					       ads_ggtt + capture_offset);
				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
						 ptr, size);
				total_size += size;
				capture_offset += size;
			} else {
				ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
			}

			/* engine exists: next, engine-instance registers */
			write_empty_list = get_capture_list(ads, guc, gt, i,
							    GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
							    j, &total_size, &size, &ptr);
			if (!write_empty_list) {
				ads_blob_write(ads, ads.capture_instance[i][j],
					       ads_ggtt + capture_offset);
				xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
						 ptr, size);
				total_size += size;
				capture_offset += size;
			} else {
				ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
			}
		}

		/* global registers are last in our PF/VF loops */
		write_empty_list = get_capture_list(ads, guc, gt, i,
						    GUC_STATE_CAPTURE_TYPE_GLOBAL,
						    0, &total_size, &size, &ptr);
		if (!write_empty_list) {
			ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset);
			xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr,
					 size);
			total_size += size;
			capture_offset += size;
		} else {
			ads_blob_write(ads, ads.capture_global[i], null_ggtt);
		}
	}

	if (ads->capture_size != PAGE_ALIGN(total_size))
		xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
			  PAGE_ALIGN(total_size), ads->capture_size);
	return PAGE_ALIGN(total_size);
}

static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
				      struct iosys_map *regset_map,
				      struct xe_reg reg,
				      unsigned int n_entry)
{
	struct guc_mmio_reg entry = {
		.offset = reg.addr,
		.flags = reg.masked ? GUC_REGSET_MASKED : 0,
	};

	if (reg.mcr) {
		struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
		u8 group, instance;

		bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
								  &group, &instance);

		if (steer) {
			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
			entry.flags |= GUC_REGSET_STEERING_NEEDED;
		}
	}

	xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
			 &entry, sizeof(entry));
}

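/*
 * Steering note (descriptive): when an MCR register needs explicit steering,
 * the group/instance pair returned by xe_gt_mcr_get_nonterminated_steering()
 * is packed into entry.flags above, so the GuC can steer its own write of
 * that register when it applies the regset.
 */
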
static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
					  struct iosys_map *regset_map,
					  struct xe_hw_engine *hwe)
{
	struct xe_hw_engine *hwe_rcs_reset_domain =
		xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	unsigned int count = 0;
	const struct {
		struct xe_reg reg;
		bool skip;
	} *e, extra_regs[] = {
		{ .reg = RING_MODE(hwe->mmio_base), },
		{ .reg = RING_HWS_PGA(hwe->mmio_base), },
		{ .reg = RING_IMR(hwe->mmio_base), },
		{ .reg = RCU_MODE, .skip = hwe != hwe_rcs_reset_domain },
		{ .reg = CCS_MODE,
		  .skip = hwe != hwe_rcs_reset_domain || !xe_gt_ccs_mode_enabled(hwe->gt) },
	};
	u32 i;

	BUILD_BUG_ON(ARRAY_SIZE(extra_regs) > ADS_REGSET_EXTRA_MAX);

	xa_for_each(&hwe->reg_sr.xa, idx, entry)
		guc_mmio_regset_write_one(ads, regset_map, entry->reg, count++);

	for (e = extra_regs; e < extra_regs + ARRAY_SIZE(extra_regs); e++) {
		if (e->skip)
			continue;

		guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
	}

	if (XE_GT_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
		for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
			guc_mmio_regset_write_one(ads, regset_map,
						  XELP_LNCFCMOCS(i), count++);
		}
	}

	return count;
}

static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
{
	size_t regset_offset = guc_ads_regset_offset(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 addr = xe_bo_ggtt_addr(ads->bo) + regset_offset;
	struct iosys_map regset_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
							    regset_offset);
	unsigned int regset_used = 0;

	for_each_hw_engine(hwe, gt, id) {
		unsigned int count;
		u8 gc;

		/*
		 * 1. Write all MMIO entries for this engine to the table. No
		 * need to worry about fused-off engines or engines with no
		 * entries in the regset: the reg_state_list has been zeroed
		 * by xe_guc_ads_populate()
		 */
		count = guc_mmio_regset_write(ads, &regset_map, hwe);
		if (!count)
			continue;

		/*
		 * 2. Record in the header (ads.reg_state_list) the address
		 * location and number of entries
		 */
		gc = xe_engine_class_to_guc_class(hwe->class);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].address, addr);
		ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].count, count);

		addr += count * sizeof(struct guc_mmio_reg);
		iosys_map_incr(&regset_map, count * sizeof(struct guc_mmio_reg));

		regset_used += count * sizeof(struct guc_mmio_reg);
	}

	xe_gt_assert(gt, regset_used <= ads->regset_size);
}

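/*
 * Descriptive note: the UM queues programmed below are used by the GuC for
 * page-fault handling on USM platforms; they are only carved out of the ADS
 * BO when xe->info.has_usm is set (see guc_ads_um_queues_size()), and
 * xe_guc_ads_populate() only calls guc_um_init_params() in that case.
 */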
static void guc_um_init_params(struct xe_guc_ads *ads)
{
	u32 um_queue_offset = guc_ads_um_queues_offset(ads);
	struct xe_guc *guc = ads_to_guc(ads);
	u64 base_dpa;
	u32 base_ggtt;
	bool with_dpa;
	int i;

	with_dpa = !xe_guc_using_main_gamctrl_queues(guc);

	base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
	base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;

	for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
		ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
			       with_dpa ? (base_dpa + (i * GUC_UM_QUEUE_SIZE)) : 0);
		ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
			       base_ggtt + (i * GUC_UM_QUEUE_SIZE));
		ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
			       GUC_UM_QUEUE_SIZE);
	}

	ads_blob_write(ads, um_init_params.page_response_timeout_in_us,
		       GUC_PAGE_RES_TIMEOUT_US);
}

static void guc_doorbell_init(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);

	if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
		u32 distdbreg =
			xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED);

		ads_blob_write(ads,
			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
			       REG_FIELD_GET(DOORBELLS_PER_SQIDI_MASK, distdbreg) + 1);
	}
}

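/*
 * Population overview (descriptive): the ADS contents are written in up to
 * three passes. xe_guc_ads_populate_minimal() writes just enough for the GuC
 * to load and expose the hwconfig table, xe_guc_ads_populate() fills in the
 * full contents once engine information is known, and
 * xe_guc_ads_populate_post_load() later copies in the golden LRCs recorded
 * by the first submission.
 */
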
/**
 * xe_guc_ads_populate_minimal - populate minimal ADS
 * @ads: Additional data structures object
 *
 * This function populates a minimal ADS that does not support submissions but
 * is enough for the GuC to load and for the hwconfig table to be read.
 */
void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
{
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
	guc_policies_init(ads);
	guc_golden_lrc_init(ads);
	guc_mapping_table_init_invalid(gt, &info_map);
	guc_doorbell_init(ads);

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

void xe_guc_ads_populate(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	u32 base = xe_bo_ggtt_addr(ads->bo);

	xe_gt_assert(gt, ads->bo);

	xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo));
	guc_policies_init(ads);
	fill_engine_enable_masks(gt, &info_map);
	guc_mmio_reg_state_init(ads);
	guc_golden_lrc_init(ads);
	guc_mapping_table_init(gt, &info_map);
	guc_capture_prep_lists(ads);
	guc_doorbell_init(ads);
	guc_waklv_init(ads);

	if (xe->info.has_usm) {
		guc_um_init_params(ads);
		ads_blob_write(ads, ads.um_init_data, base +
			       offsetof(struct __guc_ads_blob, um_init_params));
	}

	ads_blob_write(ads, ads.scheduler_policies, base +
		       offsetof(struct __guc_ads_blob, policies));
	ads_blob_write(ads, ads.gt_system_info, base +
		       offsetof(struct __guc_ads_blob, system_info));
	ads_blob_write(ads, ads.private_data, base +
		       guc_ads_private_data_offset(ads));
}

/*
 * After the golden LRCs are recorded for each engine class by the first
 * submission, copy them to the ADS, as initialized earlier by
 * guc_golden_lrc_init().
 */
static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
{
	struct xe_device *xe = ads_to_xe(ads);
	struct xe_gt *gt = ads_to_gt(ads);
	struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
			offsetof(struct __guc_ads_blob, system_info));
	size_t total_size = 0, alloc_size, real_size;
	u32 offset;
	int class;

	offset = guc_ads_golden_lrc_offset(ads);

	for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
		u8 guc_class;

		guc_class = xe_engine_class_to_guc_class(class);

		if (!info_map_read(xe, &info_map,
				   engine_enabled_masks[guc_class]))
			continue;

		xe_gt_assert(gt, gt->default_lrc[class]);

		real_size = xe_gt_lrc_size(gt, class);
		alloc_size = PAGE_ALIGN(real_size);
		total_size += alloc_size;

		xe_map_memcpy_to(xe, ads_to_map(ads), offset,
				 gt->default_lrc[class], real_size);

		offset += alloc_size;
	}

	xe_gt_assert(gt, total_size == ads->golden_lrc_size);
}

void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
	guc_golden_lrc_populate(ads);
}

static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
{
	struct xe_guc_ct *ct = &ads_to_guc(ads)->ct;
	u32 action[] = {
		XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
		policy_offset
	};

	return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
}

/**
 * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
 * @ads: Additional data structures object
 * @enable_engine_reset: true to enable engine resets, false otherwise
 *
 * This function updates the GuC's engine reset policy.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
					     bool enable_engine_reset)
{
	struct guc_policies *policies;
	struct xe_guc *guc = ads_to_guc(ads);
	CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	policies = xe_guc_buf_cpu_ptr(buf);
	memset(policies, 0, sizeof(*policies));

	policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
	policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
	policies->is_valid = 1;

	if (enable_engine_reset)
		policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
	else
		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;

	return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
}