// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_graphics_desc *graphics;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_media_desc *media;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.name = "Xe_LP",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const struct xe_graphics_desc graphics_xelpp = {
	.name = "Xe_LP+",
	.ver = 12,
	.rel = 10,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.dma_mask_size = 46, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.name = "Xe_HPG",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.name = "Xe_HPC",
	.ver = 12,
	.rel = 60,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.dma_mask_size = 52,
	.max_remote_tiles = 1,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};
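/*
 * Note that XE_HP_FEATURES relies on the -Woverride-init suppression
 * above: graphics_xehpc first takes the macro defaults (dma_mask_size =
 * 46, va_bits = 48, vm_max_level = 3) and then overrides them with its
 * own initializers, since later designated initializers win in C. Its
 * effective values are therefore 52, 57 and 4 respectively.
 */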
static const struct xe_graphics_desc graphics_xelpg = {
	.name = "Xe_LPG",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.dma_mask_size = 46, \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	.name = "Xe2_LPG / Xe2_HPG / Xe3_LPG",

	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.name = "Xe_M",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xehpm = {
	.name = "Xe_HPM",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.name = "Xe_LPM+",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_media_desc media_xe2 = {
	.name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_device_desc tgl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(TIGERLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ROCKETLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_S),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_P),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_N),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};
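/*
 * Subplatform ID lists such as adls_rpls_ids above expand the PCI IDs
 * verbatim through NOP() and are zero-terminated; find_subplatform()
 * below walks each list until it hits the trailing 0. As a worked
 * example of the descriptor macros, PLATFORM(ALDERLAKE_S) expands to
 *
 *	.platform = XE_ALDERLAKE_S,
 *	.platform_name = "ALDERLAKE_S",
 */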
#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.graphics = &graphics_xelpp,
	.media = &media_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.graphics = &graphics_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.has_display = false,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.has_display = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.has_display = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.has_display = true,
	.has_heci_cscfi = 1,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.has_display = true,
	.require_force_probe = true,
};

#undef PLATFORM
__diag_pop();

/* Map of GMD_ID values to graphics IP */
static const struct gmdid_map graphics_ip_map[] = {
	{ 1270, &graphics_xelpg },
	{ 1271, &graphics_xelpg },
	{ 1274, &graphics_xelpg },	/* Xe_LPG+ */
	{ 2001, &graphics_xe2 },
	{ 2004, &graphics_xe2 },
	{ 3000, &graphics_xe2 },
	{ 3001, &graphics_xe2 },
};

/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
	{ 1300, &media_xelpmp },
	{ 1301, &media_xe2 },
	{ 2000, &media_xe2 },
	{ 3000, &media_xe2 },
};

/*
 * Make sure device matches here are listed from most specific to most
 * general. For example, a match based on subsystem and subvendor IDs
 * must come before a more general match on the PCI device ID alone,
 * otherwise the wrong descriptor above would be picked.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
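/*
 * Illustrative example (device IDs picked only for illustration): with
 * xe.force_probe="4f80,!5690" on the kernel command line,
 * id_forced(0x4f80) below returns true and id_blocked(0x5690) returns
 * true. A plain "*" force-probes every device and "!*" blocks every
 * device.
 */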
/* Is device_id present in the comma-separated list of IDs? */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};
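/*
 * GMD_ID encodes the IP version as arch * 100 + release (see the *ver
 * computation at the end of read_gmdid() below). For example, a graphics
 * GMD_ID of arch 12, release 74 yields ver 1274, which graphics_ip_map
 * above resolves to graphics_xelpg (Xe_LPG+).
 */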
static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

/*
 * Pre-GMD_ID platform: the device descriptor already points to the
 * appropriate graphics descriptor. Simply forward the description and
 * calculate the version appropriately. "graphics" should be present in
 * all such platforms, while media is optional.
 */
static void handle_pre_gmdid(struct xe_device *xe,
			     const struct xe_graphics_desc *graphics,
			     const struct xe_media_desc *media)
{
	xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;

	if (media)
		xe->info.media_verx100 = media->ver * 100 + media->rel;
}

/*
 * GMD_ID platform: read the IP version from hardware and select the
 * graphics descriptor based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_graphics_desc **graphics,
			 const struct xe_media_desc **media,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
		if (ver == graphics_ip_map[i].ver) {
			xe->info.graphics_verx100 = ver;
			*graphics = graphics_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.graphics_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);

	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
		if (ver == media_ip_map[i].ver) {
			xe->info.media_verx100 = ver;
			*media = media_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.media_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}
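/*
 * Worked example of the two paths above: DG2 is a pre-GMD_ID platform,
 * so handle_pre_gmdid() computes graphics_verx100 = 12 * 100 + 55 = 1255
 * from graphics_xehpg. A GMD_ID platform such as METEORLAKE leaves
 * desc->graphics NULL and handle_gmdid() instead reads a value like 1270
 * or 1271 from hardware and looks it up in graphics_ip_map.
 */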
/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_graphics_desc *graphics_desc,
			const struct xe_media_desc *media_desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers. desc->graphics will only
	 * ever be set at this point for platforms before GMD_ID. In that case
	 * the IP descriptions and versions are simply derived from that.
	 */
	if (graphics_desc) {
		handle_pre_gmdid(xe, graphics_desc, media_desc);
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !media_desc);
		handle_gmdid(xe, &graphics_desc, &media_desc,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_desc)
		return -ENODEV;

	xe->info.graphics_name = graphics_desc->name;
	xe->info.media_name = media_desc ? media_desc->name : "none";

	xe->info.dma_mask_size = graphics_desc->dma_mask_size;
	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote
	 * tiles". All of these together determine the overall GT count.
	 *
	 * FIXME: 'tile_count' here is misnamed since the rest of the driver
	 * treats it as the number of GTs rather than just the number of tiles.
	 */
	xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
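	/*
	 * For example: PVC (graphics_xehpc, max_remote_tiles = 1) ends up
	 * with tile_count = 2, while a single-tile platform with media
	 * version >= 13 keeps tile_count = 1 but gains a media GT in the
	 * loop below, for a gt_count of 2.
	 */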
	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe;

	xe = pdev_to_xe_device(pdev);
	if (!xe) /* driver load aborted, nothing to clean up */
		return;

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_enabled(xe))
		return xe_survivability_mode_remove(xe);

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
	pci_set_drvdata(pdev, NULL);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when
 * function error injection skips the whole function.
 */
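/*
 * A minimal sketch (xe_foo_init is hypothetical, not a function in this
 * driver) of how such an init helper is marked:
 *
 *	int xe_foo_init(struct xe_device *xe)
 *	{
 *		...
 *	}
 *	ALLOW_ERROR_INJECTION(xe_foo_init, ERRNO);
 *
 * With CONFIG_FUNCTION_ERROR_INJECTION enabled, the fail_function
 * facility can then force xe_foo_init() to return a chosen -errno
 * without executing its body.
 */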
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);

	/*
	 * In boot survivability mode, no DRM card is exposed and the driver
	 * is loaded with the bare minimum to allow firmware to be flashed
	 * through mei. Return success if survivability mode is enabled.
	 */
	if (err) {
		if (xe_survivability_mode_enabled(xe))
			return 0;

		return err;
	}
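	/*
	 * Note that the early return above pairs with xe_pci_remove(),
	 * which likewise short-circuits to xe_survivability_mode_remove()
	 * while the device is in survivability mode.
	 */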
	err = xe_info_init(xe, desc->graphics, desc->media);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}
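/*
 * Sketch of the system-sleep D3cold handshake implemented above and
 * below: xe_pci_suspend() enables D3cold on the root port so S2idle/S0ix
 * can power the card down, and xe_pci_resume() immediately disables it
 * again so the runtime PM callbacks stay in charge of the D3cold policy,
 * re-evaluated in xe_pci_runtime_idle() via xe_pm_d3cold_allowed_toggle().
 */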
static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif