1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include "xe_pci.h" 7 8 #include <kunit/static_stub.h> 9 #include <linux/device/driver.h> 10 #include <linux/module.h> 11 #include <linux/pci.h> 12 #include <linux/pm_runtime.h> 13 14 #include <drm/drm_color_mgmt.h> 15 #include <drm/drm_drv.h> 16 #include <drm/intel/pciids.h> 17 18 #include "display/xe_display.h" 19 #include "regs/xe_gt_regs.h" 20 #include "xe_device.h" 21 #include "xe_drv.h" 22 #include "xe_gt.h" 23 #include "xe_gt_sriov_vf.h" 24 #include "xe_guc.h" 25 #include "xe_macros.h" 26 #include "xe_mmio.h" 27 #include "xe_module.h" 28 #include "xe_pci_sriov.h" 29 #include "xe_pci_types.h" 30 #include "xe_pm.h" 31 #include "xe_sriov.h" 32 #include "xe_step.h" 33 #include "xe_survivability_mode.h" 34 #include "xe_tile.h" 35 36 enum toggle_d3cold { 37 D3COLD_DISABLE, 38 D3COLD_ENABLE, 39 }; 40 41 struct xe_subplatform_desc { 42 enum xe_subplatform subplatform; 43 const char *name; 44 const u16 *pciidlist; 45 }; 46 47 struct xe_device_desc { 48 /* Should only ever be set for platforms without GMD_ID */ 49 const struct xe_graphics_desc *graphics; 50 /* Should only ever be set for platforms without GMD_ID */ 51 const struct xe_media_desc *media; 52 53 const char *platform_name; 54 const struct xe_subplatform_desc *subplatforms; 55 56 enum xe_platform platform; 57 58 u8 dma_mask_size; 59 u8 max_remote_tiles:2; 60 61 u8 require_force_probe:1; 62 u8 is_dgfx:1; 63 64 u8 has_display:1; 65 u8 has_heci_gscfi:1; 66 u8 has_heci_cscfi:1; 67 u8 has_llc:1; 68 u8 has_pxp:1; 69 u8 has_sriov:1; 70 u8 skip_guc_pc:1; 71 u8 skip_mtcfg:1; 72 u8 skip_pcode:1; 73 }; 74 75 __diag_push(); 76 __diag_ignore_all("-Woverride-init", "Allow field overrides in table"); 77 78 #define PLATFORM(x) \ 79 .platform = XE_##x, \ 80 .platform_name = #x 81 82 #define NOP(x) x 83 84 static const struct xe_graphics_desc graphics_xelp = { 85 .name = "Xe_LP", 86 .ver = 12, 87 .rel = 0, 88 89 .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0), 90 91 .va_bits = 48, 92 .vm_max_level = 3, 93 }; 94 95 static const struct xe_graphics_desc graphics_xelpp = { 96 .name = "Xe_LP+", 97 .ver = 12, 98 .rel = 10, 99 100 .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0), 101 102 .va_bits = 48, 103 .vm_max_level = 3, 104 }; 105 106 #define XE_HP_FEATURES \ 107 .has_range_tlb_invalidation = true, \ 108 .va_bits = 48, \ 109 .vm_max_level = 3 110 111 static const struct xe_graphics_desc graphics_xehpg = { 112 .name = "Xe_HPG", 113 .ver = 12, 114 .rel = 55, 115 116 .hw_engine_mask = 117 BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) | 118 BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) | 119 BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3), 120 121 XE_HP_FEATURES, 122 .vram_flags = XE_VRAM_FLAGS_NEED64K, 123 124 .has_flat_ccs = 1, 125 }; 126 127 static const struct xe_graphics_desc graphics_xehpc = { 128 .name = "Xe_HPC", 129 .ver = 12, 130 .rel = 60, 131 132 .hw_engine_mask = 133 BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) | 134 BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) | 135 BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) | 136 BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) | 137 BIT(XE_HW_ENGINE_BCS8) | 138 BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) | 139 BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3), 140 141 XE_HP_FEATURES, 142 .va_bits = 57, 143 .vm_max_level = 4, 144 .vram_flags = XE_VRAM_FLAGS_NEED64K, 145 146 .has_asid = 1, 147 .has_atomic_enable_pte_bit = 1, 148 .has_usm = 1, 149 }; 150 151 static const struct xe_graphics_desc graphics_xelpg = { 152 .name = "Xe_LPG", 153 .hw_engine_mask = 154 BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) | 155 BIT(XE_HW_ENGINE_CCS0), 156 157 XE_HP_FEATURES, 158 }; 159 160 #define XE2_GFX_FEATURES \ 161 .has_asid = 1, \ 162 .has_atomic_enable_pte_bit = 1, \ 163 .has_flat_ccs = 1, \ 164 .has_indirect_ring_state = 1, \ 165 .has_range_tlb_invalidation = 1, \ 166 .has_usm = 1, \ 167 .va_bits = 48, \ 168 .vm_max_level = 4, \ 169 .hw_engine_mask = \ 170 BIT(XE_HW_ENGINE_RCS0) | \ 171 BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \ 172 GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0) 173 174 static const struct xe_graphics_desc graphics_xe2 = { 175 .name = "Xe2_LPG / Xe2_HPG / Xe3_LPG", 176 177 XE2_GFX_FEATURES, 178 }; 179 180 static const struct xe_media_desc media_xem = { 181 .name = "Xe_M", 182 .ver = 12, 183 .rel = 0, 184 185 .hw_engine_mask = 186 GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | 187 GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0), 188 }; 189 190 static const struct xe_media_desc media_xehpm = { 191 .name = "Xe_HPM", 192 .ver = 12, 193 .rel = 55, 194 195 .hw_engine_mask = 196 GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | 197 GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0), 198 }; 199 200 static const struct xe_media_desc media_xelpmp = { 201 .name = "Xe_LPM+", 202 .hw_engine_mask = 203 GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | 204 GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) | 205 BIT(XE_HW_ENGINE_GSCCS0) 206 }; 207 208 static const struct xe_media_desc media_xe2 = { 209 .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM", 210 .hw_engine_mask = 211 GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) | 212 GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) | 213 BIT(XE_HW_ENGINE_GSCCS0) 214 }; 215 216 static const struct xe_device_desc tgl_desc = { 217 .graphics = &graphics_xelp, 218 .media = &media_xem, 219 PLATFORM(TIGERLAKE), 220 .dma_mask_size = 39, 221 .has_display = true, 222 .has_llc = true, 223 .require_force_probe = true, 224 }; 225 226 static const struct xe_device_desc rkl_desc = { 227 .graphics = &graphics_xelp, 228 .media = &media_xem, 229 PLATFORM(ROCKETLAKE), 230 .dma_mask_size = 39, 231 .has_display = true, 232 .has_llc = true, 233 .require_force_probe = true, 234 }; 235 236 static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 }; 237 238 static const struct xe_device_desc adl_s_desc = { 239 .graphics = &graphics_xelp, 240 .media = &media_xem, 241 PLATFORM(ALDERLAKE_S), 242 .dma_mask_size = 39, 243 .has_display = true, 244 .has_llc = true, 245 .require_force_probe = true, 246 .subplatforms = (const struct xe_subplatform_desc[]) { 247 { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids }, 248 {}, 249 }, 250 }; 251 252 static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 }; 253 254 static const struct xe_device_desc adl_p_desc = { 255 .graphics = &graphics_xelp, 256 .media = &media_xem, 257 PLATFORM(ALDERLAKE_P), 258 .dma_mask_size = 39, 259 .has_display = true, 260 .has_llc = true, 261 .require_force_probe = true, 262 .subplatforms = (const struct xe_subplatform_desc[]) { 263 { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids }, 264 {}, 265 }, 266 }; 267 268 static const struct xe_device_desc adl_n_desc = { 269 .graphics = &graphics_xelp, 270 .media = &media_xem, 271 PLATFORM(ALDERLAKE_N), 272 .dma_mask_size = 39, 273 .has_display = true, 274 .has_llc = true, 275 .require_force_probe = true, 276 }; 277 278 #define DGFX_FEATURES \ 279 .is_dgfx = 1 280 281 static const struct xe_device_desc dg1_desc = { 282 .graphics = &graphics_xelpp, 283 .media = &media_xem, 284 DGFX_FEATURES, 285 PLATFORM(DG1), 286 .dma_mask_size = 39, 287 .has_display = true, 288 .has_heci_gscfi = 1, 289 .require_force_probe = true, 290 }; 291 292 static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 }; 293 static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 }; 294 static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 }; 295 296 #define DG2_FEATURES \ 297 DGFX_FEATURES, \ 298 PLATFORM(DG2), \ 299 .has_heci_gscfi = 1, \ 300 .subplatforms = (const struct xe_subplatform_desc[]) { \ 301 { XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \ 302 { XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \ 303 { XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \ 304 { } \ 305 } 306 307 static const struct xe_device_desc ats_m_desc = { 308 .graphics = &graphics_xehpg, 309 .media = &media_xehpm, 310 .dma_mask_size = 46, 311 .require_force_probe = true, 312 313 DG2_FEATURES, 314 .has_display = false, 315 }; 316 317 static const struct xe_device_desc dg2_desc = { 318 .graphics = &graphics_xehpg, 319 .media = &media_xehpm, 320 .dma_mask_size = 46, 321 .require_force_probe = true, 322 323 DG2_FEATURES, 324 .has_display = true, 325 }; 326 327 static const __maybe_unused struct xe_device_desc pvc_desc = { 328 .graphics = &graphics_xehpc, 329 DGFX_FEATURES, 330 PLATFORM(PVC), 331 .dma_mask_size = 52, 332 .has_display = false, 333 .has_heci_gscfi = 1, 334 .max_remote_tiles = 1, 335 .require_force_probe = true, 336 }; 337 338 static const struct xe_device_desc mtl_desc = { 339 /* .graphics and .media determined via GMD_ID */ 340 .require_force_probe = true, 341 PLATFORM(METEORLAKE), 342 .dma_mask_size = 46, 343 .has_display = true, 344 .has_pxp = true, 345 }; 346 347 static const struct xe_device_desc lnl_desc = { 348 PLATFORM(LUNARLAKE), 349 .dma_mask_size = 46, 350 .has_display = true, 351 .has_pxp = true, 352 }; 353 354 static const struct xe_device_desc bmg_desc = { 355 DGFX_FEATURES, 356 PLATFORM(BATTLEMAGE), 357 .dma_mask_size = 46, 358 .has_display = true, 359 .has_heci_cscfi = 1, 360 }; 361 362 static const struct xe_device_desc ptl_desc = { 363 PLATFORM(PANTHERLAKE), 364 .dma_mask_size = 46, 365 .has_display = true, 366 .has_sriov = true, 367 .require_force_probe = true, 368 }; 369 370 #undef PLATFORM 371 __diag_pop(); 372 373 /* Map of GMD_ID values to graphics IP */ 374 static const struct gmdid_map graphics_ip_map[] = { 375 { 1270, &graphics_xelpg }, 376 { 1271, &graphics_xelpg }, 377 { 1274, &graphics_xelpg }, /* Xe_LPG+ */ 378 { 2001, &graphics_xe2 }, 379 { 2004, &graphics_xe2 }, 380 { 3000, &graphics_xe2 }, 381 { 3001, &graphics_xe2 }, 382 }; 383 384 /* Map of GMD_ID values to media IP */ 385 static const struct gmdid_map media_ip_map[] = { 386 { 1300, &media_xelpmp }, 387 { 1301, &media_xe2 }, 388 { 2000, &media_xe2 }, 389 { 3000, &media_xe2 }, 390 }; 391 392 /* 393 * Make sure any device matches here are from most specific to most 394 * general. For example, since the Quanta match is based on the subsystem 395 * and subvendor IDs, we need it to come before the more general IVB 396 * PCI ID matches, otherwise we'll use the wrong info struct above. 397 */ 398 static const struct pci_device_id pciidlist[] = { 399 INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc), 400 INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc), 401 INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), 402 INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), 403 INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc), 404 INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc), 405 INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc), 406 INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc), 407 INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc), 408 INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc), 409 INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc), 410 INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc), 411 INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc), 412 INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc), 413 INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc), 414 INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc), 415 { } 416 }; 417 MODULE_DEVICE_TABLE(pci, pciidlist); 418 419 /* is device_id present in comma separated list of ids */ 420 static bool device_id_in_list(u16 device_id, const char *devices, bool negative) 421 { 422 char *s, *p, *tok; 423 bool ret; 424 425 if (!devices || !*devices) 426 return false; 427 428 /* match everything */ 429 if (negative && strcmp(devices, "!*") == 0) 430 return true; 431 if (!negative && strcmp(devices, "*") == 0) 432 return true; 433 434 s = kstrdup(devices, GFP_KERNEL); 435 if (!s) 436 return false; 437 438 for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) { 439 u16 val; 440 441 if (negative && tok[0] == '!') 442 tok++; 443 else if ((negative && tok[0] != '!') || 444 (!negative && tok[0] == '!')) 445 continue; 446 447 if (kstrtou16(tok, 16, &val) == 0 && val == device_id) { 448 ret = true; 449 break; 450 } 451 } 452 453 kfree(s); 454 455 return ret; 456 } 457 458 static bool id_forced(u16 device_id) 459 { 460 return device_id_in_list(device_id, xe_modparam.force_probe, false); 461 } 462 463 static bool id_blocked(u16 device_id) 464 { 465 return device_id_in_list(device_id, xe_modparam.force_probe, true); 466 } 467 468 static const struct xe_subplatform_desc * 469 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc) 470 { 471 const struct xe_subplatform_desc *sp; 472 const u16 *id; 473 474 for (sp = desc->subplatforms; sp && sp->subplatform; sp++) 475 for (id = sp->pciidlist; *id; id++) 476 if (*id == xe->info.devid) 477 return sp; 478 479 return NULL; 480 } 481 482 enum xe_gmdid_type { 483 GMDID_GRAPHICS, 484 GMDID_MEDIA 485 }; 486 487 static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) 488 { 489 struct xe_mmio *mmio = xe_root_tile_mmio(xe); 490 struct xe_reg gmdid_reg = GMD_ID; 491 u32 val; 492 493 KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid); 494 495 if (IS_SRIOV_VF(xe)) { 496 struct xe_gt *gt = xe_root_mmio_gt(xe); 497 498 /* 499 * To get the value of the GMDID register, VFs must obtain it 500 * from the GuC using MMIO communication. 501 * 502 * Note that at this point the xe_gt is not fully uninitialized 503 * and only basic access to MMIO registers is possible. To use 504 * our existing GuC communication functions we must perform at 505 * least basic xe_gt and xe_guc initialization. 506 * 507 * Since to obtain the value of GMDID_MEDIA we need to use the 508 * media GuC, temporarily tweak the gt type. 509 */ 510 xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED); 511 512 if (type == GMDID_MEDIA) { 513 gt->info.id = 1; 514 gt->info.type = XE_GT_TYPE_MEDIA; 515 } else { 516 gt->info.id = 0; 517 gt->info.type = XE_GT_TYPE_MAIN; 518 } 519 520 xe_gt_mmio_init(gt); 521 xe_guc_comm_init_early(>->uc.guc); 522 523 /* Don't bother with GMDID if failed to negotiate the GuC ABI */ 524 val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt); 525 526 /* 527 * Only undo xe_gt.info here, the remaining changes made above 528 * will be overwritten as part of the regular initialization. 529 */ 530 gt->info.id = 0; 531 gt->info.type = XE_GT_TYPE_UNINITIALIZED; 532 } else { 533 /* 534 * GMD_ID is a GT register, but at this point in the driver 535 * init we haven't fully initialized the GT yet so we need to 536 * read the register with the tile's MMIO accessor. That means 537 * we need to apply the GSI offset manually since it won't get 538 * automatically added as it would if we were using a GT mmio 539 * accessor. 540 */ 541 if (type == GMDID_MEDIA) 542 gmdid_reg.addr += MEDIA_GT_GSI_OFFSET; 543 544 val = xe_mmio_read32(mmio, gmdid_reg); 545 } 546 547 *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val); 548 *revid = REG_FIELD_GET(GMD_ID_REVID, val); 549 } 550 551 /* 552 * Pre-GMD_ID platform: device descriptor already points to the appropriate 553 * graphics descriptor. Simply forward the description and calculate the version 554 * appropriately. "graphics" should be present in all such platforms, while 555 * media is optional. 556 */ 557 static void handle_pre_gmdid(struct xe_device *xe, 558 const struct xe_graphics_desc *graphics, 559 const struct xe_media_desc *media) 560 { 561 xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel; 562 563 if (media) 564 xe->info.media_verx100 = media->ver * 100 + media->rel; 565 566 } 567 568 /* 569 * GMD_ID platform: read IP version from hardware and select graphics descriptor 570 * based on the result. 571 */ 572 static void handle_gmdid(struct xe_device *xe, 573 const struct xe_graphics_desc **graphics, 574 const struct xe_media_desc **media, 575 u32 *graphics_revid, 576 u32 *media_revid) 577 { 578 u32 ver; 579 580 read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid); 581 582 for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) { 583 if (ver == graphics_ip_map[i].ver) { 584 xe->info.graphics_verx100 = ver; 585 *graphics = graphics_ip_map[i].ip; 586 587 break; 588 } 589 } 590 591 if (!xe->info.graphics_verx100) { 592 drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n", 593 ver / 100, ver % 100); 594 } 595 596 read_gmdid(xe, GMDID_MEDIA, &ver, media_revid); 597 598 /* Media may legitimately be fused off / not present */ 599 if (ver == 0) 600 return; 601 602 for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) { 603 if (ver == media_ip_map[i].ver) { 604 xe->info.media_verx100 = ver; 605 *media = media_ip_map[i].ip; 606 607 break; 608 } 609 } 610 611 if (!xe->info.media_verx100) { 612 drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n", 613 ver / 100, ver % 100); 614 } 615 } 616 617 /* 618 * Initialize device info content that only depends on static driver_data 619 * passed to the driver at probe time from PCI ID table. 620 */ 621 static int xe_info_init_early(struct xe_device *xe, 622 const struct xe_device_desc *desc, 623 const struct xe_subplatform_desc *subplatform_desc) 624 { 625 int err; 626 627 xe->info.platform_name = desc->platform_name; 628 xe->info.platform = desc->platform; 629 xe->info.subplatform = subplatform_desc ? 630 subplatform_desc->subplatform : XE_SUBPLATFORM_NONE; 631 632 xe->info.dma_mask_size = desc->dma_mask_size; 633 xe->info.is_dgfx = desc->is_dgfx; 634 xe->info.has_heci_gscfi = desc->has_heci_gscfi; 635 xe->info.has_heci_cscfi = desc->has_heci_cscfi; 636 xe->info.has_llc = desc->has_llc; 637 xe->info.has_pxp = desc->has_pxp; 638 xe->info.has_sriov = desc->has_sriov; 639 xe->info.skip_guc_pc = desc->skip_guc_pc; 640 xe->info.skip_mtcfg = desc->skip_mtcfg; 641 xe->info.skip_pcode = desc->skip_pcode; 642 643 xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) && 644 xe_modparam.probe_display && 645 desc->has_display; 646 xe->info.tile_count = 1 + desc->max_remote_tiles; 647 648 err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0); 649 if (err) 650 return err; 651 652 return 0; 653 } 654 655 /* 656 * Initialize device info content that does require knowledge about 657 * graphics / media IP version. 658 * Make sure that GT / tile structures allocated by the driver match the data 659 * present in device info. 660 */ 661 static int xe_info_init(struct xe_device *xe, 662 const struct xe_graphics_desc *graphics_desc, 663 const struct xe_media_desc *media_desc) 664 { 665 u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0; 666 struct xe_tile *tile; 667 struct xe_gt *gt; 668 u8 id; 669 670 /* 671 * If this platform supports GMD_ID, we'll detect the proper IP 672 * descriptor to use from hardware registers. desc->graphics will only 673 * ever be set at this point for platforms before GMD_ID. In that case 674 * the IP descriptions and versions are simply derived from that. 675 */ 676 if (graphics_desc) { 677 handle_pre_gmdid(xe, graphics_desc, media_desc); 678 xe->info.step = xe_step_pre_gmdid_get(xe); 679 } else { 680 xe_assert(xe, !media_desc); 681 handle_gmdid(xe, &graphics_desc, &media_desc, 682 &graphics_gmdid_revid, &media_gmdid_revid); 683 xe->info.step = xe_step_gmdid_get(xe, 684 graphics_gmdid_revid, 685 media_gmdid_revid); 686 } 687 688 /* 689 * If we couldn't detect the graphics IP, that's considered a fatal 690 * error and we should abort driver load. Failing to detect media 691 * IP is non-fatal; we'll just proceed without enabling media support. 692 */ 693 if (!graphics_desc) 694 return -ENODEV; 695 696 xe->info.graphics_name = graphics_desc->name; 697 xe->info.media_name = media_desc ? media_desc->name : "none"; 698 699 xe->info.vram_flags = graphics_desc->vram_flags; 700 xe->info.va_bits = graphics_desc->va_bits; 701 xe->info.vm_max_level = graphics_desc->vm_max_level; 702 xe->info.has_asid = graphics_desc->has_asid; 703 xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit; 704 if (xe->info.platform != XE_PVC) 705 xe->info.has_device_atomics_on_smem = 1; 706 707 /* Runtime detection may change this later */ 708 xe->info.has_flat_ccs = graphics_desc->has_flat_ccs; 709 710 xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation; 711 xe->info.has_usm = graphics_desc->has_usm; 712 713 for_each_remote_tile(tile, xe, id) { 714 int err; 715 716 err = xe_tile_init_early(tile, xe, id); 717 if (err) 718 return err; 719 } 720 721 /* 722 * All platforms have at least one primary GT. Any platform with media 723 * version 13 or higher has an additional dedicated media GT. And 724 * depending on the graphics IP there may be additional "remote tiles." 725 * All of these together determine the overall GT count. 726 */ 727 for_each_tile(tile, xe, id) { 728 gt = tile->primary_gt; 729 gt->info.id = xe->info.gt_count++; 730 gt->info.type = XE_GT_TYPE_MAIN; 731 gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state; 732 gt->info.engine_mask = graphics_desc->hw_engine_mask; 733 734 if (MEDIA_VER(xe) < 13 && media_desc) 735 gt->info.engine_mask |= media_desc->hw_engine_mask; 736 737 if (MEDIA_VER(xe) < 13 || !media_desc) 738 continue; 739 740 /* 741 * Allocate and setup media GT for platforms with standalone 742 * media. 743 */ 744 tile->media_gt = xe_gt_alloc(tile); 745 if (IS_ERR(tile->media_gt)) 746 return PTR_ERR(tile->media_gt); 747 748 gt = tile->media_gt; 749 gt->info.type = XE_GT_TYPE_MEDIA; 750 gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state; 751 gt->info.engine_mask = media_desc->hw_engine_mask; 752 753 /* 754 * FIXME: At the moment multi-tile and standalone media are 755 * mutually exclusive on current platforms. We'll need to 756 * come up with a better way to number GTs if we ever wind 757 * up with platforms that support both together. 758 */ 759 drm_WARN_ON(&xe->drm, id != 0); 760 gt->info.id = xe->info.gt_count++; 761 } 762 763 return 0; 764 } 765 766 static void xe_pci_remove(struct pci_dev *pdev) 767 { 768 struct xe_device *xe; 769 770 xe = pdev_to_xe_device(pdev); 771 if (!xe) /* driver load aborted, nothing to cleanup */ 772 return; 773 774 if (IS_SRIOV_PF(xe)) 775 xe_pci_sriov_configure(pdev, 0); 776 777 if (xe_survivability_mode_enabled(xe)) 778 return xe_survivability_mode_remove(xe); 779 780 xe_device_remove(xe); 781 xe_pm_runtime_fini(xe); 782 pci_set_drvdata(pdev, NULL); 783 } 784 785 /* 786 * Probe the PCI device, initialize various parts of the driver. 787 * 788 * Fault injection is used to test the error paths of some initialization 789 * functions called either directly from xe_pci_probe() or indirectly for 790 * example through xe_device_probe(). Those functions use the kernel fault 791 * injection capabilities infrastructure, see 792 * Documentation/fault-injection/fault-injection.rst for details. The macro 793 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution 794 * at runtime and use a provided return value. The first requirement for 795 * error injectable functions is proper handling of the error code by the 796 * caller for recovery, which is always the case here. The second 797 * requirement is that no state is changed before the first error return. 798 * It is not strictly fulfilled for all initialization functions using the 799 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those 800 * error cases at probe time, the error code is simply propagated up by the 801 * caller. Therefore there is no consequence on those specific callers when 802 * function error injection skips the whole function. 803 */ 804 static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 805 { 806 const struct xe_device_desc *desc = (const void *)ent->driver_data; 807 const struct xe_subplatform_desc *subplatform_desc; 808 struct xe_device *xe; 809 int err; 810 811 if (desc->require_force_probe && !id_forced(pdev->device)) { 812 dev_info(&pdev->dev, 813 "Your graphics device %04x is not officially supported\n" 814 "by xe driver in this kernel version. To force Xe probe,\n" 815 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n" 816 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n" 817 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n", 818 pdev->device, pdev->device, pdev->device, 819 pdev->device, pdev->device); 820 return -ENODEV; 821 } 822 823 if (id_blocked(pdev->device)) { 824 dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n", 825 pdev->vendor, pdev->device); 826 return -ENODEV; 827 } 828 829 if (xe_display_driver_probe_defer(pdev)) 830 return -EPROBE_DEFER; 831 832 err = pcim_enable_device(pdev); 833 if (err) 834 return err; 835 836 xe = xe_device_create(pdev, ent); 837 if (IS_ERR(xe)) 838 return PTR_ERR(xe); 839 840 pci_set_drvdata(pdev, &xe->drm); 841 842 xe_pm_assert_unbounded_bridge(xe); 843 subplatform_desc = find_subplatform(xe, desc); 844 845 pci_set_master(pdev); 846 847 err = xe_info_init_early(xe, desc, subplatform_desc); 848 if (err) 849 return err; 850 851 err = xe_device_probe_early(xe); 852 853 /* 854 * In Boot Survivability mode, no drm card is exposed 855 * and driver is loaded with bare minimum to allow 856 * for firmware to be flashed through mei. Return 857 * success if survivability mode is enabled. 858 */ 859 if (err) { 860 if (xe_survivability_mode_enabled(xe)) 861 return 0; 862 863 return err; 864 } 865 866 err = xe_info_init(xe, desc->graphics, desc->media); 867 if (err) 868 return err; 869 870 err = xe_display_probe(xe); 871 if (err) 872 return err; 873 874 drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d", 875 desc->platform_name, 876 subplatform_desc ? subplatform_desc->name : "", 877 xe->info.devid, xe->info.revid, 878 xe->info.is_dgfx, 879 xe->info.graphics_name, 880 xe->info.graphics_verx100 / 100, 881 xe->info.graphics_verx100 % 100, 882 xe->info.media_name, 883 xe->info.media_verx100 / 100, 884 xe->info.media_verx100 % 100, 885 str_yes_no(xe->info.probe_display), 886 xe->info.dma_mask_size, xe->info.tile_count, 887 xe->info.has_heci_gscfi, xe->info.has_heci_cscfi); 888 889 drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n", 890 xe_step_name(xe->info.step.graphics), 891 xe_step_name(xe->info.step.media), 892 xe_step_name(xe->info.step.basedie)); 893 894 drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n", 895 str_yes_no(xe_device_has_sriov(xe)), 896 xe_sriov_mode_to_string(xe_device_sriov_mode(xe))); 897 898 err = xe_pm_init_early(xe); 899 if (err) 900 return err; 901 902 err = xe_device_probe(xe); 903 if (err) { 904 xe_device_call_remove_actions(xe); 905 return err; 906 } 907 908 err = xe_pm_init(xe); 909 if (err) 910 goto err_driver_cleanup; 911 912 drm_dbg(&xe->drm, "d3cold: capable=%s\n", 913 str_yes_no(xe->d3cold.capable)); 914 915 return 0; 916 917 err_driver_cleanup: 918 xe_pci_remove(pdev); 919 return err; 920 } 921 922 static void xe_pci_shutdown(struct pci_dev *pdev) 923 { 924 xe_device_shutdown(pdev_to_xe_device(pdev)); 925 } 926 927 #ifdef CONFIG_PM_SLEEP 928 static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle) 929 { 930 struct xe_device *xe = pdev_to_xe_device(pdev); 931 struct pci_dev *root_pdev; 932 933 if (!xe->d3cold.capable) 934 return; 935 936 root_pdev = pcie_find_root_port(pdev); 937 if (!root_pdev) 938 return; 939 940 switch (toggle) { 941 case D3COLD_DISABLE: 942 pci_d3cold_disable(root_pdev); 943 break; 944 case D3COLD_ENABLE: 945 pci_d3cold_enable(root_pdev); 946 break; 947 } 948 } 949 950 static int xe_pci_suspend(struct device *dev) 951 { 952 struct pci_dev *pdev = to_pci_dev(dev); 953 struct xe_device *xe = pdev_to_xe_device(pdev); 954 int err; 955 956 if (xe_survivability_mode_enabled(xe)) 957 return -EBUSY; 958 959 err = xe_pm_suspend(xe); 960 if (err) 961 return err; 962 963 /* 964 * Enabling D3Cold is needed for S2Idle/S0ix. 965 * It is save to allow here since xe_pm_suspend has evicted 966 * the local memory and the direct complete optimization is disabled. 967 */ 968 d3cold_toggle(pdev, D3COLD_ENABLE); 969 970 pci_save_state(pdev); 971 pci_disable_device(pdev); 972 973 return 0; 974 } 975 976 static int xe_pci_resume(struct device *dev) 977 { 978 struct pci_dev *pdev = to_pci_dev(dev); 979 int err; 980 981 /* Give back the D3Cold decision to the runtime P M*/ 982 d3cold_toggle(pdev, D3COLD_DISABLE); 983 984 err = pci_set_power_state(pdev, PCI_D0); 985 if (err) 986 return err; 987 988 pci_restore_state(pdev); 989 990 err = pci_enable_device(pdev); 991 if (err) 992 return err; 993 994 pci_set_master(pdev); 995 996 err = xe_pm_resume(pdev_to_xe_device(pdev)); 997 if (err) 998 return err; 999 1000 return 0; 1001 } 1002 1003 static int xe_pci_runtime_suspend(struct device *dev) 1004 { 1005 struct pci_dev *pdev = to_pci_dev(dev); 1006 struct xe_device *xe = pdev_to_xe_device(pdev); 1007 int err; 1008 1009 err = xe_pm_runtime_suspend(xe); 1010 if (err) 1011 return err; 1012 1013 pci_save_state(pdev); 1014 1015 if (xe->d3cold.allowed) { 1016 d3cold_toggle(pdev, D3COLD_ENABLE); 1017 pci_disable_device(pdev); 1018 pci_ignore_hotplug(pdev); 1019 pci_set_power_state(pdev, PCI_D3cold); 1020 } else { 1021 d3cold_toggle(pdev, D3COLD_DISABLE); 1022 pci_set_power_state(pdev, PCI_D3hot); 1023 } 1024 1025 return 0; 1026 } 1027 1028 static int xe_pci_runtime_resume(struct device *dev) 1029 { 1030 struct pci_dev *pdev = to_pci_dev(dev); 1031 struct xe_device *xe = pdev_to_xe_device(pdev); 1032 int err; 1033 1034 err = pci_set_power_state(pdev, PCI_D0); 1035 if (err) 1036 return err; 1037 1038 pci_restore_state(pdev); 1039 1040 if (xe->d3cold.allowed) { 1041 err = pci_enable_device(pdev); 1042 if (err) 1043 return err; 1044 1045 pci_set_master(pdev); 1046 } 1047 1048 return xe_pm_runtime_resume(xe); 1049 } 1050 1051 static int xe_pci_runtime_idle(struct device *dev) 1052 { 1053 struct pci_dev *pdev = to_pci_dev(dev); 1054 struct xe_device *xe = pdev_to_xe_device(pdev); 1055 1056 xe_pm_d3cold_allowed_toggle(xe); 1057 1058 return 0; 1059 } 1060 1061 static const struct dev_pm_ops xe_pm_ops = { 1062 SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume) 1063 SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle) 1064 }; 1065 #endif 1066 1067 static struct pci_driver xe_pci_driver = { 1068 .name = DRIVER_NAME, 1069 .id_table = pciidlist, 1070 .probe = xe_pci_probe, 1071 .remove = xe_pci_remove, 1072 .shutdown = xe_pci_shutdown, 1073 .sriov_configure = xe_pci_sriov_configure, 1074 #ifdef CONFIG_PM_SLEEP 1075 .driver.pm = &xe_pm_ops, 1076 #endif 1077 }; 1078 1079 int xe_register_pci_driver(void) 1080 { 1081 return pci_register_driver(&xe_pci_driver); 1082 } 1083 1084 void xe_unregister_pci_driver(void) 1085 { 1086 pci_unregister_driver(&xe_pci_driver); 1087 } 1088 1089 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) 1090 #include "tests/xe_pci.c" 1091 #endif 1092