// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/xe_pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_graphics_desc *graphics;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_media_desc *media;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_mmio_ext:1;
	u8 has_sriov:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.name = "Xe_LP",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const struct xe_graphics_desc graphics_xelpp = {
	.name = "Xe_LP+",
	.ver = 12,
	.rel = 10,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.dma_mask_size = 46, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.name = "Xe_HPG",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};
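/*
 * A note on the feature macros: a descriptor may list a macro such as
 * XE_HP_FEATURES first and then override individual fields. For example,
 * graphics_xehpc below overrides .dma_mask_size, .va_bits and
 * .vm_max_level from the XE_HP_FEATURES defaults. Later designated
 * initializers win, which is exactly the -Woverride-init pattern
 * suppressed by the __diag_ignore_all() above.
 */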
static const struct xe_graphics_desc graphics_xehpc = {
	.name = "Xe_HPC",
	.ver = 12,
	.rel = 60,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.dma_mask_size = 52,
	.max_remote_tiles = 1,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.name = "Xe_LPG",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.dma_mask_size = 46, \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	.name = "Xe2_LPG / Xe2_HPG",

	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.name = "Xe_M",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xehpm = {
	.name = "Xe_HPM",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.name = "Xe_LPM+",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_media_desc media_xe2 = {
	.name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_device_desc tgl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(TIGERLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ROCKETLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_S),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_P),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_N),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1
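/*
 * The discrete-GPU descriptors below stack DGFX_FEATURES on top of
 * PLATFORM(). Also note that descriptors for GMD_ID-capable platforms
 * (METEORLAKE and newer) intentionally leave .graphics / .media NULL:
 * their IP descriptors are selected at probe time from the GMD_ID
 * registers instead (see handle_gmdid() below).
 */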
static const struct xe_device_desc dg1_desc = {
	.graphics = &graphics_xelpp,
	.media = &media_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.graphics = &graphics_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.has_display = false,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.has_display = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.has_display = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.has_display = true,
	.has_heci_cscfi = 1,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.has_display = false,
	.require_force_probe = true,
};

#undef PLATFORM
__diag_pop();

/* Map of GMD_ID values to graphics IP */
static const struct gmdid_map graphics_ip_map[] = {
	{ 1270, &graphics_xelpg },
	{ 1271, &graphics_xelpg },
	{ 1274, &graphics_xelpg },	/* Xe_LPG+ */
	{ 2001, &graphics_xe2 },
	{ 2004, &graphics_xe2 },
	{ 3000, &graphics_xe2 },
	{ 3001, &graphics_xe2 },
};

/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
	{ 1300, &media_xelpmp },
	{ 1301, &media_xe2 },
	{ 2000, &media_xe2 },
	{ 3000, &media_xe2 },
};

#define INTEL_VGA_DEVICE(id, info) {			\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, id),		\
	PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16,	\
	(unsigned long) info }
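/*
 * The class/class_mask pair above (PCI_BASE_CLASS_DISPLAY << 16 with a
 * 0xff << 16 mask) restricts matching to PCI functions whose base class
 * is "display controller" (0x03), regardless of sub-class and prog-if,
 * so non-display functions carrying the same device IDs do not bind.
 */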
/*
 * Make sure any device matches here are ordered from most specific to
 * most general. For example, a match based on subsystem IDs would need
 * to come before a more general PCI ID match, otherwise the wrong
 * descriptor above would be used.
 */
static const struct pci_device_id pciidlist[] = {
	XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	XE_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	XE_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	XE_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

#undef INTEL_VGA_DEVICE

/* Check whether device_id is present in a comma-separated list of IDs */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}
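/*
 * Both helpers above parse the same xe.force_probe module parameter,
 * e.g. (illustrative device IDs):
 *
 *	xe.force_probe=56a0,56a1	force-probe 0x56a0 and 0x56a1
 *	xe.force_probe=!56a0		block 0x56a0 from probing
 *	xe.force_probe=*		force-probe every supported device
 *	xe.force_probe=!*		block every device
 *
 * IDs are hexadecimal; entries prefixed with '!' are only considered by
 * id_blocked(), unprefixed ones only by id_forced().
 */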
static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

/*
 * Pre-GMD_ID platform: device descriptor already points to the appropriate
 * graphics descriptor. Simply forward the description and calculate the
 * version appropriately. "graphics" should be present in all such platforms,
 * while media is optional.
 */
static void handle_pre_gmdid(struct xe_device *xe,
			     const struct xe_graphics_desc *graphics,
			     const struct xe_media_desc *media)
{
	xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;

	if (media)
		xe->info.media_verx100 = media->ver * 100 + media->rel;
}

/*
 * GMD_ID platform: read IP version from hardware and select graphics
 * descriptor based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_graphics_desc **graphics,
			 const struct xe_media_desc **media,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
		if (ver == graphics_ip_map[i].ver) {
			xe->info.graphics_verx100 = ver;
			*graphics = graphics_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.graphics_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);

	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
		if (ver == media_ip_map[i].ver) {
			xe->info.media_verx100 = ver;
			*media = media_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.media_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}
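/*
 * Either path above yields the same "verx100" encoding: major version
 * times 100 plus release. For example (values taken from the descriptors
 * and GMD_ID maps above): pre-GMD_ID Xe_HPG is 12.55 -> 1255, while a
 * GMD_ID read of ARCH=12 / RELEASE=71 yields 1271 -> graphics_xelpg.
 */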
/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_mmio_ext = desc->has_mmio_ext;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Initialize device info content that requires knowledge of the
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_graphics_desc *graphics_desc,
			const struct xe_media_desc *media_desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers. desc->graphics will only
	 * ever be set at this point for platforms before GMD_ID. In that case
	 * the IP descriptions and versions are simply derived from that.
	 */
	if (graphics_desc) {
		handle_pre_gmdid(xe, graphics_desc, media_desc);
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !media_desc);
		handle_gmdid(xe, &graphics_desc, &media_desc,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_desc)
		return -ENODEV;

	xe->info.graphics_name = graphics_desc->name;
	xe->info.media_name = media_desc ? media_desc->name : "none";
	xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;

	xe->info.dma_mask_size = graphics_desc->dma_mask_size;
	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles".
	 * All of these together determine the overall GT count.
	 *
	 * FIXME: 'tile_count' here is misnamed since the rest of the driver
	 * treats it as the number of GTs rather than just the number of tiles.
	 */
	xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}
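/*
 * To illustrate the GT enumeration above with two representative layouts
 * (platform names are just examples from the descriptors in this file):
 * a single-tile platform with standalone media (e.g. METEORLAKE) ends up
 * with GT0 = primary and GT1 = media, while a multi-tile platform without
 * standalone media (e.g. PVC, max_remote_tiles = 1) ends up with GT0 and
 * GT1 being the primary GTs of tile0 and tile1.
 */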
static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe;

	xe = pdev_to_xe_device(pdev);
	if (!xe) /* driver load aborted, nothing to cleanup */
		return;

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
	pci_set_drvdata(pdev, NULL);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when
 * function error injection skips the whole function.
 */
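/*
 * As a hypothetical illustration of the mechanism described above, an
 * init helper would typically be annotated like this (function name made
 * up; the real annotations live next to the respective definitions):
 *
 *	int xe_foo_init(struct xe_device *xe) { ... return err; }
 *	ALLOW_ERROR_INJECTION(xe_foo_init, ERRNO);
 *
 * which lets debugfs-driven fault injection force an errno return before
 * the function body runs.
 */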
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	if (err)
		return err;

	err = xe_info_init(xe, desc->graphics, desc->media);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;
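	/*
	 * The summary line below condenses everything detected so far; a
	 * typical (made-up) example would read:
	 * "TIGERLAKE  9a49:0003 dgfx:0 gfx:Xe_LP (12.00) media:Xe_M (12.00)
	 *  display:yes dma_m_s:39 tc:1 gscfi:0 cscfi:0".
	 */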
	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = xe_pm_suspend(pdev_to_xe_device(pdev));
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow it here since xe_pm_suspend has already evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}
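/*
 * Note that xe_pci_resume below undoes the suspend steps in reverse: the
 * D3Cold decision is handed back to runtime PM first via d3cold_toggle(),
 * then power state goes back to D0, saved config space is restored, and
 * the device and bus mastering are re-enabled before xe_pm_resume() runs.
 */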
static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif