// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/xe_pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_graphics_desc *graphics;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_media_desc *media;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_mmio_ext:1;
	u8 has_sriov:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.name = "Xe_LP",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const struct xe_graphics_desc graphics_xelpp = {
	.name = "Xe_LP+",
	.ver = 12,
	.rel = 10,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.has_flat_ccs = true, \
	.dma_mask_size = 46, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.name = "Xe_HPG",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.name = "Xe_HPC",
	.ver = 12,
	.rel = 60,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.dma_mask_size = 52,
	.max_remote_tiles = 1,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_flat_ccs = 0,
	.has_usm = 1,
};
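/*
 * Note that descriptors such as graphics_xehpc above deliberately
 * override fields pulled in via XE_HP_FEATURES (e.g. .has_flat_ccs and
 * .dma_mask_size): with designated initializers the last initializer
 * for a field wins, which is why -Woverride-init is suppressed around
 * these tables.
 */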
static const struct xe_graphics_desc graphics_xelpg = {
	.name = "Xe_LPG",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
	.has_flat_ccs = 0,
};

#define XE2_GFX_FEATURES \
	.dma_mask_size = 46, \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	.name = "Xe2_LPG / Xe2_HPG",

	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.name = "Xe_M",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xehpm = {
	.name = "Xe_HPM",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.name = "Xe_LPM+",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_media_desc media_xe2 = {
	.name = "Xe2_LPM / Xe2_HPM",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};
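/*
 * A worked expansion of the engine masks above, assuming the
 * XE_HW_ENGINE_* enum values of each class are consecutive:
 * GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) sets bits VCS0..VCS7
 * inclusive, so the Xe2 media mask advertises eight VCS engines, four
 * VECS engines and GSCCS0.
 */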
static const struct xe_device_desc tgl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(TIGERLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ROCKETLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_S),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_P),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(ALDERLAKE_N),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.graphics = &graphics_xelpp,
	.media = &media_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.graphics = &graphics_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.has_display = false,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.has_display = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.has_display = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.has_display = true,
	.has_heci_cscfi = 1,
};

#undef PLATFORM
__diag_pop();

/* Map of GMD_ID values to graphics IP */
static const struct gmdid_map graphics_ip_map[] = {
	{ 1270, &graphics_xelpg },
	{ 1271, &graphics_xelpg },
	{ 1274, &graphics_xelpg },	/* Xe_LPG+ */
	{ 2001, &graphics_xe2 },
	{ 2004, &graphics_xe2 },
};

/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
	{ 1300, &media_xelpmp },
	{ 1301, &media_xe2 },
	{ 2000, &media_xe2 },
};

#define INTEL_VGA_DEVICE(id, info) {			\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, id),		\
	PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16,	\
	(unsigned long) info }
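/*
 * For illustration only (0x1234 is a hypothetical device id), an
 * INTEL_VGA_DEVICE(0x1234, &some_desc) entry expands to a match
 * roughly equivalent to:
 *
 *	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1234,
 *	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *	  .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16,
 *	  .driver_data = (unsigned long)&some_desc }
 *
 * i.e. it matches on vendor/device id plus the display base class, and
 * stashes the xe_device_desc pointer in driver_data for probe.
 */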
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, subplatform matches based on subsystem and
 * subvendor IDs must come before the more general PCI ID matches,
 * otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

#undef INTEL_VGA_DEVICE

/* Is device_id present in the comma-separated list of ids? */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}
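/*
 * Example of the parameter semantics implemented above (both ids
 * hypothetical): with xe.force_probe="56a0,!56a1", id_forced(0x56a0)
 * and id_blocked(0x56a1) are true, while any other device is neither
 * forced nor blocked. "*" forces every device and "!*" blocks every
 * device.
 */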
static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * We need to apply the GSI offset explicitly here as at this
		 * point the xe_gt is not fully initialized and only basic
		 * access to MMIO registers is possible.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(gt, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

/*
 * Pre-GMD_ID platform: device descriptor already points to the appropriate
 * graphics descriptor. Simply forward the description and calculate the
 * version appropriately. "graphics" should be present in all such platforms,
 * while media is optional.
 */
static void handle_pre_gmdid(struct xe_device *xe,
			     const struct xe_graphics_desc *graphics,
			     const struct xe_media_desc *media)
{
	xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;

	if (media)
		xe->info.media_verx100 = media->ver * 100 + media->rel;
}

/*
 * GMD_ID platform: read IP version from hardware and select graphics
 * descriptor based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_graphics_desc **graphics,
			 const struct xe_media_desc **media,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
		if (ver == graphics_ip_map[i].ver) {
			xe->info.graphics_verx100 = ver;
			*graphics = graphics_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.graphics_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);

	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
		if (ver == media_ip_map[i].ver) {
			xe->info.media_verx100 = ver;
			*media = media_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.media_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}
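/*
 * Worked example: a GMD_ID read reporting arch 12 / release 74 yields
 * ver = 12 * 100 + 74 = 1274 in read_gmdid(), which handle_gmdid()
 * then maps to graphics_xelpg (Xe_LPG+) via graphics_ip_map[].
 */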
/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_mmio_ext = desc->has_mmio_ext;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_graphics_desc *graphics_desc,
			const struct xe_media_desc *media_desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers. desc->graphics will only
	 * ever be set at this point for platforms before GMD_ID. In that case
	 * the IP descriptions and versions are simply derived from that.
	 */
	if (graphics_desc) {
		handle_pre_gmdid(xe, graphics_desc, media_desc);
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !media_desc);
		handle_gmdid(xe, &graphics_desc, &media_desc,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_desc)
		return -ENODEV;

	xe->info.graphics_name = graphics_desc->name;
	xe->info.media_name = media_desc ? media_desc->name : "none";
	xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;

	xe->info.dma_mask_size = graphics_desc->dma_mask_size;
	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles".
	 * All of these together determine the overall GT count.
	 *
	 * FIXME: 'tile_count' here is misnamed since the rest of the driver
	 * treats it as the number of GTs rather than just the number of tiles.
	 */
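	/*
	 * Worked example based on the descriptors above (a sketch): Xe_HPC
	 * sets max_remote_tiles = 1, giving tile_count = 2, with each tile's
	 * primary GT bumping gt_count below; a GMD_ID platform with
	 * standalone media (media version >= 13) instead keeps one tile but
	 * ends up with gt_count = 2 (one primary GT plus one media GT).
	 */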
	xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;
		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and set up the media GT for platforms with
		 * standalone media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe;

	xe = pdev_to_xe_device(pdev);
	if (!xe) /* driver load aborted, nothing to cleanup */
		return;

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
	pci_set_drvdata(pdev, NULL);
}
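/*
 * Probe flow summary (a reading aid, derived from the function below):
 * force_probe/block-list filtering, display probe-defer check, PCI
 * enable, xe_device_create(), early info and HW init, GMD_ID-based
 * xe_info_init(), display probe, early PM init, the main
 * xe_device_probe(), and finally xe_pm_init(); only a failure in
 * xe_pm_init() unwinds through xe_pci_remove().
 */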
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by the xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	if (err)
		return err;

	err = xe_info_init(xe, desc->graphics, desc->media);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}
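/*
 * Note that the toggle above is applied to the PCIe root port found via
 * pcie_find_root_port() rather than to the GPU itself: the endpoint can
 * only reach D3cold when its root port is allowed to power down, so
 * that is where the constraint has to be placed.
 */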
static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = xe_pm_suspend(pdev_to_xe_device(pdev));
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif