// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/xe_pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_gt_desc {
	enum xe_gt_type type;
	u32 mmio_adj_limit;
	u32 mmio_adj_offset;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_graphics_desc *graphics;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_media_desc *media;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_heci_gscfi:1;
	u8 has_llc:1;
	u8 has_mmio_ext:1;
	u8 has_sriov:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = (x),	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.name = "Xe_LP",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const struct xe_graphics_desc graphics_xelpp = {
	.name = "Xe_LP+",
	.ver = 12,
	.rel = 10,

	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.dma_mask_size = 39,
	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.has_flat_ccs = true, \
	.dma_mask_size = 46, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.name = "Xe_HPG",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.name = "Xe_HPC",
	.ver = 12,
	.rel = 60,

	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.dma_mask_size = 52,
	.max_remote_tiles = 1,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_flat_ccs = 0,
	.has_usm = 1,
};
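
/*
 * From Xe_LPG onwards the graphics IP version is not hardcoded here: these
 * descriptors are selected at probe time via the GMD_ID register (see
 * read_gmdid() and the graphics_ip_map/media_ip_map tables below), so they
 * carry no .ver/.rel fields.
 */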

static const struct xe_graphics_desc graphics_xelpg = {
	.name = "Xe_LPG",
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
	.has_flat_ccs = 0,
};

#define XE2_GFX_FEATURES \
	.dma_mask_size = 46, \
	.has_asid = 1, \
	.has_flat_ccs = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	.name = "Xe2_LPG / Xe2_HPG",

	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.name = "Xe_M",
	.ver = 12,
	.rel = 0,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xehpm = {
	.name = "Xe_HPM",
	.ver = 12,
	.rel = 55,

	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.name = "Xe_LPM+",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

static const struct xe_media_desc media_xe2 = {
	.name = "Xe2_LPM / Xe2_HPM",
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
};

static const struct xe_device_desc tgl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_TIGERLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ROCKETLAKE),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_S),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_P),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.graphics = &graphics_xelp,
	.media = &media_xem,
	PLATFORM(XE_ALDERLAKE_N),
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.graphics = &graphics_xelpp,
	.media = &media_xem,
	DGFX_FEATURES,
	PLATFORM(XE_DG1),
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};
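
/*
 * Subplatform PCI ID tables are zero-terminated arrays of u16 device IDs;
 * find_subplatform() walks them at probe time to refine
 * xe->info.subplatform beyond the base platform match.
 */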
static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(XE_DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.graphics = &graphics_xehpg,
	.media = &media_xehpm,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.graphics = &graphics_xehpc,
	DGFX_FEATURES,
	PLATFORM(XE_PVC),
	.has_display = false,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(XE_METEORLAKE),
	.has_display = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(XE_LUNARLAKE),
	.has_display = true,
	.require_force_probe = true,
};

static const struct xe_device_desc bmg_desc __maybe_unused = {
	DGFX_FEATURES,
	PLATFORM(XE_BATTLEMAGE),
	.require_force_probe = true,
};

#undef PLATFORM
__diag_pop();

/* Map of GMD_ID values to graphics IP */
static const struct gmdid_map graphics_ip_map[] = {
	{ 1270, &graphics_xelpg },
	{ 1271, &graphics_xelpg },
	{ 1274, &graphics_xelpg },	/* Xe_LPG+ */
	{ 2001, &graphics_xe2 },
	{ 2004, &graphics_xe2 },
};

/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
	{ 1300, &media_xelpmp },
	{ 1301, &media_xe2 },
	{ 2000, &media_xe2 },
};

#define INTEL_VGA_DEVICE(id, info) {			\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, id),		\
	PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16,	\
	(unsigned long) info }
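
/*
 * The 0xff << 16 mask above means only the PCI base class byte
 * (PCI_BASE_CLASS_DISPLAY, 0x03) must match in addition to the vendor and
 * device IDs; sub-class and programming interface are ignored.
 */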

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, a match based on subsystem and subvendor IDs
 * would need to come before the more general PCI ID matches for the same
 * device, otherwise the wrong info struct above would be used.
 */
static const struct pci_device_id pciidlist[] = {
	XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

#undef INTEL_VGA_DEVICE

/*
 * Check whether device_id appears in a comma-separated list of hex IDs,
 * e.g. "4680,!4688". With @negative set, only '!'-prefixed (negated)
 * entries are considered, otherwise only plain entries; "*" (or "!*" in
 * the negated case) matches every device.
 */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (type == GMDID_MEDIA)
		gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

	val = xe_mmio_read32(gt, gmdid_reg);
	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

/*
 * Pre-GMD_ID platform: the device descriptor already points to the
 * appropriate graphics descriptor. Simply forward the description and
 * calculate the version appropriately. "graphics" should be present in all
 * such platforms, while media is optional.
 */
static void handle_pre_gmdid(struct xe_device *xe,
			     const struct xe_graphics_desc *graphics,
			     const struct xe_media_desc *media)
{
	xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;

	if (media)
		xe->info.media_verx100 = media->ver * 100 + media->rel;
}
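
/*
 * A GMD_ID-reported version is encoded as architecture * 100 + release
 * (see read_gmdid()), so e.g. architecture 12, release 70 yields 1270,
 * which graphics_ip_map above resolves to Xe_LPG.
 */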
/*
 * GMD_ID platform: read IP version from hardware and select graphics
 * descriptor based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_graphics_desc **graphics,
			 const struct xe_media_desc **media,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
		if (ver == graphics_ip_map[i].ver) {
			xe->info.graphics_verx100 = ver;
			*graphics = graphics_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.graphics_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);

	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
		if (ver == media_ip_map[i].ver) {
			xe->info.media_verx100 = ver;
			*media = media_ip_map[i].ip;

			break;
		}
	}

	if (!xe->info.media_verx100) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_mmio_ext = desc->has_mmio_ext;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;

	xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				  xe_modparam.enable_display &&
				  desc->has_display;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}
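
/*
 * Only the root tile is initialized in xe_info_init_early(); remote tiles
 * depend on the graphics IP (max_remote_tiles) and are therefore set up in
 * xe_info_init() below, once the IP descriptor is known.
 */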
/*
 * Initialize device info content that requires knowledge of the
 * graphics / media IP version.
 * Make sure that the GT / tile structures allocated by the driver match the
 * data present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_graphics_desc *graphics_desc,
			const struct xe_media_desc *media_desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers. desc->graphics will only
	 * ever be set at this point for platforms before GMD_ID. In that case
	 * the IP descriptions and versions are simply derived from that.
	 */
	if (graphics_desc) {
		handle_pre_gmdid(xe, graphics_desc, media_desc);
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !media_desc);
		handle_gmdid(xe, &graphics_desc, &media_desc,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_desc)
		return -ENODEV;

	xe->info.graphics_name = graphics_desc->name;
	xe->info.media_name = media_desc ? media_desc->name : "none";
	xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;

	xe->info.dma_mask_size = graphics_desc->dma_mask_size;
	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 *
	 * FIXME: 'tile_count' here is misnamed since the rest of the driver
	 * treats it as the number of GTs rather than just the number of tiles.
	 */
	xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.__engine_mask = graphics_desc->hw_engine_mask;
		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.__engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.__engine_mask = media_desc->hw_engine_mask;
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe;

	xe = pci_get_drvdata(pdev);
	if (!xe) /* driver load aborted, nothing to clean up */
		return;

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
	pci_set_drvdata(pdev, NULL);
}
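
/*
 * Probe flow in brief: enable the PCI device, create the xe device, fill in
 * the info that only depends on the PCI ID table (xe_info_init_early()),
 * perform the early setup needed before GMD_ID can be read
 * (xe_device_probe_early()), then complete the IP-dependent info
 * (xe_info_init()) before the main device probe and PM initialization.
 */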
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by the xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, xe);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	if (err)
		return err;

	err = xe_info_init(xe, desc->graphics, desc->media);
	if (err)
		return err;

	xe_display_probe(xe);

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.enable_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.display),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}
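
/*
 * Note that D3cold is toggled at the root port rather than at the endpoint
 * itself, since the root port is where power to the card below it is
 * actually removed.
 */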
static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = xe_pm_suspend(pdev_to_xe_device(pdev));
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow it here since xe_pm_suspend() has already
	 * evicted the local memory and the direct complete optimization is
	 * disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif