// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_graphics_ip;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_media_ip;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 dma_mask_size;
	u8 max_remote_tiles:2;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

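/*
 * Common Xe2 feature set. Per the GMDID-based graphics_ips table below, the
 * Xe3 graphics IPs currently reuse this descriptor as well.
 */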
#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
};

static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_heci_gscfi = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_heci_cscfi = 1,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.require_force_probe = true,
};

#undef PLATFORM
__diag_pop();
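/*
 * Note: pvc_desc above is currently not referenced by pciidlist below (hence
 * its __maybe_unused annotation), so no PVC device IDs are matched here.
 */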
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* is device_id present in comma separated list of ids */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};
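/*
 * GMD_ID encodes separate architecture and release fields. read_gmdid() below
 * folds them into a single verx100 value (arch * 100 + release); for example,
 * architecture 12 with release 71 yields 1271, which selects the Xe_LPG entry
 * in graphics_ips above.
 */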
static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
		if (ver == graphics_ips[i].verx100) {
			*graphics_ip = &graphics_ips[i];

			break;
		}
	}

	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
		if (ver == media_ips[i].verx100) {
			*media_ip = &media_ips[i];

			break;
		}
	}

	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
}
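/*
 * The xe.force_probe module parameter consulted by id_forced()/id_blocked()
 * above is a comma separated list of hexadecimal device IDs; a '!' prefix
 * blocks a device instead of forcing it, and "*" / "!*" match every device.
 * For example, xe.force_probe=56a0 forces probing of the device whose PCI
 * device ID is 0x56a0, while xe.force_probe=!56a0 blocks it.
 */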
/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when
 * function error injection skips the whole function.
 */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	if (err) {
		/*
		 * In Boot Survivability mode, no drm card is exposed and driver
		 * is loaded with bare minimum to allow for firmware to be
		 * flashed through mei. If early probe failed, but it managed to
		 * enable survivability mode, return success.
		 */
		if (xe_survivability_mode_is_enabled(xe))
			return 0;

		return err;
	}

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}
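/*
 * System sleep summary: xe_pci_suspend() below relies on xe_pm_suspend()
 * having evicted local memory, re-enables D3cold on the root port (needed
 * for S2Idle/S0ix), then saves PCI state and disables the device;
 * xe_pci_resume() undoes those steps.
 */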
static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif