// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_graphics_ip;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_media_ip;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 dma_mask_size;
	u8 max_remote_tiles:2;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_fan_control:1;
	u8 has_gsc_nvm:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_mbx_power_limits:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 needs_scratch:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
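
/*
 * The descriptor tables below may pull in defaults from the *_FEATURES
 * macros and then override individual fields; e.g. graphics_xehpc
 * includes XE_HP_FEATURES (.va_bits = 48) and then overrides .va_bits
 * to 57. This is why -Woverride-init is suppressed around the tables.
 */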

#define PLATFORM(x) \
	.platform = XE_##x, \
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.has_64bit_timestamp = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2002, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
	{ 3003, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
	{ 3002, "Xe3_LPM", &media_xelpmp },
};
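
/*
 * In the xe_ip tables the first field is the IP version encoded as
 * verx100 (major * 100 + minor), so { 1271, "Xe_LPG", ... } is version
 * 12.71. Pre-GMDID IPs are referenced directly from a platform
 * descriptor, while the arrays above are matched at probe time against
 * the version read from the hardware GMD_ID register (see read_gmdid()
 * below).
 */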

static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_gsc_nvm = 1, \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = false,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
	.has_mbx_power_limits = false,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.needs_scratch = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = true,
	.has_gsc_nvm = 1,
	.has_heci_cscfi = 1,
	.needs_scratch = true,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.require_force_probe = true,
	.needs_scratch = true,
};

#undef PLATFORM
__diag_pop();
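
/*
 * Descriptors marked .require_force_probe are not yet considered
 * officially supported: xe_pci_probe() below refuses to bind them
 * unless the device ID is listed in the xe.force_probe module
 * parameter (see id_forced()).
 */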

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, if a match based on subsystem and subvendor
 * IDs is ever added, it must come before the more general PCI ID
 * matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* Is device_id present in the comma-separated list of hex IDs? */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}
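
/*
 * Example (illustrative): xe.force_probe=56a0,!56a1 force-probes device
 * 0x56a0 and blocks 0x56a1; "*" force-probes every device and "!*"
 * blocks every device.
 */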

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
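
/*
 * Example (illustrative): a GMD_ID value with ARCH = 12 and RELEASE = 74
 * decodes to ver = 12 * 100 + 74 = 1274, which handle_gmdid() below
 * matches to the "Xe_LPG+" entry in graphics_ips[].
 */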

/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
		if (ver == graphics_ips[i].verx100) {
			*graphics_ip = &graphics_ips[i];

			break;
		}
	}

	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
		if (ver == media_ips[i].verx100) {
			*media_ip = &media_ips[i];

			break;
		}
	}

	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
	xe->info.has_gsc_nvm = desc->has_gsc_nvm;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}
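
/*
 * Example (illustrative): pvc_desc above sets .max_remote_tiles = 1, so
 * xe_info_init_early() computes tile_count = 2; descriptors that leave
 * max_remote_tiles at 0 yield a single tile.
 */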

/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;
	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}
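
/*
 * Example (illustrative): a single-tile platform with standalone media
 * (media version 13 or higher, e.g. Xe_LPM+) ends up with gt_count = 2
 * after xe_info_init(): the tile's primary GT plus its media GT. On
 * pre-GMDID platforms the media engines are instead folded into the
 * primary GT's engine mask.
 */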

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when
 * function error injection skips the whole function.
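 *
 * As an illustration (assumed example), an error injectable function is
 * annotated next to its definition in its implementation file with:
 *
 *	ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO);
 */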
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	/*
	 * In Boot Survivability mode, no drm card is exposed and the driver
	 * is loaded with the bare minimum to allow firmware to be flashed
	 * through mei. Return success if survivability mode is enabled due
	 * to pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif