// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_graphics_ip;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_media_ip;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 dma_mask_size;
	u8 max_remote_tiles:2;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_fan_control:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_mbx_power_limits:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 needs_scratch:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.has_64bit_timestamp = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};
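/*
 * A note on the engine masks above: each bit position corresponds to one
 * xe_hw_engine_id, so GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0) is
 * simply shorthand for BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
 * BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3), i.e. all four compute
 * engines in one contiguous enum range.
 */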
static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2002, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
};

static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};
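/*
 * A note on the subplatform id lists above: at probe time,
 * find_subplatform() below walks desc->subplatforms and compares the
 * device's PCI ID against each zero-terminated pciidlist. A device from
 * adls_rpls_ids, for example, probes as ALDERLAKE_S with
 * xe->info.subplatform set to XE_SUBPLATFORM_ALDERLAKE_S_RPLS.
 */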
static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = false,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_heci_gscfi = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
	.has_mbx_power_limits = false,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.needs_scratch = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = true,
	.has_heci_cscfi = 1,
	.needs_scratch = true,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.require_force_probe = true,
	.needs_scratch = true,
};

#undef PLATFORM
__diag_pop();

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, a match based on subsystem and subvendor IDs
 * must come before a more general match on the PCI ID alone, otherwise
 * the wrong info struct above would be used.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
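/*
 * Devices marked require_force_probe are only bound when allowed by the
 * xe.force_probe module parameter, whose comma-separated hex list is
 * parsed by device_id_in_list() below. For illustration (example device
 * IDs):
 *
 *   xe.force_probe=9a49        force probe of device 0x9a49
 *   xe.force_probe=!9a49       block probe of device 0x9a49
 *   xe.force_probe=9a49,!56a0  force 0x9a49 while blocking 0x56a0
 *   xe.force_probe=*           force probe of all known devices
 *   xe.force_probe=!*          block probe of all devices
 */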
/* is device_id present in comma separated list of ids */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not yet fully
		 * initialized and only basic access to MMIO registers is
		 * possible. To use our existing GuC communication functions
		 * we must perform at least basic xe_gt and xe_guc
		 * initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
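/*
 * Worked example of the GMD_ID decoding above: a register reporting
 * architecture 20, release 4 (i.e. version "20.04") yields
 * *ver = 20 * 100 + 4 = 2004.
 */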
/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
		if (ver == graphics_ips[i].verx100) {
			*graphics_ip = &graphics_ips[i];

			break;
		}
	}

	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
		if (ver == media_ips[i].verx100) {
			*media_ip = &media_ips[i];

			break;
		}
	}

	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}
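/*
 * Example outcomes of handle_gmdid(): a graphics GMD_ID of 20.04
 * (verx100 == 2004) selects { 2004, "Xe2_LPG", &graphics_xe2 } from
 * graphics_ips[], while a media GMD_ID reading back as 0 (fused off /
 * not present) leaves *media_ip NULL, which xe_info_init() treats as a
 * non-fatal "no media" configuration.
 */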
/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}
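/*
 * A note on tile_count above: for example, pvc_desc has
 * max_remote_tiles = 1, so PVC probes with tile_count = 2, while
 * single-tile platforms leave max_remote_tiles at 0 and end up with
 * tile_count = 1.
 */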
/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;
	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles".
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}
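/*
 * GT numbering example for the loop above: on a single-tile platform
 * with standalone media (media version 13+, e.g. "Xe_LPM+" at 13.00),
 * the primary GT gets id 0 and the media GT gets id 1, for a gt_count
 * of 2. On platforms with media version < 13 the media engines are
 * instead folded into the primary GT's engine_mask and no separate
 * media GT is allocated.
 */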
static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details.
 * The macro ALLOW_ERROR_INJECTION() is used to conditionally skip function
 * execution at runtime and use a provided return value. The first
 * requirement for error injectable functions is proper handling of the
 * error code by the caller for recovery, which is always the case here.
 * The second requirement is that no state is changed before the first
 * error return. It is not strictly fulfilled for all initialization
 * functions using the ALLOW_ERROR_INJECTION() macro but this is acceptable
 * because for those error cases at probe time, the error code is simply
 * propagated up by the caller. Therefore there is no consequence on those
 * specific callers when function error injection skips the whole function.
 */
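/*
 * As a sketch of the above (interface per
 * Documentation/fault-injection/fault-injection.rst, assuming
 * CONFIG_FUNCTION_ERROR_INJECTION and the fault-injection debugfs are
 * enabled, and that the chosen function carries an ALLOW_ERROR_INJECTION()
 * annotation), an error may be injected from userspace roughly as follows:
 *
 *   echo xe_device_probe_early > /sys/kernel/debug/fail_function/inject
 *   printf %#x -12 > /sys/kernel/debug/fail_function/xe_device_probe_early/retval
 *   echo 100 > /sys/kernel/debug/fail_function/probability
 *   echo -1 > /sys/kernel/debug/fail_function/times
 *
 * after which (re)binding the device should fail its probe with -ENOMEM.
 */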
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	/*
	 * In Boot Survivability mode, no drm card is exposed and driver
	 * is loaded with bare minimum to allow for firmware to be
	 * flashed through mei. Return success if survivability mode
	 * is enabled due to pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}
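/*
 * Note that d3cold_toggle() above flips the D3cold policy on the PCIe
 * root port rather than on the GPU itself: system suspend enables it
 * (required for S2Idle/S0ix), resume hands the decision back to runtime
 * PM, and the runtime callbacks below choose D3cold or D3hot based on
 * xe->d3cold.allowed as computed by xe_pm_d3cold_allowed_toggle().
 */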
static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif