// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_graphics_ip;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_media_ip;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 dma_mask_size;
	u8 max_remote_tiles:2;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_fan_control:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_mbx_power_limits:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 needs_scratch:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};
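
/*
 * A note on the descriptor tables above and below, for illustration
 * only: the "-Woverride-init" pragma allows a descriptor to pull in a
 * feature macro and then override individual fields.  With designated
 * initializers the last initializer for a given field wins, so in
 * graphics_xehpc, XE_HP_FEATURES first sets .va_bits = 48 and
 * .vm_max_level = 3, and the explicit .va_bits = 57 and
 * .vm_max_level = 4 that follow take precedence.
 */
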
#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.has_64bit_timestamp = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0),
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
};
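
/*
 * For illustration: the first field of struct xe_ip is the IP version
 * encoded as verx100 = arch * 100 + release, so a GMDID of arch 20,
 * release 4 yields 2004 and selects the "Xe2_LPG" graphics entry above
 * (see read_gmdid() below for the actual register decode).
 */
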
static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = false,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_heci_gscfi = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
	.has_mbx_power_limits = false,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.needs_scratch = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = true,
	.has_heci_cscfi = 1,
	.needs_scratch = true,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.require_force_probe = true,
	.needs_scratch = true,
};

#undef PLATFORM
__diag_pop();
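
/*
 * For illustration: the *_IDS() macros from drm/intel/pciids.h apply a
 * wrapper macro to each device ID, so INTEL_RPLS_IDS(NOP) with
 * NOP(x) = x expands to a plain comma-separated list of u16 IDs.  The
 * trailing 0 in arrays such as adls_rpls_ids is the terminator that
 * find_subplatform() below relies on.
 */
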
/*
 * Make sure any device matches here are ordered from most specific to
 * most general.  For example, an entry matched by subsystem IDs would
 * need to come before a more general match based only on the PCI device
 * ID, otherwise the wrong descriptor above would be selected.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* Check whether device_id is present in a comma-separated list of hex IDs */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}
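
/*
 * For illustration: with xe.force_probe="56a0,!56a1" (device IDs chosen
 * arbitrarily here), id_forced(0x56a0) and id_blocked(0x56a1) both
 * return true.  The special values "*" and "!*" force or block probing
 * of every device, respectively.
 */
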
static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not yet fully
		 * initialized and only basic access to MMIO registers is
		 * possible.  To use our existing GuC communication functions
		 * we must perform at least basic xe_gt and xe_guc
		 * initialization.
		 *
		 * Since we need the media GuC to obtain the value of
		 * GMDID_MEDIA, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here; the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in driver init
		 * we haven't fully initialized the GT yet, so we need to read
		 * the register with the tile's MMIO accessor.  That means we
		 * need to apply the GSI offset manually, since it won't be
		 * added automatically as it would be with a GT MMIO accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
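
/*
 * For illustration: KUNIT_STATIC_STUB_REDIRECT() above lets a KUnit
 * test substitute read_gmdid() with a test double, e.g. (where
 * fake_read_gmdid is a hypothetical replacement with the same
 * signature):
 *
 *	kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
 *
 * Until the stub is deactivated, calls to read_gmdid() made from the
 * test's context are redirected to the replacement.
 */
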
/*
 * Read the IP version from hardware and select the graphics/media IP
 * descriptors based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
		if (ver == graphics_ips[i].verx100) {
			*graphics_ip = &graphics_ips[i];

			break;
		}
	}

	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
		if (ver == media_ips[i].verx100) {
			*media_ip = &media_ips[i];

			break;
		}
	}

	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

/*
 * Initialize device info content that only depends on the static
 * driver_data passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}
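
/*
 * For illustration: tile_count is derived from the descriptor, e.g.
 * pvc_desc above has .max_remote_tiles = 1 and therefore reports
 * tile_count = 2, while single-tile platforms leave max_remote_tiles
 * at 0 and report tile_count = 1.
 */
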
/*
 * Initialize device info content that requires knowledge of the
 * graphics / media IP version.
 * Make sure that the GT / tile structures allocated by the driver match
 * the data present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID.  In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load.  Failing to detect the
	 * media IP is non-fatal; we'll just proceed without enabling media
	 * support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;
	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT.  Any platform with
	 * media version 13 or higher has an additional dedicated media GT.
	 * And depending on the graphics IP there may be additional "remote
	 * tiles".  All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and set up the media GT for platforms with
		 * standalone media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms.  We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}
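
/*
 * For illustration: on a single-tile platform with standalone media
 * (media version >= 13), the loop above numbers gt0 as the primary GT
 * and gt1 as the media GT.  On a two-tile platform without standalone
 * media, gt0 and gt1 are the two primary GTs and the media engines are
 * folded into each primary GT's engine mask instead.
 */
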
static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}

/*
 * Probe the PCI device and initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe().  Those functions use the kernel fault
 * injection capabilities infrastructure; see
 * Documentation/fault-injection/fault-injection.rst for details.  The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and return a provided value instead.  The first requirement
 * for error-injectable functions is proper handling of the error code by
 * the caller for recovery, which is always the case here.  The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro, but this is acceptable because for those
 * error cases at probe time the error code is simply propagated up by the
 * caller.  Therefore there is no consequence for those specific callers
 * when function error injection skips the whole function.
 */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by the xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	/*
	 * In boot survivability mode, no drm card is exposed and the driver
	 * is loaded with the bare minimum to allow firmware to be flashed
	 * through mei.  Return success if survivability mode is enabled due
	 * to a pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}
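
/*
 * For illustration (assuming CONFIG_FUNCTION_ERROR_INJECTION and
 * CONFIG_FAIL_FUNCTION): a probe-path helper annotated with
 * ALLOW_ERROR_INJECTION(<helper>, ERRNO) can be forced to fail from
 * userspace through the fail_function debugfs interface, exercising
 * the unwind paths described above without real hardware failures.
 */
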
static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}
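
/*
 * For illustration: pcie_find_root_port() walks up from the GPU to its
 * PCIe root port, and pci_d3cold_enable()/pci_d3cold_disable() adjust
 * the D3cold policy there, since power to the endpoint is ultimately
 * removed at the root port.
 */
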
static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3cold is needed for S2idle/S0ix.
	 * It is safe to allow it here since xe_pm_suspend() has already
	 * evicted the local memory and the direct complete optimization is
	 * disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3cold decision to runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif