// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_graphics_ip;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_media_ip;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 dma_mask_size;
	u8 max_remote_tiles:2;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_fan_control:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x
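/*
 * For illustration: PLATFORM(TIGERLAKE) pastes and stringizes its argument,
 * expanding to
 *	.platform = XE_TIGERLAKE, .platform_name = "TIGERLAKE"
 * inside the device descriptors below.
 */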
static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};
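/*
 * For illustration: GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) in the
 * masks above sets one bit per engine in the VCS0..VCS7 range, i.e. it is
 * shorthand for OR-ing the eight individual BIT()s. This relies on the
 * XE_HW_ENGINE_* enum values being contiguous, which these masks already
 * assume.
 */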
/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
};
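/*
 * For illustration: the first field of struct xe_ip is the IP version
 * encoded as major * 100 + minor ("verx100"), so { 1271, "Xe_LPG", ... }
 * describes version 12.71. read_gmdid() below derives the same encoding
 * from the GMD_ID register, which is how a GMDID read is matched against
 * these tables.
 */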
static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_heci_gscfi = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_heci_cscfi = 1,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.require_force_probe = true,
};

#undef PLATFORM
__diag_pop();

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* is device_id present in comma separated list of ids */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}
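/*
 * For illustration (the device ids here are hypothetical): with
 * xe.force_probe="abcd,!beef", id_forced(0xabcd) below returns true because
 * the bare token matches, and id_blocked(0xbeef) returns true because the
 * '!'-prefixed token matches a negative query. As the whole parameter
 * value, "*" force-probes every id and "!*" blocks every id.
 */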
static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}
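/*
 * For illustration: an ALDERLAKE_S device whose PCI id is listed in
 * adls_rpls_ids[] above resolves to XE_SUBPLATFORM_ALDERLAKE_S_RPLS
 * ("RPLS"); a device without a match keeps XE_SUBPLATFORM_NONE (see
 * xe_info_init_early() below).
 */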
enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
		if (ver == graphics_ips[i].verx100) {
			*graphics_ip = &graphics_ips[i];

			break;
		}
	}

	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
		if (ver == media_ips[i].verx100) {
			*media_ip = &media_ips[i];

			break;
		}
	}

	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}
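/*
 * For illustration: if the GMDID_MEDIA read returns 0 (media fused off or
 * not present), handle_gmdid() above leaves *media_ip NULL and
 * xe_info_init() below proceeds with media disabled, reporting the media
 * IP name as "none".
 */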
/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}
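/*
 * For illustration: on a single-tile platform with standalone media (media
 * version 13 or higher), xe_info_init() above ends up with gt_count == 2
 * (primary GT 0 plus media GT 1); a multi-tile platform without standalone
 * media instead gets one primary GT per tile.
 */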
static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_runtime_fini(xe);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when
 * function error injection skips the whole function.
 */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	if (err) {
		/*
		 * In Boot Survivability mode, no drm card is exposed and driver
		 * is loaded with bare minimum to allow for firmware to be
		 * flashed through mei. If early probe failed, but it managed to
		 * enable survivability mode, return success.
		 */
		if (xe_survivability_mode_is_enabled(xe))
			return 0;

		return err;
	}

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}
static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);

	return 0;
}
static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif