// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

struct xe_subplatform_desc {
	enum xe_subplatform subplatform;
	const char *name;
	const u16 *pciidlist;
};

struct xe_device_desc {
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_graphics_ip;
	/* Should only ever be set for platforms without GMD_ID */
	const struct xe_ip *pre_gmdid_media_ip;

	const char *platform_name;
	const struct xe_subplatform_desc *subplatforms;

	enum xe_platform platform;

	u8 dma_mask_size;
	u8 max_remote_tiles:2;

	u8 require_force_probe:1;
	u8 is_dgfx:1;

	u8 has_display:1;
	u8 has_fan_control:1;
	u8 has_heci_gscfi:1;
	u8 has_heci_cscfi:1;
	u8 has_llc:1;
	u8 has_pxp:1;
	u8 has_sriov:1;
	u8 needs_scratch:1;
	u8 skip_guc_pc:1;
	u8 skip_mtcfg:1;
	u8 skip_pcode:1;
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES \
	.has_range_tlb_invalidation = true, \
	.va_bits = 48, \
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};
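/*
 * A note on the feature macros used in these tables: C designated
 * initializers may legally repeat, with the last one winning (hence the
 * -Woverride-init suppression above), so a descriptor can pull in a shared
 * feature macro and then override individual fields. For example,
 * graphics_xehpc includes XE_HP_FEATURES (va_bits = 48, vm_max_level = 3)
 * and then overrides both with va_bits = 57 and vm_max_level = 4.
 */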
#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_indirect_ring_state = 1, \
	.has_range_tlb_invalidation = 1, \
	.has_usm = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
};
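/*
 * IP versions in the tables above are stored as "verx100", i.e.
 * major * 100 + minor, matching how read_gmdid() below assembles the value
 * from the GMD_ID register (arch * 100 + release). For example,
 * { 2004, "Xe2_LPG" } corresponds to a GMD_ID readout of architecture 20,
 * release 4, printed elsewhere in this file as version 20.04.
 */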
static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_heci_gscfi = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_heci_gscfi = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.needs_scratch = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_heci_cscfi = 1,
	.needs_scratch = true,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.require_force_probe = true,
	.needs_scratch = true,
};

#undef PLATFORM
__diag_pop();
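/*
 * Each INTEL_*_IDS() entry in pciidlist below expands through
 * INTEL_VGA_DEVICE() into one struct pci_device_id per device ID, roughly
 * matching vendor 0x8086, the given device ID and the PCI display class,
 * with .driver_data carrying a pointer to the matching xe_device_desc
 * (see the ID list macros in drm/intel/pciids.h for the exact expansion).
 * xe_pci_probe() casts driver_data back to recover the descriptor.
 */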
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* is device_id present in comma separated list of ids */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}
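/*
 * The xe.force_probe module parameter consumed above is a comma separated
 * list of hex device IDs; prefixing an entry with '!' blocks the device
 * instead of forcing it, and "*" / "!*" match every device. Illustrative
 * usage (the device IDs here are made-up placeholders):
 *
 *	modprobe xe force_probe='abcd,!ef01'
 *
 * forces probe of device 0xabcd while blocking probe of device 0xef01.
 */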
static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not yet fully
		 * initialized and only basic access to MMIO registers is
		 * possible. To use our existing GuC communication functions
		 * we must perform at least basic xe_gt and xe_guc
		 * initialization.
		 *
		 * Since to obtain the value of GMDID_MEDIA we need to use the
		 * media GuC, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here, the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
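/*
 * Worked example of the conversion above: a GMD_ID readout with an
 * architecture field of 12 and a release field of 71 yields
 * ver = 12 * 100 + 71 = 1271, which handle_gmdid() below matches to
 * { 1271, "Xe_LPG" } in graphics_ips[].
 */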
/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
		if (ver == graphics_ips[i].verx100) {
			*graphics_ip = &graphics_ips[i];

			break;
		}
	}

	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
		if (ver == media_ips[i].verx100) {
			*media_ip = &media_ips[i];

			break;
		}
	}

	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}

/*
 * Initialize device info content that only depends on the static
 * driver_data passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}
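/*
 * Worked example of the tile_count computation above: pvc_desc sets
 * max_remote_tiles = 1, giving tile_count = 1 + 1 = 2, while descriptors
 * that leave max_remote_tiles at zero end up with a single tile.
 */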
/*
 * Initialize device info content that requires knowledge of the
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the
 * data present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
	xe->info.has_usm = graphics_desc->has_usm;

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles."
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		gt = tile->primary_gt;
		gt->info.id = xe->info.gt_count++;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and setup media GT for platforms with standalone
		 * media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;

		/*
		 * FIXME: At the moment multi-tile and standalone media are
		 * mutually exclusive on current platforms. We'll need to
		 * come up with a better way to number GTs if we ever wind
		 * up with platforms that support both together.
		 */
		drm_WARN_ON(&xe->drm, id != 0);
		gt->info.id = xe->info.gt_count++;
	}

	return 0;
}
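/*
 * Examples of the GT numbering produced by the loop above: a platform
 * with media version 13 or newer (e.g. METEORLAKE) gets primary GT 0 plus
 * media GT 1 on tile 0; a platform with pre-13 media (e.g. DG2) folds the
 * media engines into primary GT 0's engine_mask instead; and multi-tile
 * PVC contributes one primary GT per tile, numbered 0 and 1.
 */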
static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection capabilities infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence for those specific callers when
 * function error injection skips the whole function.
 */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by the xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	err = xe_device_probe_early(xe);
	/*
	 * In Boot Survivability mode, no drm card is exposed and the driver
	 * is loaded with the bare minimum to allow firmware to be flashed
	 * through mei. Return success if survivability mode is enabled due
	 * to pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow it here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif