// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"
#include "xe_vram.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),

	.va_bits = 48,
	.vm_max_level = 3,
};

#define XE_HP_FEATURES			\
	.has_range_tlb_inval = true,	\
	.va_bits = 48,			\
	.vm_max_level = 3

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_flat_ccs = 1,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_flat_ccs = 1, \
	.has_range_tlb_inval = 1, \
	.has_usm = 1, \
	.has_64bit_timestamp = 1, \
	.va_bits = 48, \
	.vm_max_level = 4, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};
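/*
 * The xe_ip tables below use the "verx100" convention: major IP version
 * times 100 plus minor version, matching how read_gmdid() combines the
 * GMD_ID architecture and release fields (arch * 100 + release). For
 * example, 1271 is graphics version 12.71 (Xe_LPG) and 2004 is graphics
 * version 20.04 (Xe2_LPG).
 */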
/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };

/* GMDID-based Graphics IPs */
static const struct xe_ip graphics_ips[] = {
	{ 1270, "Xe_LPG", &graphics_xelpg },
	{ 1271, "Xe_LPG", &graphics_xelpg },
	{ 1274, "Xe_LPG+", &graphics_xelpg },
	{ 2001, "Xe2_HPG", &graphics_xe2 },
	{ 2002, "Xe2_HPG", &graphics_xe2 },
	{ 2004, "Xe2_LPG", &graphics_xe2 },
	{ 3000, "Xe3_LPG", &graphics_xe2 },
	{ 3001, "Xe3_LPG", &graphics_xe2 },
	{ 3003, "Xe3_LPG", &graphics_xe2 },
};

/* Pre-GMDID Media IPs */
static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };

/* GMDID-based Media IPs */
static const struct xe_ip media_ips[] = {
	{ 1300, "Xe_LPM+", &media_xelpmp },
	{ 1301, "Xe2_HPM", &media_xelpmp },
	{ 2000, "Xe2_LPM", &media_xelpmp },
	{ 3000, "Xe3_LPM", &media_xelpmp },
	{ 3002, "Xe3_LPM", &media_xelpmp },
};

static const struct xe_device_desc tgl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(TIGERLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

static const struct xe_device_desc rkl_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ROCKETLAKE),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };

static const struct xe_device_desc adl_s_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_S),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
		{},
	},
};

static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };

static const struct xe_device_desc adl_p_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_P),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
		{},
	},
};

static const struct xe_device_desc adl_n_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelp,
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

#define DGFX_FEATURES \
	.is_dgfx = 1
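/*
 * The descriptors in these tables lean on a designated-initializer
 * override pattern: a feature macro such as XE_HP_FEATURES or
 * DGFX_FEATURES expands to a set of initializers, and an individual
 * descriptor may re-initialize one of those fields afterwards, in which
 * case the later initializer wins. This is exactly what -Woverride-init
 * would warn about, hence the __diag_ignore_all() above. A real instance
 * from this file:
 *
 *	static const struct xe_graphics_desc graphics_xehpc = {
 *		...
 *		XE_HP_FEATURES,		<- sets .va_bits = 48
 *		.va_bits = 57,		<- override; the later value wins
 *		...
 *	};
 */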
static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_gsc_nvm = 1, \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.max_gt_per_tile = 1,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
	.has_sriov = true,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.max_gt_per_tile = 1,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = false,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_gt_per_tile = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
	.has_mbx_power_limits = false,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.max_gt_per_tile = 2,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
};

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = true,
	.has_gsc_nvm = 1,
	.has_heci_cscfi = 1,
	.has_late_bind = true,
	.has_sriov = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_sriov = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
};

#undef PLATFORM
__diag_pop();
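/*
 * Each INTEL_*_IDS(INTEL_VGA_DEVICE, &desc) entry in the table below
 * expands to a struct pci_device_id matching PCI_VENDOR_ID_INTEL, one
 * device ID and the display class, with .driver_data pointing at the
 * corresponding xe_device_desc. Roughly (paraphrasing INTEL_VGA_DEVICE
 * from <drm/intel/pciids.h>; the definition there is authoritative):
 *
 *	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a49),
 *	  .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16,
 *	  .driver_data = (kernel_ulong_t)&tgl_desc }
 */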
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, a match based on the subsystem and subvendor
 * IDs must come before a more general match on the PCI device ID alone,
 * otherwise the wrong info struct above would be used.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* Check if device_id is present in a comma-separated list of IDs */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}

static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}
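/*
 * Both id_forced() and id_blocked() above parse the same xe.force_probe
 * module parameter: a comma-separated list of hex device IDs, where a
 * '!' prefix blocks a probe instead of forcing it, '*' matches every
 * device and '!*' blocks every device. Illustrative values (the IDs are
 * examples only):
 *
 *	xe.force_probe=4680		force-probe device 0x4680
 *	xe.force_probe=4680,!4688	force 0x4680, block 0x4688
 *	xe.force_probe=!*		block all devices
 */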
static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		struct xe_gt *gt = xe_root_mmio_gt(xe);

		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the xe_gt is not fully initialized
		 * and only basic access to MMIO registers is possible. To use
		 * our existing GuC communication functions we must perform at
		 * least basic xe_gt and xe_guc initialization.
		 *
		 * Since we need the media GuC to obtain the value of
		 * GMDID_MEDIA, temporarily tweak the gt type.
		 */
		xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		/* Don't bother with GMDID if we failed to negotiate the GuC ABI */
		val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);

		/*
		 * Only undo xe_gt.info here; the remaining changes made above
		 * will be overwritten as part of the regular initialization.
		 */
		gt->info.id = 0;
		gt->info.type = XE_GT_TYPE_UNINITIALIZED;
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in driver init
		 * we haven't fully initialized the GT yet, so we need to read
		 * the register with the tile's MMIO accessor. That means we
		 * need to apply the GSI offset manually, since it won't be
		 * added automatically as it would be by a GT mmio accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}

static const struct xe_ip *find_graphics_ip(unsigned int verx100)
{
	KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++)
		if (graphics_ips[i].verx100 == verx100)
			return &graphics_ips[i];
	return NULL;
}

static const struct xe_ip *find_media_ip(unsigned int verx100)
{
	KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100);

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++)
		if (media_ips[i].verx100 == verx100)
			return &media_ips[i];
	return NULL;
}

/*
 * Read the IP version from hardware and select graphics/media IP
 * descriptors based on the result.
 */
static void handle_gmdid(struct xe_device *xe,
			 const struct xe_ip **graphics_ip,
			 const struct xe_ip **media_ip,
			 u32 *graphics_revid,
			 u32 *media_revid)
{
	u32 ver;

	*graphics_ip = NULL;
	*media_ip = NULL;

	read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);

	*graphics_ip = find_graphics_ip(ver);
	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return;

	*media_ip = find_media_ip(ver);
	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}
}
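/*
 * A worked example of the decoding above, assuming the usual GMD_ID
 * layout (architecture in bits 31:22, release in bits 21:14, revid in
 * bits 5:0; the GMD_ID_* masks in the register headers are
 * authoritative): a register value with architecture 20 and release 4
 * gives
 *
 *	ver = 20 * 100 + 4 = 2004
 *
 * which find_graphics_ip() resolves to "Xe2_LPG" via graphics_ips[].
 */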
/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_fan_control = desc->has_fan_control;
	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
	xe->info.has_gsc_nvm = desc->has_gsc_nvm;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_late_bind = desc->has_late_bind;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_sriov = desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;

	xe_assert(xe, desc->max_gt_per_tile > 0);
	xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
	xe->info.max_gt_per_tile = desc->max_gt_per_tile;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}

/*
 * Possibly override the number of tiles based on a configuration register.
 */
static void xe_info_probe_tile_count(struct xe_device *xe)
{
	struct xe_mmio *mmio;
	u8 tile_count;
	u32 mtcfg;

	KUNIT_STATIC_STUB_REDIRECT(xe_info_probe_tile_count, xe);

	/*
	 * Probe for the tile count only on platforms that support multiple
	 * tiles.
	 */
	if (xe->info.tile_count == 1)
		return;

	if (xe->info.skip_mtcfg)
		return;

	mmio = xe_root_tile_mmio(xe);

	/*
	 * Although the per-tile mmio regs are not yet initialized, this
	 * is fine because the read goes through the root tile's mmio,
	 * which is guaranteed to have been initialized earlier in
	 * xe_mmio_probe_early().
	 */
	mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
	tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

	if (tile_count < xe->info.tile_count) {
		drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
			 xe->info.tile_count, tile_count);
		xe->info.tile_count = tile_count;
	}
}
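/*
 * Example of the adjustment above: a descriptor may claim
 * max_remote_tiles = 1, so tile_count starts at 2, but if the MTCFG
 * TILE_COUNT field reads back 0 the hardware exposes 0 + 1 = 1 tile and
 * tile_count is reduced to 1.
 */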
/*
 * Initialize device info content that requires knowledge of the
 * graphics / media IP version.
 * Make sure that the GT / tile structures allocated by the driver match
 * the data present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		handle_gmdid(xe, &graphics_ip, &media_ip,
			     &graphics_gmdid_revid, &media_gmdid_revid);
		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.vram_flags = graphics_desc->vram_flags;
	xe->info.va_bits = graphics_desc->va_bits;
	xe->info.vm_max_level = graphics_desc->vm_max_level;
	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	/* Runtime detection may change this later */
	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;

	xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
	xe->info.has_usm = graphics_desc->has_usm;
	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;

	xe_info_probe_tile_count(xe);

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/*
	 * All platforms have at least one primary GT. Any platform with media
	 * version 13 or higher has an additional dedicated media GT. And
	 * depending on the graphics IP there may be additional "remote tiles".
	 * All of these together determine the overall GT count.
	 */
	for_each_tile(tile, xe, id) {
		int err;

		gt = tile->primary_gt;
		gt->info.type = XE_GT_TYPE_MAIN;
		gt->info.id = tile->id * xe->info.max_gt_per_tile;
		gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
		gt->info.engine_mask = graphics_desc->hw_engine_mask;

		err = xe_tile_alloc_vram(tile);
		if (err)
			return err;

		if (MEDIA_VER(xe) < 13 && media_desc)
			gt->info.engine_mask |= media_desc->hw_engine_mask;

		if (MEDIA_VER(xe) < 13 || !media_desc)
			continue;

		/*
		 * Allocate and set up the media GT on platforms with
		 * standalone media.
		 */
		tile->media_gt = xe_gt_alloc(tile);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);

		gt = tile->media_gt;
		gt->info.type = XE_GT_TYPE_MEDIA;
		gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
		gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
		gt->info.engine_mask = media_desc->hw_engine_mask;
	}
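	/*
	 * The GT ids assigned above follow tile->id * max_gt_per_tile, plus
	 * one for a media GT. With max_gt_per_tile == 2 on a two-tile
	 * device, for example, tile 0 holds GT 0 (primary) and GT 1 (media)
	 * while tile 1 holds GT 2 and GT 3, keeping ids unique device-wide
	 * even when some slots stay unused.
	 */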
	/*
	 * Now that we have tiles and GTs defined, loop over the valid GTs
	 * in order to define gt_count.
	 */
	for_each_gt(gt, xe, id)
		xe->info.gt_count++;

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_boot_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}

/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe(). Those functions use the kernel's
 * fault injection infrastructure; see
 * Documentation/fault-injection/fault-injection.rst for details. The
 * ALLOW_ERROR_INJECTION() macro is used to conditionally skip a function's
 * execution at runtime and return a provided error value instead. The
 * first requirement for error-injectable functions is proper handling of
 * the error code by the caller for recovery, which is always the case
 * here. The second requirement is that no state is changed before the
 * first error return. It is not strictly fulfilled for all initialization
 * functions using the ALLOW_ERROR_INJECTION() macro, but this is
 * acceptable because for those error cases at probe time the error code
 * is simply propagated up by the caller. Therefore there is no consequence
 * for those specific callers when function error injection skips the whole
 * function.
 */
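/*
 * For reference, a function on this probe path opts into fault injection
 * with the ALLOW_ERROR_INJECTION() macro from <linux/error-injection.h>,
 * placed next to its definition. An illustrative (not verbatim)
 * annotation:
 *
 *	ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO);
 *
 * With that in place, debugfs-driven fault injection can force the
 * function to return an error without executing its body.
 */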
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	xe_configfs_check_device(pdev);

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by the xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	xe_vram_resize_bar(xe);

	err = xe_device_probe_early(xe);
	/*
	 * In Boot Survivability mode, no drm card is exposed and the driver
	 * is loaded with the bare minimum to allow the firmware to be
	 * flashed through mei. Return success if survivability mode is
	 * enabled due to a pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_boot_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;

	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}
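/*
 * Note that d3cold_toggle() flips D3cold on the PCIe root port rather
 * than on the GPU itself: pci_d3cold_enable()/pci_d3cold_disable() set a
 * per-device flag that the PCI core consults when deciding whether a
 * hierarchy may enter D3cold, so gating the root port effectively gates
 * the whole hierarchy below it.
 */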
static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_boot_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}

static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif