// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_pci.h"

#include <kunit/static_stub.h>
#include <linux/device/driver.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/intel/pciids.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pci_rebar.h"
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
#include "xe_printk.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
#include "xe_tile.h"

enum toggle_d3cold {
	D3COLD_DISABLE,
	D3COLD_ENABLE,
};

__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field overrides in table");

#define PLATFORM(x)		\
	.platform = XE_##x,	\
	.platform_name = #x

#define NOP(x)	x

static const struct xe_graphics_desc graphics_xelp = {
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
};

#define XE_HP_FEATURES \
	.has_range_tlb_inval = true

static const struct xe_graphics_desc graphics_xehpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,
};

static const struct xe_graphics_desc graphics_xehpc = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
		BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
		BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
		BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
		BIT(XE_HW_ENGINE_BCS8) |
		BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
		BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),

	XE_HP_FEATURES,

	.has_asid = 1,
	.has_atomic_enable_pte_bit = 1,
	.has_usm = 1,
};

static const struct xe_graphics_desc graphics_xelpg = {
	.hw_engine_mask =
		BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
		BIT(XE_HW_ENGINE_CCS0),

	XE_HP_FEATURES,
};

#define XE2_GFX_FEATURES \
	.has_asid = 1, \
	.has_atomic_enable_pte_bit = 1, \
	.has_range_tlb_inval = 1, \
	.has_usm = 1, \
	.has_64bit_timestamp = 1, \
	.hw_engine_mask = \
		BIT(XE_HW_ENGINE_RCS0) | \
		BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)

static const struct xe_graphics_desc graphics_xe2 = {
	XE2_GFX_FEATURES,
};

static const struct xe_graphics_desc graphics_xe3p_xpc = {
	XE2_GFX_FEATURES,
	.has_indirect_ring_state = 1,
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_BCS8, XE_HW_ENGINE_BCS1) |
		GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0),
};

static const struct xe_media_desc media_xem = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};

static const struct xe_media_desc media_xelpmp = {
	.hw_engine_mask =
		GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
		GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
		BIT(XE_HW_ENGINE_GSCCS0)
};

/* Pre-GMDID Graphics IPs */
static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
"Xe_LP", &graphics_xelp }; 132 static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp }; 133 static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg }; 134 static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc }; 135 136 /* GMDID-based Graphics IPs */ 137 static const struct xe_ip graphics_ips[] = { 138 { 1270, "Xe_LPG", &graphics_xelpg }, 139 { 1271, "Xe_LPG", &graphics_xelpg }, 140 { 1274, "Xe_LPG+", &graphics_xelpg }, 141 { 2001, "Xe2_HPG", &graphics_xe2 }, 142 { 2002, "Xe2_HPG", &graphics_xe2 }, 143 { 2004, "Xe2_LPG", &graphics_xe2 }, 144 { 3000, "Xe3_LPG", &graphics_xe2 }, 145 { 3001, "Xe3_LPG", &graphics_xe2 }, 146 { 3003, "Xe3_LPG", &graphics_xe2 }, 147 { 3004, "Xe3_LPG", &graphics_xe2 }, 148 { 3005, "Xe3_LPG", &graphics_xe2 }, 149 { 3511, "Xe3p_XPC", &graphics_xe3p_xpc }, 150 }; 151 152 /* Pre-GMDID Media IPs */ 153 static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem }; 154 static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem }; 155 156 /* GMDID-based Media IPs */ 157 static const struct xe_ip media_ips[] = { 158 { 1300, "Xe_LPM+", &media_xelpmp }, 159 { 1301, "Xe2_HPM", &media_xelpmp }, 160 { 2000, "Xe2_LPM", &media_xelpmp }, 161 { 3000, "Xe3_LPM", &media_xelpmp }, 162 { 3002, "Xe3_LPM", &media_xelpmp }, 163 { 3500, "Xe3p_LPM", &media_xelpmp }, 164 { 3503, "Xe3p_HPM", &media_xelpmp }, 165 }; 166 167 static const struct xe_device_desc tgl_desc = { 168 .pre_gmdid_graphics_ip = &graphics_ip_xelp, 169 .pre_gmdid_media_ip = &media_ip_xem, 170 PLATFORM(TIGERLAKE), 171 .dma_mask_size = 39, 172 .has_cached_pt = true, 173 .has_display = true, 174 .has_llc = true, 175 .has_sriov = true, 176 .max_gt_per_tile = 1, 177 .require_force_probe = true, 178 .va_bits = 48, 179 .vm_max_level = 3, 180 }; 181 182 static const struct xe_device_desc rkl_desc = { 183 .pre_gmdid_graphics_ip = &graphics_ip_xelp, 184 .pre_gmdid_media_ip = &media_ip_xem, 185 PLATFORM(ROCKETLAKE), 186 .dma_mask_size = 39, 187 .has_cached_pt = true, 188 .has_display = true, 189 .has_llc = true, 190 .max_gt_per_tile = 1, 191 .require_force_probe = true, 192 .va_bits = 48, 193 .vm_max_level = 3, 194 }; 195 196 static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 }; 197 198 static const struct xe_device_desc adl_s_desc = { 199 .pre_gmdid_graphics_ip = &graphics_ip_xelp, 200 .pre_gmdid_media_ip = &media_ip_xem, 201 PLATFORM(ALDERLAKE_S), 202 .dma_mask_size = 39, 203 .has_cached_pt = true, 204 .has_display = true, 205 .has_llc = true, 206 .has_sriov = true, 207 .max_gt_per_tile = 1, 208 .require_force_probe = true, 209 .subplatforms = (const struct xe_subplatform_desc[]) { 210 { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids }, 211 {}, 212 }, 213 .va_bits = 48, 214 .vm_max_level = 3, 215 }; 216 217 static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 }; 218 219 static const struct xe_device_desc adl_p_desc = { 220 .pre_gmdid_graphics_ip = &graphics_ip_xelp, 221 .pre_gmdid_media_ip = &media_ip_xem, 222 PLATFORM(ALDERLAKE_P), 223 .dma_mask_size = 39, 224 .has_cached_pt = true, 225 .has_display = true, 226 .has_llc = true, 227 .has_sriov = true, 228 .max_gt_per_tile = 1, 229 .require_force_probe = true, 230 .subplatforms = (const struct xe_subplatform_desc[]) { 231 { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids }, 232 {}, 233 }, 234 .va_bits = 48, 235 .vm_max_level = 3, 236 }; 237 238 static const struct xe_device_desc adl_n_desc = { 239 .pre_gmdid_graphics_ip = &graphics_ip_xelp, 240 
	.pre_gmdid_media_ip = &media_ip_xem,
	PLATFORM(ALDERLAKE_N),
	.dma_mask_size = 39,
	.has_cached_pt = true,
	.has_display = true,
	.has_llc = true,
	.has_sriov = true,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
	.va_bits = 48,
	.vm_max_level = 3,
};

#define DGFX_FEATURES \
	.is_dgfx = 1

static const struct xe_device_desc dg1_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xelpp,
	.pre_gmdid_media_ip = &media_ip_xem,
	DGFX_FEATURES,
	PLATFORM(DG1),
	.dma_mask_size = 39,
	.has_display = true,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_gt_per_tile = 1,
	.require_force_probe = true,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };

#define DG2_FEATURES \
	DGFX_FEATURES, \
	PLATFORM(DG2), \
	.has_flat_ccs = 1, \
	.has_gsc_nvm = 1, \
	.has_heci_gscfi = 1, \
	.subplatforms = (const struct xe_subplatform_desc[]) { \
		{ XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
		{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
		{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
		{ } \
	}, \
	.va_bits = 48, \
	.vm_max_level = 3, \
	.vram_flags = XE_VRAM_FLAGS_NEED64K

static const struct xe_device_desc ats_m_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.max_gt_per_tile = 1,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = false,
	.has_sriov = true,
};

static const struct xe_device_desc dg2_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
	.pre_gmdid_media_ip = &media_ip_xehpm,
	.dma_mask_size = 46,
	.max_gt_per_tile = 1,
	.require_force_probe = true,

	DG2_FEATURES,
	.has_display = true,
	.has_fan_control = true,
	.has_mbx_power_limits = false,
};

static const __maybe_unused struct xe_device_desc pvc_desc = {
	.pre_gmdid_graphics_ip = &graphics_ip_xehpc,
	DGFX_FEATURES,
	PLATFORM(PVC),
	.dma_mask_size = 52,
	.has_display = false,
	.has_gsc_nvm = 1,
	.has_heci_gscfi = 1,
	.max_gt_per_tile = 1,
	.max_remote_tiles = 1,
	.require_force_probe = true,
	.va_bits = 57,
	.vm_max_level = 4,
	.vram_flags = XE_VRAM_FLAGS_NEED64K,
	.has_mbx_power_limits = false,
};

static const struct xe_device_desc mtl_desc = {
	/* .graphics and .media determined via GMD_ID */
	.require_force_probe = true,
	PLATFORM(METEORLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_pxp = true,
	.max_gt_per_tile = 2,
	.va_bits = 48,
	.vm_max_level = 3,
};

static const struct xe_device_desc lnl_desc = {
	PLATFORM(LUNARLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_flat_ccs = 1,
	.has_pxp = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
	.va_bits = 48,
	.vm_max_level = 4,
};

static const u16 bmg_g21_ids[] = { INTEL_BMG_G21_IDS(NOP), 0 };

static const struct xe_device_desc bmg_desc = {
	DGFX_FEATURES,
	PLATFORM(BATTLEMAGE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_fan_control = true,
	.has_flat_ccs = 1,
	.has_mbx_power_limits = true,
	.has_mbx_thermal_info = true,
	.has_gsc_nvm = 1,
	.has_heci_cscfi = 1,
	.has_i2c = true,
	.has_late_bind = true,
	.has_pre_prod_wa = 1,
	.has_soc_remapper_telem = true,
	.has_sriov = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
	.subplatforms = (const struct xe_subplatform_desc[]) {
		{ XE_SUBPLATFORM_BATTLEMAGE_G21, "G21", bmg_g21_ids },
		{ }
	},
	.va_bits = 48,
	.vm_max_level = 4,
};

static const struct xe_device_desc ptl_desc = {
	PLATFORM(PANTHERLAKE),
	.dma_mask_size = 46,
	.has_display = true,
	.has_flat_ccs = 1,
	.has_sriov = true,
	.has_pre_prod_wa = 1,
	.has_pxp = true,
	.max_gt_per_tile = 2,
	.needs_scratch = true,
	.needs_shared_vf_gt_wq = true,
	.va_bits = 48,
	.vm_max_level = 4,
};

static const struct xe_device_desc nvls_desc = {
	PLATFORM(NOVALAKE_S),
	.dma_mask_size = 46,
	.has_display = true,
	.has_flat_ccs = 1,
	.has_pre_prod_wa = 1,
	.max_gt_per_tile = 2,
	.require_force_probe = true,
	.va_bits = 48,
	.vm_max_level = 4,
};

static const struct xe_device_desc cri_desc = {
	DGFX_FEATURES,
	PLATFORM(CRESCENTISLAND),
	.dma_mask_size = 52,
	.has_display = false,
	.has_flat_ccs = false,
	.has_gsc_nvm = 1,
	.has_i2c = true,
	.has_mbx_power_limits = true,
	.has_mbx_thermal_info = true,
	.has_mert = true,
	.has_pre_prod_wa = 1,
	.has_soc_remapper_sysctrl = true,
	.has_soc_remapper_telem = true,
	.has_sriov = true,
	.max_gt_per_tile = 2,
	.require_force_probe = true,
	.va_bits = 57,
	.vm_max_level = 4,
};

#undef PLATFORM
__diag_pop();
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
	INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
	INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
	INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
	INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
	INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
	INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
	INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
	INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
	INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
	INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
	INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
	INTEL_NVLS_IDS(INTEL_VGA_DEVICE, &nvls_desc),
	INTEL_CRI_IDS(INTEL_PCI_DEVICE, &cri_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* Is device_id present in the comma-separated list of hex IDs? */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
	char *s, *p, *tok;
	bool ret;

	if (!devices || !*devices)
		return false;

	/* match everything */
	if (negative && strcmp(devices, "!*") == 0)
		return true;
	if (!negative && strcmp(devices, "*") == 0)
		return true;

	s = kstrdup(devices, GFP_KERNEL);
	if (!s)
		return false;

	for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
		u16 val;

		if (negative && tok[0] == '!')
			tok++;
		else if ((negative && tok[0] != '!') ||
			 (!negative && tok[0] == '!'))
			continue;

		if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
			ret = true;
			break;
		}
	}

	kfree(s);

	return ret;
}
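/*
 * A hypothetical example of the list format consumed above: with
 * xe.force_probe="56a0,!56a1", id_forced() below returns true for device
 * 0x56a0 and id_blocked() returns true for device 0x56a1, while "*"
 * force-probes and "!*" blocks every device.
 */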
static bool id_forced(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, false);
}

static bool id_blocked(u16 device_id)
{
	return device_id_in_list(device_id, xe_modparam.force_probe, true);
}

static const struct xe_subplatform_desc *
find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
{
	const struct xe_subplatform_desc *sp;
	const u16 *id;

	for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
		for (id = sp->pciidlist; *id; id++)
			if (*id == xe->info.devid)
				return sp;

	return NULL;
}

enum xe_gmdid_type {
	GMDID_GRAPHICS,
	GMDID_MEDIA
};

static int read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	struct xe_reg gmdid_reg = GMD_ID;
	u32 val;

	KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);

	if (IS_SRIOV_VF(xe)) {
		/*
		 * To get the value of the GMDID register, VFs must obtain it
		 * from the GuC using MMIO communication.
		 *
		 * Note that at this point the GTs are not initialized and only
		 * tile-level access to MMIO registers is possible. To use our
		 * existing GuC communication functions we must create a dummy
		 * GT structure and perform at least basic xe_gt and xe_guc
		 * initialization.
		 */
		struct xe_gt *gt __free(kfree) = NULL;
		int err;

		gt = kzalloc(sizeof(*gt), GFP_KERNEL);
		if (!gt)
			return -ENOMEM;

		gt->tile = &xe->tiles[0];
		if (type == GMDID_MEDIA) {
			gt->info.id = 1;
			gt->info.type = XE_GT_TYPE_MEDIA;
		} else {
			gt->info.id = 0;
			gt->info.type = XE_GT_TYPE_MAIN;
		}

		xe_gt_mmio_init(gt);
		xe_guc_comm_init_early(&gt->uc.guc);

		err = xe_gt_sriov_vf_bootstrap(gt);
		if (err)
			return err;

		val = xe_gt_sriov_vf_gmdid(gt);
	} else {
		/*
		 * GMD_ID is a GT register, but at this point in the driver
		 * init we haven't fully initialized the GT yet so we need to
		 * read the register with the tile's MMIO accessor. That means
		 * we need to apply the GSI offset manually since it won't get
		 * automatically added as it would if we were using a GT mmio
		 * accessor.
		 */
		if (type == GMDID_MEDIA)
			gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;

		val = xe_mmio_read32(mmio, gmdid_reg);
	}

	*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
	*revid = REG_FIELD_GET(GMD_ID_REVID, val);

	return 0;
}
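/*
 * The verx100 values matched below use the encoding read_gmdid() computes
 * above, i.e. "arch * 100 + release": graphics version 20.04 (Xe2_LPG),
 * for example, is stored as 2004.
 */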
static const struct xe_ip *find_graphics_ip(unsigned int verx100)
{
	KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100);

	for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++)
		if (graphics_ips[i].verx100 == verx100)
			return &graphics_ips[i];
	return NULL;
}

static const struct xe_ip *find_media_ip(unsigned int verx100)
{
	KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100);

	for (int i = 0; i < ARRAY_SIZE(media_ips); i++)
		if (media_ips[i].verx100 == verx100)
			return &media_ips[i];
	return NULL;
}

/*
 * Read IP version from hardware and select graphics/media IP descriptors
 * based on the result.
 */
static int handle_gmdid(struct xe_device *xe,
			const struct xe_ip **graphics_ip,
			const struct xe_ip **media_ip,
			u32 *graphics_revid,
			u32 *media_revid)
{
	u32 ver;
	int ret;

	*graphics_ip = NULL;
	*media_ip = NULL;

	ret = read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
	if (ret)
		return ret;

	*graphics_ip = find_graphics_ip(ver);
	if (!*graphics_ip) {
		drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
			ver / 100, ver % 100);
	}

	ret = read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
	if (ret)
		return ret;

	/* Media may legitimately be fused off / not present */
	if (ver == 0)
		return 0;

	*media_ip = find_media_ip(ver);
	if (!*media_ip) {
		drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
			ver / 100, ver % 100);
	}

	return 0;
}

/*
 * Initialize device info content that only depends on static driver_data
 * passed to the driver at probe time from the PCI ID table.
 */
static int xe_info_init_early(struct xe_device *xe,
			      const struct xe_device_desc *desc,
			      const struct xe_subplatform_desc *subplatform_desc)
{
	int err;

	xe->info.platform_name = desc->platform_name;
	xe->info.platform = desc->platform;
	xe->info.subplatform = subplatform_desc ?
		subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;

	xe->info.dma_mask_size = desc->dma_mask_size;
	xe->info.va_bits = desc->va_bits;
	xe->info.vm_max_level = desc->vm_max_level;
	xe->info.vram_flags = desc->vram_flags;

	xe->info.is_dgfx = desc->is_dgfx;
	xe->info.has_cached_pt = desc->has_cached_pt;
	xe->info.has_fan_control = desc->has_fan_control;
	/* runtime fusing may force flat_ccs to be disabled later */
	xe->info.has_flat_ccs = desc->has_flat_ccs;
	xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
	xe->info.has_mbx_thermal_info = desc->has_mbx_thermal_info;
	xe->info.has_gsc_nvm = desc->has_gsc_nvm;
	xe->info.has_heci_gscfi = desc->has_heci_gscfi;
	xe->info.has_heci_cscfi = desc->has_heci_cscfi;
	xe->info.has_i2c = desc->has_i2c;
	xe->info.has_late_bind = desc->has_late_bind;
	xe->info.has_llc = desc->has_llc;
	xe->info.has_mert = desc->has_mert;
	xe->info.has_page_reclaim_hw_assist = desc->has_page_reclaim_hw_assist;
	xe->info.has_pre_prod_wa = desc->has_pre_prod_wa;
	xe->info.has_pxp = desc->has_pxp;
	xe->info.has_soc_remapper_sysctrl = desc->has_soc_remapper_sysctrl;
	xe->info.has_soc_remapper_telem = desc->has_soc_remapper_telem;
	xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
			     desc->has_sriov;
	xe->info.skip_guc_pc = desc->skip_guc_pc;
	xe->info.skip_mtcfg = desc->skip_mtcfg;
	xe->info.skip_pcode = desc->skip_pcode;
	xe->info.needs_scratch = desc->needs_scratch;
	xe->info.needs_shared_vf_gt_wq = desc->needs_shared_vf_gt_wq;

	xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
				 xe_modparam.probe_display &&
				 desc->has_display;

	xe_assert(xe, desc->max_gt_per_tile > 0);
	xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE);
	xe->info.max_gt_per_tile = desc->max_gt_per_tile;
	xe->info.tile_count = 1 + desc->max_remote_tiles;

	err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
	if (err)
		return err;

	return 0;
}
/*
 * Possibly override the number of tiles based on the configuration register.
 */
static void xe_info_probe_tile_count(struct xe_device *xe)
{
	struct xe_mmio *mmio;
	u8 tile_count;
	u32 mtcfg;

	KUNIT_STATIC_STUB_REDIRECT(xe_info_probe_tile_count, xe);

	/*
	 * Probe for tile count only for platforms that support multiple
	 * tiles.
	 */
	if (xe->info.tile_count == 1)
		return;

	if (xe->info.skip_mtcfg)
		return;

	mmio = xe_root_tile_mmio(xe);

	/*
	 * Although the per-tile mmio regs are not yet initialized, this
	 * is fine because the read goes through the root tile's mmio,
	 * which is guaranteed to have been initialized earlier in
	 * xe_mmio_probe_early().
	 */
	mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
	tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;

	if (tile_count < xe->info.tile_count) {
		drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
			 xe->info.tile_count, tile_count);
		xe->info.tile_count = tile_count;
	}
}

static struct xe_gt *alloc_primary_gt(struct xe_tile *tile,
				      const struct xe_graphics_desc *graphics_desc,
				      const struct xe_media_desc *media_desc)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_gt *gt;

	if (!xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev))) {
		xe_info(xe, "Primary GT disabled via configfs\n");
		return NULL;
	}

	gt = xe_gt_alloc(tile);
	if (IS_ERR(gt))
		return gt;

	gt->info.type = XE_GT_TYPE_MAIN;
	gt->info.id = tile->id * xe->info.max_gt_per_tile;
	gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
	gt->info.multi_queue_engine_class_mask = graphics_desc->multi_queue_engine_class_mask;
	gt->info.engine_mask = graphics_desc->hw_engine_mask;

	/*
	 * Before media version 13, the media IP was part of the primary GT
	 * so we need to add the media engines to the primary GT's engine list.
	 */
	if (MEDIA_VER(xe) < 13 && media_desc)
		gt->info.engine_mask |= media_desc->hw_engine_mask;

	return gt;
}

static struct xe_gt *alloc_media_gt(struct xe_tile *tile,
				    const struct xe_media_desc *media_desc)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_gt *gt;

	if (!xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) {
		xe_info(xe, "Media GT disabled via configfs\n");
		return NULL;
	}

	if (MEDIA_VER(xe) < 13 || !media_desc)
		return NULL;

	gt = xe_gt_alloc(tile);
	if (IS_ERR(gt))
		return gt;

	gt->info.type = XE_GT_TYPE_MEDIA;
	gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
	gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
	gt->info.engine_mask = media_desc->hw_engine_mask;

	return gt;
}
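/*
 * A worked example of the GT ID slots assigned above: with
 * max_gt_per_tile == 2, tile 0 owns GT IDs 0 (primary) and 1 (media)
 * while tile 1 owns GT IDs 2 and 3, i.e. tile->id * max_gt_per_tile
 * selects a tile's first slot and the media GT takes the one after it.
 */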
/*
 * Initialize device info content that does require knowledge about
 * graphics / media IP version.
 * Make sure that GT / tile structures allocated by the driver match the data
 * present in device info.
 */
static int xe_info_init(struct xe_device *xe,
			const struct xe_device_desc *desc)
{
	u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
	const struct xe_ip *graphics_ip;
	const struct xe_ip *media_ip;
	const struct xe_graphics_desc *graphics_desc;
	const struct xe_media_desc *media_desc;
	struct xe_tile *tile;
	struct xe_gt *gt;
	int ret;
	u8 id;

	/*
	 * If this platform supports GMD_ID, we'll detect the proper IP
	 * descriptor to use from hardware registers.
	 * desc->pre_gmdid_graphics_ip will only ever be set at this point for
	 * platforms before GMD_ID. In that case the IP descriptions and
	 * versions are simply derived from that.
	 */
	if (desc->pre_gmdid_graphics_ip) {
		graphics_ip = desc->pre_gmdid_graphics_ip;
		media_ip = desc->pre_gmdid_media_ip;
		xe->info.step = xe_step_pre_gmdid_get(xe);
	} else {
		xe_assert(xe, !desc->pre_gmdid_media_ip);
		ret = handle_gmdid(xe, &graphics_ip, &media_ip,
				   &graphics_gmdid_revid, &media_gmdid_revid);
		if (ret)
			return ret;

		xe->info.step = xe_step_gmdid_get(xe,
						  graphics_gmdid_revid,
						  media_gmdid_revid);
	}

	/*
	 * If we couldn't detect the graphics IP, that's considered a fatal
	 * error and we should abort driver load. Failing to detect media
	 * IP is non-fatal; we'll just proceed without enabling media support.
	 */
	if (!graphics_ip)
		return -ENODEV;

	xe->info.graphics_verx100 = graphics_ip->verx100;
	xe->info.graphics_name = graphics_ip->name;
	graphics_desc = graphics_ip->desc;

	if (media_ip) {
		xe->info.media_verx100 = media_ip->verx100;
		xe->info.media_name = media_ip->name;
		media_desc = media_ip->desc;
	} else {
		xe->info.media_name = "none";
		media_desc = NULL;
	}

	xe->info.has_asid = graphics_desc->has_asid;
	xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
	if (xe->info.platform != XE_PVC)
		xe->info.has_device_atomics_on_smem = 1;

	xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
	xe->info.has_usm = graphics_desc->has_usm;
	xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
	xe->info.has_mem_copy_instr = GRAPHICS_VER(xe) >= 20;

	xe_info_probe_tile_count(xe);

	for_each_remote_tile(tile, xe, id) {
		int err;

		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/* Allocate any GT and VRAM structures necessary for the platform. */
	for_each_tile(tile, xe, id) {
		int err;

		err = xe_tile_alloc_vram(tile);
		if (err)
			return err;

		tile->primary_gt = alloc_primary_gt(tile, graphics_desc, media_desc);
		if (IS_ERR(tile->primary_gt))
			return PTR_ERR(tile->primary_gt);

		/*
		 * It's not currently possible to probe a device with the
		 * primary GT disabled. With some work, this may become
		 * possible in the future for igpu platforms (although
		 * probably not for dgpu's since access to the primary GT's
		 * BCS engines is required for VRAM management).
		 */
		if (!tile->primary_gt) {
			drm_err(&xe->drm, "Cannot probe device without a primary GT\n");
			return -ENODEV;
		}

		tile->media_gt = alloc_media_gt(tile, media_desc);
		if (IS_ERR(tile->media_gt))
			return PTR_ERR(tile->media_gt);
	}

	/*
	 * Now that we have tiles and GTs defined, let's loop over valid GTs
	 * in order to define gt_count.
	 */
	for_each_gt(gt, xe, id)
		xe->info.gt_count++;

	return 0;
}

static void xe_pci_remove(struct pci_dev *pdev)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);

	if (IS_SRIOV_PF(xe))
		xe_pci_sriov_configure(pdev, 0);

	if (xe_survivability_mode_is_boot_enabled(xe))
		return;

	xe_device_remove(xe);
	xe_pm_fini(xe);
}
/*
 * Probe the PCI device, initialize various parts of the driver.
 *
 * Fault injection is used to test the error paths of some initialization
 * functions called either directly from xe_pci_probe() or indirectly, for
 * example through xe_device_probe(). Those functions use the kernel fault
 * injection infrastructure, see
 * Documentation/fault-injection/fault-injection.rst for details. The macro
 * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
 * at runtime and use a provided return value. The first requirement for
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
 * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when
 * function error injection skips the whole function.
 */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct xe_device_desc *desc = (const void *)ent->driver_data;
	const struct xe_subplatform_desc *subplatform_desc;
	struct xe_device *xe;
	int err;

	xe_configfs_check_device(pdev);

	if (desc->require_force_probe && !id_forced(pdev->device)) {
		dev_info(&pdev->dev,
			 "Your graphics device %04x is not officially supported\n"
			 "by xe driver in this kernel version. To force Xe probe,\n"
			 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
			 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
			 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
			 pdev->device, pdev->device, pdev->device,
			 pdev->device, pdev->device);
		return -ENODEV;
	}

	if (id_blocked(pdev->device)) {
		dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
			 pdev->vendor, pdev->device);
		return -ENODEV;
	}

	if (xe_display_driver_probe_defer(pdev))
		return -EPROBE_DEFER;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	xe = xe_device_create(pdev, ent);
	if (IS_ERR(xe))
		return PTR_ERR(xe);

	pci_set_drvdata(pdev, &xe->drm);

	xe_pm_assert_unbounded_bridge(xe);
	subplatform_desc = find_subplatform(xe, desc);

	pci_set_master(pdev);

	err = xe_info_init_early(xe, desc, subplatform_desc);
	if (err)
		return err;

	xe_pci_rebar_resize(xe);

	err = xe_device_probe_early(xe);
	/*
	 * In boot survivability mode, no DRM card is exposed and the driver
	 * is loaded with the bare minimum needed to allow firmware to be
	 * flashed through mei. Return success if survivability mode is
	 * enabled due to a pcode failure or configfs being set.
	 */
	if (xe_survivability_mode_is_boot_enabled(xe))
		return 0;

	if (err)
		return err;

	err = xe_info_init(xe, desc);
	if (err)
		return err;

	err = xe_display_probe(xe);
	if (err)
		return err;
	drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
		desc->platform_name,
		subplatform_desc ? subplatform_desc->name : "",
		xe->info.devid, xe->info.revid,
		xe->info.is_dgfx,
		xe->info.graphics_name,
		xe->info.graphics_verx100 / 100,
		xe->info.graphics_verx100 % 100,
		xe->info.media_name,
		xe->info.media_verx100 / 100,
		xe->info.media_verx100 % 100,
		str_yes_no(xe->info.probe_display),
		xe->info.dma_mask_size, xe->info.tile_count,
		xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);

	drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, B:%s)\n",
		xe_step_name(xe->info.step.graphics),
		xe_step_name(xe->info.step.media),
		xe_step_name(xe->info.step.basedie));

	drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
		str_yes_no(xe_device_has_sriov(xe)),
		xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));

	err = xe_pm_init_early(xe);
	if (err)
		return err;

	err = xe_device_probe(xe);
	if (err)
		return err;

	err = xe_pm_init(xe);
	if (err)
		goto err_driver_cleanup;

	drm_dbg(&xe->drm, "d3cold: capable=%s\n",
		str_yes_no(xe->d3cold.capable));

	return 0;

err_driver_cleanup:
	xe_pci_remove(pdev);
	return err;
}

static void xe_pci_shutdown(struct pci_dev *pdev)
{
	xe_device_shutdown(pdev_to_xe_device(pdev));
}

#ifdef CONFIG_PM_SLEEP
static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	struct pci_dev *root_pdev;

	if (!xe->d3cold.capable)
		return;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return;

	switch (toggle) {
	case D3COLD_DISABLE:
		pci_d3cold_disable(root_pdev);
		break;
	case D3COLD_ENABLE:
		pci_d3cold_enable(root_pdev);
		break;
	}
}

static int xe_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	if (xe_survivability_mode_is_boot_enabled(xe))
		return -EBUSY;

	err = xe_pm_suspend(xe);
	if (err)
		return err;

	/*
	 * Enabling D3Cold is needed for S2Idle/S0ix.
	 * It is safe to allow it here since xe_pm_suspend has evicted
	 * the local memory and the direct complete optimization is disabled.
	 */
	d3cold_toggle(pdev, D3COLD_ENABLE);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

static int xe_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	/* Give back the D3Cold decision to the runtime PM */
	d3cold_toggle(pdev, D3COLD_DISABLE);

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = xe_pm_resume(pdev_to_xe_device(pdev));
	if (err)
		return err;

	return 0;
}
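/*
 * Unlike the system-sleep path above, which re-enables D3Cold on any
 * capable device before suspending, the runtime PM path below only enters
 * D3Cold when xe_pci_runtime_idle() has marked it as allowed via
 * xe_pm_d3cold_allowed_toggle(); otherwise the device is put in D3hot.
 */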
static int xe_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	/*
	 * We hold an additional reference to the runtime PM to keep PF in D0
	 * during VFs lifetime, as our VFs do not implement the PM capability.
	 * This means we should never be runtime suspending as long as VFs are
	 * enabled.
	 */
	xe_assert(xe, !IS_SRIOV_VF(xe));
	xe_assert(xe, !pci_num_vf(pdev));

	err = xe_pm_runtime_suspend(xe);
	if (err)
		return err;

	pci_save_state(pdev);

	if (xe->d3cold.allowed) {
		d3cold_toggle(pdev, D3COLD_ENABLE);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
	} else {
		d3cold_toggle(pdev, D3COLD_DISABLE);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int xe_pci_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		return err;

	pci_restore_state(pdev);

	if (xe->d3cold.allowed) {
		err = pci_enable_device(pdev);
		if (err)
			return err;

		pci_set_master(pdev);
	}

	return xe_pm_runtime_resume(xe);
}

static int xe_pci_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}

static const struct dev_pm_ops xe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
	SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
};
#endif

static struct pci_driver xe_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = xe_pci_probe,
	.remove = xe_pci_remove,
	.shutdown = xe_pci_shutdown,
	.sriov_configure = xe_pci_sriov_configure,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &xe_pm_ops,
#endif
};

/**
 * xe_pci_to_pf_device() - Get the PF &xe_device.
 * @pdev: the VF &pci_dev device
 *
 * Return: pointer to the PF &xe_device, or NULL otherwise.
 */
struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
{
	struct drm_device *drm;

	drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
	if (IS_ERR(drm))
		return NULL;

	return to_xe_device(drm);
}

int xe_register_pci_driver(void)
{
	return pci_register_driver(&xe_pci_driver);
}

void xe_unregister_pci_driver(void)
{
	pci_unregister_driver(&xe_pci_driver);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_pci.c"
#endif