// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/units.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_ggtt.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_wait_user_fence.h"

#ifdef CONFIG_LOCKDEP
struct lockdep_map xe_device_mem_access_lockdep_map = {
        .name = "xe_device_mem_access_lockdep_map"
};
#endif

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_drm_client *client;
        struct xe_file *xef;
        int ret = -ENOMEM;

        xef = kzalloc(sizeof(*xef), GFP_KERNEL);
        if (!xef)
                return ret;

        client = xe_drm_client_alloc();
        if (!client) {
                kfree(xef);
                return ret;
        }

        xef->drm = file;
        xef->client = client;
        xef->xe = xe;

        mutex_init(&xef->vm.lock);
        xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

        mutex_init(&xef->exec_queue.lock);
        xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

        spin_lock(&xe->clients.lock);
        xe->clients.count++;
        spin_unlock(&xe->clients.lock);

        file->driver_priv = xef;
        return 0;
}

static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = file->driver_priv;
        struct xe_vm *vm;
        struct xe_exec_queue *q;
        unsigned long idx;

        mutex_lock(&xef->exec_queue.lock);
        xa_for_each(&xef->exec_queue.xa, idx, q) {
                xe_exec_queue_kill(q);
                xe_exec_queue_put(q);
        }
        mutex_unlock(&xef->exec_queue.lock);
        xa_destroy(&xef->exec_queue.xa);
        mutex_destroy(&xef->exec_queue.lock);
        mutex_lock(&xef->vm.lock);
        xa_for_each(&xef->vm.xa, idx, vm)
                xe_vm_close_and_put(vm);
        mutex_unlock(&xef->vm.lock);
        xa_destroy(&xef->vm.xa);
        mutex_destroy(&xef->vm.lock);

        spin_lock(&xe->clients.lock);
        xe->clients.count--;
        spin_unlock(&xe->clients.lock);

        xe_drm_client_put(xef->client);
        kfree(xef);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
        DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
                          DRM_RENDER_ALLOW),
};

static const struct file_operations xe_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release_noglobal,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
        .show_fdinfo = drm_show_fdinfo,
#endif
};

static void xe_driver_release(struct drm_device *dev)
{
        struct xe_device *xe = to_xe_device(dev);

        pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
}

static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
         */
        .driver_features =
            DRIVER_GEM |
            DRIVER_RENDER | DRIVER_SYNCOBJ |
            DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
        .open = xe_file_open,
        .postclose = xe_file_close,

        .gem_prime_import = xe_gem_prime_import,

        .dumb_create = xe_bo_dumb_create,
        .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
        .show_fdinfo = xe_drm_client_fdinfo,
#endif
        .release = &xe_driver_release,

        .ioctls = xe_ioctls,
        .num_ioctls = ARRAY_SIZE(xe_ioctls),
        .fops = &xe_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
        struct xe_device *xe = to_xe_device(dev);

        if (xe->ordered_wq)
                destroy_workqueue(xe->ordered_wq);

        if (xe->unordered_wq)
                destroy_workqueue(xe->unordered_wq);

        ttm_device_fini(&xe->ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
{
        struct xe_device *xe;
        int err;

        xe_display_driver_set_hooks(&driver);

        err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
        if (err)
                return ERR_PTR(err);

        xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
        if (IS_ERR(xe))
                return xe;

        err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
                              xe->drm.anon_inode->i_mapping,
                              xe->drm.vma_offset_manager, false, false);
        if (WARN_ON(err))
                goto err;

        err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
        if (err)
                goto err;

        xe->info.devid = pdev->device;
        xe->info.revid = pdev->revision;
        xe->info.force_execlist = xe_modparam.force_execlist;

        spin_lock_init(&xe->irq.lock);
        spin_lock_init(&xe->clients.lock);

        init_waitqueue_head(&xe->ufence_wq);

        drmm_mutex_init(&xe->drm, &xe->usm.lock);
        xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

        if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
                /* Trigger a large asid and an early asid wrap. */
                u32 asid;

                BUILD_BUG_ON(XE_MAX_ASID < 2);
                err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
                                      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
                                      &xe->usm.next_asid, GFP_KERNEL);
                drm_WARN_ON(&xe->drm, err);
                if (err >= 0)
                        xa_erase(&xe->usm.asid_to_vm, asid);
        }

        spin_lock_init(&xe->pinned.lock);
        INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
        INIT_LIST_HEAD(&xe->pinned.external_vram);
        INIT_LIST_HEAD(&xe->pinned.evicted);

        xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
        xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
        if (!xe->ordered_wq || !xe->unordered_wq) {
                drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
                err = -ENOMEM;
                goto err;
        }

        err = xe_display_create(xe);
        if (WARN_ON(err))
                goto err;

        return xe;

err:
        return ERR_PTR(err);
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of the driver is bound to the device it will do a
 * full re-init anyway.
 */
static void xe_driver_flr(struct xe_device *xe)
{
        const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
        struct xe_gt *gt = xe_root_mmio_gt(xe);
        int ret;

        if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
                drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
                return;
        }

        drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

        /*
         * Make sure any pending FLR requests have cleared by waiting for the
         * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
         * to make sure it's not still set from a prior attempt (it's a
         * write-to-clear bit).
         * Note that we should never be in a situation where a previous attempt
         * is still pending (unless the HW is totally dead), but better to be
         * safe in case something unexpected happens.
         */
        ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
        if (ret) {
                drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
                return;
        }
        xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);

        /* Trigger the actual Driver-FLR */
        xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);

        /* Wait for hardware teardown to complete */
        ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
        if (ret) {
                drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
                return;
        }

        /* Wait for hardware/firmware re-init to complete */
        ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
                             flr_timeout, NULL, false);
        if (ret) {
                drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
                return;
        }

        /* Clear sticky completion status */
        xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
}

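/* Final drmm action on device release: issue the Driver-FLR when requested. */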
static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
{
        struct xe_device *xe = arg;

        if (xe->needs_flr_on_fini)
                xe_driver_flr(xe);
}

static void xe_device_sanitize(struct drm_device *drm, void *arg)
{
        struct xe_device *xe = arg;
        struct xe_gt *gt;
        u8 id;

        for_each_gt(gt, xe, id)
                xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
        unsigned int mask_size = xe->info.dma_mask_size;
        int err;

        dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

        err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
        if (err)
                goto mask_err;

        err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
        if (err)
                goto mask_err;

        return 0;

mask_err:
        drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
        return err;
}

/*
 * Initialize MMIO resources that don't require any knowledge about tile count.
 */
int xe_device_probe_early(struct xe_device *xe)
{
        int err;

        err = xe_mmio_init(xe);
        if (err)
                return err;

        err = xe_mmio_root_tile_init(xe);
        if (err)
                return err;

        return 0;
}

static int xe_device_set_has_flat_ccs(struct xe_device *xe)
{
        u32 reg;
        int err;

        if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
                return 0;

        struct xe_gt *gt = xe_root_mmio_gt(xe);

        err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
        if (err)
                return err;

        reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
        xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

        if (!xe->info.has_flat_ccs)
                drm_dbg(&xe->drm,
                        "Flat CCS has been disabled in BIOS, may lead to performance impact");

        return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

int xe_device_probe(struct xe_device *xe)
{
        struct xe_tile *tile;
        struct xe_gt *gt;
        int err;
        u8 last_gt;
        u8 id;

        xe_pat_init_early(xe);

        err = xe_sriov_init(xe);
        if (err)
                return err;

        xe->info.mem_region_mask = 1;
        err = xe_display_init_nommio(xe);
        if (err)
                return err;

        err = xe_set_dma_info(xe);
        if (err)
                return err;

        xe_mmio_probe_tiles(xe);

        xe_ttm_sys_mgr_init(xe);

        for_each_gt(gt, xe, id)
                xe_force_wake_init_gt(gt, gt_to_fw(gt));

        for_each_tile(tile, xe, id) {
                err = xe_ggtt_init_early(tile->mem.ggtt);
                if (err)
                        return err;
                if (IS_SRIOV_VF(xe)) {
                        err = xe_memirq_init(&tile->sriov.vf.memirq);
                        if (err)
                                return err;
                }
        }

        for_each_gt(gt, xe, id) {
                err = xe_gt_init_hwconfig(gt);
                if (err)
                        return err;
        }

        err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
        if (err)
                return err;

        for_each_gt(gt, xe, id) {
                err = xe_pcode_probe(gt);
                if (err)
                        return err;
        }

        err = xe_display_init_noirq(xe);
        if (err)
                return err;

        err = xe_irq_install(xe);
        if (err)
                goto err;

        for_each_gt(gt, xe, id) {
                err = xe_gt_init_early(gt);
                if (err)
                        goto err_irq_shutdown;
        }

        err = xe_device_set_has_flat_ccs(xe);
        if (err)
                goto err_irq_shutdown;

        err = xe_mmio_probe_vram(xe);
        if (err)
                goto err_irq_shutdown;

        for_each_tile(tile, xe, id) {
                err = xe_tile_init_noalloc(tile);
                if (err)
                        goto err_irq_shutdown;
        }

        /* Allocate and map stolen after potential VRAM resize */
        xe_ttm_stolen_mgr_init(xe);

        /*
         * Now that GT is initialized (TTM in particular),
         * we can try to init display, and inherit the initial fb.
         * This is the reason the first allocation needs to be done
         * inside display.
         */
        err = xe_display_init_noaccel(xe);
        if (err)
                goto err_irq_shutdown;

        for_each_gt(gt, xe, id) {
                last_gt = id;

                err = xe_gt_init(gt);
                if (err)
                        goto err_fini_gt;
        }

        xe_heci_gsc_init(xe);

        err = xe_display_init(xe);
        if (err)
                goto err_fini_gt;

        err = drm_dev_register(&xe->drm, 0);
        if (err)
                goto err_fini_display;

        xe_display_register(xe);

        xe_debugfs_register(xe);

        xe_hwmon_register(xe);

        err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
        if (err)
                return err;

        return 0;

err_fini_display:
        xe_display_driver_remove(xe);

err_fini_gt:
        for_each_gt(gt, xe, id) {
                if (id < last_gt)
                        xe_gt_remove(gt);
                else
                        break;
        }

err_irq_shutdown:
        xe_irq_shutdown(xe);
err:
        xe_display_fini(xe);
        return err;
}

static void xe_device_remove_display(struct xe_device *xe)
{
        xe_display_unregister(xe);

        drm_dev_unplug(&xe->drm);
        xe_display_driver_remove(xe);
}

void xe_device_remove(struct xe_device *xe)
{
        struct xe_gt *gt;
        u8 id;

        xe_device_remove_display(xe);

        xe_display_fini(xe);

        xe_heci_gsc_fini(xe);

        for_each_gt(gt, xe, id)
                xe_gt_remove(gt);

        xe_irq_shutdown(xe);
}

void xe_device_shutdown(struct xe_device *xe)
{
}

void xe_device_wmb(struct xe_device *xe)
{
        struct xe_gt *gt = xe_root_mmio_gt(xe);

        wmb();
        if (IS_DGFX(xe))
                xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
        return xe_device_has_flat_ccs(xe) ?
                DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

bool xe_device_mem_access_ongoing(struct xe_device *xe)
{
        if (xe_pm_read_callback_task(xe) != NULL)
                return true;

        return atomic_read(&xe->mem_access.ref);
}

void xe_device_assert_mem_access(struct xe_device *xe)
{
        XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
}

bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
{
        bool active;

        if (xe_pm_read_callback_task(xe) == current)
                return true;

        active = xe_pm_runtime_get_if_active(xe);
        if (active) {
                int ref = atomic_inc_return(&xe->mem_access.ref);

                xe_assert(xe, ref != S32_MAX);
        }

        return active;
}

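/*
 * Take a mem_access reference and wake the device, resuming it synchronously
 * if needed. Must be balanced by xe_device_mem_access_put(). Calls made from
 * within the runtime PM callbacks themselves are a no-op, see the comments
 * below.
 */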
void xe_device_mem_access_get(struct xe_device *xe)
{
        int ref;

        /*
         * This looks racy, but should be fine since the pm_callback_task only
         * transitions from NULL -> current (and back to NULL again), during the
         * runtime_resume() or runtime_suspend() callbacks, for which there can
         * only be a single one running for our device. We only need to prevent
         * recursively calling the runtime_get or runtime_put from those
         * callbacks, as well as preventing triggering any access_ongoing
         * asserts.
         */
        if (xe_pm_read_callback_task(xe) == current)
                return;

        /*
         * Since the resume here is synchronous it can be quite easy to deadlock
         * if we are not careful. Also in practice it might be quite timing
         * sensitive to ever see the 0 -> 1 transition with the caller's locks
         * held, so deadlocks might exist but are hard for lockdep to ever see.
         * With this in mind, help lockdep learn about the potentially scary
         * stuff that can happen inside the runtime_resume callback by acquiring
         * a dummy lock (it doesn't protect anything and gets compiled out on
         * non-debug builds). Lockdep then only needs to see the
         * mem_access_lockdep_map -> runtime_resume callback once, and then can
         * hopefully validate all the (callers_locks) -> mem_access_lockdep_map
         * dependencies. For example, if the (callers_locks) are ever grabbed in
         * the runtime_resume callback, lockdep should give us a nice splat.
         */
        lock_map_acquire(&xe_device_mem_access_lockdep_map);
        lock_map_release(&xe_device_mem_access_lockdep_map);

        xe_pm_runtime_get(xe);
        ref = atomic_inc_return(&xe->mem_access.ref);

        xe_assert(xe, ref != S32_MAX);
}

void xe_device_mem_access_put(struct xe_device *xe)
{
        int ref;

        if (xe_pm_read_callback_task(xe) == current)
                return;

        ref = atomic_dec_return(&xe->mem_access.ref);
        xe_pm_runtime_put(xe);

        xe_assert(xe, ref >= 0);
}

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
        struct xe_gt *gt;
        u8 id;

        drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
        drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

        for_each_gt(gt, xe, id) {
                drm_printf(p, "GT id: %u\n", id);
                drm_printf(p, "\tType: %s\n",
                           gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
                drm_printf(p, "\tIP ver: %u.%u.%u\n",
                           REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
                           REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
                           REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
                drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
        }
}

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
        return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
        return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}