// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/units.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_ggtt.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_wait_user_fence.h"

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	spin_lock(&xe->clients.lock);
	xe->clients.count++;
	spin_unlock(&xe->clients.lock);

	file->driver_priv = xef;
	return 0;
}

static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	mutex_lock(&xef->exec_queue.lock);
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	mutex_unlock(&xef->exec_queue.lock);
	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	mutex_lock(&xef->vm.lock);
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);
	mutex_unlock(&xef->vm.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	spin_lock(&xe->clients.lock);
	xe->clients.count--;
	spin_unlock(&xe->clients.lock);

	xe_drm_client_put(xef->client);
	kfree(xef);
}
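/*
 * Device ioctls exposed by the xe driver. Every entry in the table below is
 * tagged DRM_RENDER_ALLOW, so the uAPI is reachable from render nodes as
 * well as from the primary node.
 */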
static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
};

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}
#else
/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif
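/*
 * File operations backing the DRM character devices. All ioctl entry points
 * go through the runtime-PM aware wrappers above, so the device is held
 * awake for the duration of each call.
 */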
static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

static void xe_driver_release(struct drm_device *dev)
{
	struct xe_device *xe = to_xe_device(dev);

	pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
}

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.release = &xe_driver_release,

	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	ttm_device_fini(&xe->ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err;

	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;

	spin_lock_init(&xe->irq.lock);
	spin_lock_init(&xe->clients.lock);

	init_waitqueue_head(&xe->ufence_wq);

	drmm_mutex_init(&xe->drm, &xe->usm.lock);
	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq) {
		/*
		 * Cleanup done in xe_device_destroy via
		 * drmm_add_action_or_reset registered above
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = xe_display_create(xe);
	if (WARN_ON(err))
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}
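/*
 * Note on the managed cleanup used in xe_device_create() above: the action
 * registered with drmm_add_action_or_reset() (xe_device_destroy) runs when
 * the final drm_device reference is dropped, and is invoked immediately if
 * the registration itself fails. That is why the error paths after the
 * registration simply return ERR_PTR(err) instead of freeing the workqueues
 * or calling ttm_device_fini() by hand.
 */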
/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of the driver is bound to the device it will do a
 * full re-init anyway.
 */
static void xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	int ret;

	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
		return;
	}

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
}

static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}
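/*
 * Configure the streaming and coherent DMA masks from the per-platform
 * dma_mask_size, and set the maximum scatterlist segment size the device
 * can handle.
 */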
static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static bool verify_lmem_ready(struct xe_gt *gt)
{
	u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;

	return !!val;
}

static int wait_for_lmem_ready(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	unsigned long timeout, start;

	if (!IS_DGFX(xe))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (verify_lmem_ready(gt))
		return 0;

	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");

	start = jiffies;
	timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */

	do {
		if (signal_pending(current))
			return -EINTR;

		/*
		 * The boot firmware initializes local memory and
		 * assesses its health. If memory training fails,
		 * the punit will have been instructed to keep the GT powered
		 * down; we won't be able to communicate with it.
		 *
		 * If the status check is done before punit updates the register,
		 * it can lead to the system being unusable.
		 * Use a timeout and defer the probe to prevent this.
		 */
		if (time_after(jiffies, timeout)) {
			drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
			return -EPROBE_DEFER;
		}

		msleep(20);

	} while (!verify_lmem_ready(gt));

	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));

	return 0;
}

/**
 * xe_device_probe_early - Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any
 * knowledge about tile count. Also initialize pcode and
 * check vram initialization on root tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	err = xe_mmio_init(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	err = xe_pcode_probe_early(xe);
	if (err)
		return err;

	err = wait_for_lmem_ready(xe);
	if (err)
		return err;

	return 0;
}
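/*
 * A rough sketch of how the probe helpers in this file are expected to be
 * sequenced from the PCI probe path (an assumption for illustration only;
 * the authoritative ordering lives in the PCI glue code, not here):
 *
 *	xe = xe_device_create(pdev, ent);
 *	err = xe_device_probe_early(xe);	// MMIO, SR-IOV mode, pcode, lmem ready
 *	...					// e.g. platform/tile sizing
 *	err = xe_device_probe(xe);		// full bring-up, drm_dev_register()
 */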
static int xe_device_set_has_flat_ccs(struct xe_device *xe)
{
	u32 reg;
	int err;

	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
		return 0;

	struct xe_gt *gt = xe_root_mmio_gt(xe);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS, may lead to performance impact");

	return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 last_gt;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;
	err = xe_display_init_nommio(xe);
	if (err)
		return err;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	xe_mmio_probe_tiles(xe);

	xe_ttm_sys_mgr_init(xe);

	for_each_gt(gt, xe, id)
		xe_force_wake_init_gt(gt, gt_to_fw(gt));

	for_each_tile(tile, xe, id) {
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
		if (IS_SRIOV_VF(xe)) {
			err = xe_memirq_init(&tile->sriov.vf.memirq);
			if (err)
				return err;
		}
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_hwconfig(gt);
		if (err)
			return err;
	}

	err = xe_devcoredump_init(xe);
	if (err)
		return err;
	err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id)
		xe_pcode_init(gt);

	err = xe_display_init_noirq(xe);
	if (err)
		return err;

	err = xe_irq_install(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = xe_device_set_has_flat_ccs(xe);
	if (err)
		goto err_irq_shutdown;

	err = xe_mmio_probe_vram(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			goto err_irq_shutdown;
	}

	/* Allocate and map stolen after potential VRAM resize */
	xe_ttm_stolen_mgr_init(xe);

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_noaccel(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_gt(gt, xe, id) {
		last_gt = id;

		err = xe_gt_init(gt);
		if (err)
			goto err_fini_gt;
	}

	xe_heci_gsc_init(xe);

	err = xe_display_init(xe);
	if (err)
		goto err_fini_gt;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_fini_display;

	xe_display_register(xe);

	xe_debugfs_register(xe);

	xe_hwmon_register(xe);

	return drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);

err_fini_display:
	xe_display_driver_remove(xe);

err_fini_gt:
	for_each_gt(gt, xe, id) {
		if (id < last_gt)
			xe_gt_remove(gt);
		else
			break;
	}

err_irq_shutdown:
	xe_irq_shutdown(xe);
err:
	xe_display_fini(xe);
	return err;
}

static void xe_device_remove_display(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);
	xe_display_driver_remove(xe);
}

void xe_device_remove(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	xe_device_remove_display(xe);

	xe_display_fini(xe);

	xe_heci_gsc_fini(xe);

	for_each_gt(gt, xe, id)
		xe_gt_remove(gt);

	xe_irq_shutdown(xe);
}

void xe_device_shutdown(struct xe_device *xe)
{
}

void xe_device_wmb(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);

	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

/**
 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
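/*
 * Minimal usage sketch for the assert above (illustrative only; names other
 * than the xe_pm_runtime_*() and xe_device_assert_mem_access() calls are
 * hypothetical):
 *
 *	xe_pm_runtime_get(xe);			// outer bound: keep device awake
 *	...
 *	xe_device_assert_mem_access(xe);	// inner check before touching HW/VRAM
 *	do_hw_access(xe);			// hypothetical accessor
 *	...
 *	xe_pm_runtime_put(xe);
 */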
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}
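/*
 * Worked example for the two helpers above, assuming va_bits == 48 (purely
 * illustrative; the real value comes from xe->info.va_bits):
 *
 *	canonicalize(0x0000800000000000)   -> 0xffff800000000000
 *	    (bit 47 is replicated into bits 63:48)
 *	uncanonicalize(0xffff800000000000) -> 0x0000800000000000
 *	    (bits above va_bits - 1 are masked off)
 */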