1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include "xe_device.h" 7 8 #include <linux/units.h> 9 10 #include <drm/drm_aperture.h> 11 #include <drm/drm_atomic_helper.h> 12 #include <drm/drm_gem_ttm_helper.h> 13 #include <drm/drm_ioctl.h> 14 #include <drm/drm_managed.h> 15 #include <drm/drm_print.h> 16 #include <drm/xe_drm.h> 17 18 #include "regs/xe_gt_regs.h" 19 #include "regs/xe_regs.h" 20 #include "xe_bo.h" 21 #include "xe_debugfs.h" 22 #include "xe_display.h" 23 #include "xe_dma_buf.h" 24 #include "xe_drm_client.h" 25 #include "xe_drv.h" 26 #include "xe_exec_queue.h" 27 #include "xe_exec.h" 28 #include "xe_ggtt.h" 29 #include "xe_gt.h" 30 #include "xe_gt_mcr.h" 31 #include "xe_irq.h" 32 #include "xe_mmio.h" 33 #include "xe_module.h" 34 #include "xe_pat.h" 35 #include "xe_pcode.h" 36 #include "xe_pm.h" 37 #include "xe_query.h" 38 #include "xe_tile.h" 39 #include "xe_ttm_stolen_mgr.h" 40 #include "xe_ttm_sys_mgr.h" 41 #include "xe_vm.h" 42 #include "xe_wait_user_fence.h" 43 #include "xe_hwmon.h" 44 45 #ifdef CONFIG_LOCKDEP 46 struct lockdep_map xe_device_mem_access_lockdep_map = { 47 .name = "xe_device_mem_access_lockdep_map" 48 }; 49 #endif 50 51 static int xe_file_open(struct drm_device *dev, struct drm_file *file) 52 { 53 struct xe_device *xe = to_xe_device(dev); 54 struct xe_drm_client *client; 55 struct xe_file *xef; 56 int ret = -ENOMEM; 57 58 xef = kzalloc(sizeof(*xef), GFP_KERNEL); 59 if (!xef) 60 return ret; 61 62 client = xe_drm_client_alloc(); 63 if (!client) { 64 kfree(xef); 65 return ret; 66 } 67 68 xef->drm = file; 69 xef->client = client; 70 xef->xe = xe; 71 72 mutex_init(&xef->vm.lock); 73 xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1); 74 75 mutex_init(&xef->exec_queue.lock); 76 xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); 77 78 spin_lock(&xe->clients.lock); 79 xe->clients.count++; 80 spin_unlock(&xe->clients.lock); 81 82 file->driver_priv = xef; 83 return 0; 84 } 85 86 static void device_kill_persistent_exec_queues(struct xe_device *xe, 87 struct xe_file *xef); 88 89 static void xe_file_close(struct drm_device *dev, struct drm_file *file) 90 { 91 struct xe_device *xe = to_xe_device(dev); 92 struct xe_file *xef = file->driver_priv; 93 struct xe_vm *vm; 94 struct xe_exec_queue *q; 95 unsigned long idx; 96 97 mutex_lock(&xef->exec_queue.lock); 98 xa_for_each(&xef->exec_queue.xa, idx, q) { 99 xe_exec_queue_kill(q); 100 xe_exec_queue_put(q); 101 } 102 mutex_unlock(&xef->exec_queue.lock); 103 xa_destroy(&xef->exec_queue.xa); 104 mutex_destroy(&xef->exec_queue.lock); 105 device_kill_persistent_exec_queues(xe, xef); 106 107 mutex_lock(&xef->vm.lock); 108 xa_for_each(&xef->vm.xa, idx, vm) 109 xe_vm_close_and_put(vm); 110 mutex_unlock(&xef->vm.lock); 111 xa_destroy(&xef->vm.xa); 112 mutex_destroy(&xef->vm.lock); 113 114 spin_lock(&xe->clients.lock); 115 xe->clients.count--; 116 spin_unlock(&xe->clients.lock); 117 118 xe_drm_client_put(xef->client); 119 kfree(xef); 120 } 121 122 static const struct drm_ioctl_desc xe_ioctls[] = { 123 DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW), 124 DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW), 125 DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl, 126 DRM_RENDER_ALLOW), 127 DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW), 128 DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW), 129 DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW), 130 DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW), 131 DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl, 132 DRM_RENDER_ALLOW), 133 DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl, 134 DRM_RENDER_ALLOW), 135 DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl, 136 DRM_RENDER_ALLOW), 137 DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl, 138 DRM_RENDER_ALLOW), 139 }; 140 141 static const struct file_operations xe_driver_fops = { 142 .owner = THIS_MODULE, 143 .open = drm_open, 144 .release = drm_release_noglobal, 145 .unlocked_ioctl = drm_ioctl, 146 .mmap = drm_gem_mmap, 147 .poll = drm_poll, 148 .read = drm_read, 149 .compat_ioctl = drm_compat_ioctl, 150 .llseek = noop_llseek, 151 #ifdef CONFIG_PROC_FS 152 .show_fdinfo = drm_show_fdinfo, 153 #endif 154 }; 155 156 static void xe_driver_release(struct drm_device *dev) 157 { 158 struct xe_device *xe = to_xe_device(dev); 159 160 pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL); 161 } 162 163 static struct drm_driver driver = { 164 /* Don't use MTRRs here; the Xserver or userspace app should 165 * deal with them for Intel hardware. 166 */ 167 .driver_features = 168 DRIVER_GEM | 169 DRIVER_RENDER | DRIVER_SYNCOBJ | 170 DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA, 171 .open = xe_file_open, 172 .postclose = xe_file_close, 173 174 .gem_prime_import = xe_gem_prime_import, 175 176 .dumb_create = xe_bo_dumb_create, 177 .dumb_map_offset = drm_gem_ttm_dumb_map_offset, 178 #ifdef CONFIG_PROC_FS 179 .show_fdinfo = xe_drm_client_fdinfo, 180 #endif 181 .release = &xe_driver_release, 182 183 .ioctls = xe_ioctls, 184 .num_ioctls = ARRAY_SIZE(xe_ioctls), 185 .fops = &xe_driver_fops, 186 .name = DRIVER_NAME, 187 .desc = DRIVER_DESC, 188 .date = DRIVER_DATE, 189 .major = DRIVER_MAJOR, 190 .minor = DRIVER_MINOR, 191 .patchlevel = DRIVER_PATCHLEVEL, 192 }; 193 194 static void xe_device_destroy(struct drm_device *dev, void *dummy) 195 { 196 struct xe_device *xe = to_xe_device(dev); 197 198 if (xe->ordered_wq) 199 destroy_workqueue(xe->ordered_wq); 200 201 if (xe->unordered_wq) 202 destroy_workqueue(xe->unordered_wq); 203 204 ttm_device_fini(&xe->ttm); 205 } 206 207 struct xe_device *xe_device_create(struct pci_dev *pdev, 208 const struct pci_device_id *ent) 209 { 210 struct xe_device *xe; 211 int err; 212 213 xe_display_driver_set_hooks(&driver); 214 215 err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); 216 if (err) 217 return ERR_PTR(err); 218 219 xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm); 220 if (IS_ERR(xe)) 221 return xe; 222 223 err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev, 224 xe->drm.anon_inode->i_mapping, 225 xe->drm.vma_offset_manager, false, false); 226 if (WARN_ON(err)) 227 goto err; 228 229 err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL); 230 if (err) 231 goto err; 232 233 xe->info.devid = pdev->device; 234 xe->info.revid = pdev->revision; 235 xe->info.force_execlist = xe_modparam.force_execlist; 236 237 spin_lock_init(&xe->irq.lock); 238 spin_lock_init(&xe->clients.lock); 239 240 init_waitqueue_head(&xe->ufence_wq); 241 242 drmm_mutex_init(&xe->drm, &xe->usm.lock); 243 xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); 244 245 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { 246 /* Trigger a large asid and an early asid wrap. */ 247 u32 asid; 248 249 BUILD_BUG_ON(XE_MAX_ASID < 2); 250 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL, 251 XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1), 252 &xe->usm.next_asid, GFP_KERNEL); 253 drm_WARN_ON(&xe->drm, err); 254 if (err >= 0) 255 xa_erase(&xe->usm.asid_to_vm, asid); 256 } 257 258 drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock); 259 INIT_LIST_HEAD(&xe->persistent_engines.list); 260 261 spin_lock_init(&xe->pinned.lock); 262 INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); 263 INIT_LIST_HEAD(&xe->pinned.external_vram); 264 INIT_LIST_HEAD(&xe->pinned.evicted); 265 266 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); 267 xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); 268 if (!xe->ordered_wq || !xe->unordered_wq) { 269 drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); 270 err = -ENOMEM; 271 goto err; 272 } 273 274 err = xe_display_create(xe); 275 if (WARN_ON(err)) 276 goto err; 277 278 return xe; 279 280 err: 281 return ERR_PTR(err); 282 } 283 284 /* 285 * The driver-initiated FLR is the highest level of reset that we can trigger 286 * from within the driver. It is different from the PCI FLR in that it doesn't 287 * fully reset the SGUnit and doesn't modify the PCI config space and therefore 288 * it doesn't require a re-enumeration of the PCI BARs. However, the 289 * driver-initiated FLR does still cause a reset of both GT and display and a 290 * memory wipe of local and stolen memory, so recovery would require a full HW 291 * re-init and saving/restoring (or re-populating) the wiped memory. Since we 292 * perform the FLR as the very last action before releasing access to the HW 293 * during the driver release flow, we don't attempt recovery at all, because 294 * if/when a new instance of i915 is bound to the device it will do a full 295 * re-init anyway. 296 */ 297 static void xe_driver_flr(struct xe_device *xe) 298 { 299 const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ 300 struct xe_gt *gt = xe_root_mmio_gt(xe); 301 int ret; 302 303 if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { 304 drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); 305 return; 306 } 307 308 drm_dbg(&xe->drm, "Triggering Driver-FLR\n"); 309 310 /* 311 * Make sure any pending FLR requests have cleared by waiting for the 312 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS 313 * to make sure it's not still set from a prior attempt (it's a write to 314 * clear bit). 315 * Note that we should never be in a situation where a previous attempt 316 * is still pending (unless the HW is totally dead), but better to be 317 * safe in case something unexpected happens 318 */ 319 ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); 320 if (ret) { 321 drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret); 322 return; 323 } 324 xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); 325 326 /* Trigger the actual Driver-FLR */ 327 xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR); 328 329 /* Wait for hardware teardown to complete */ 330 ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false); 331 if (ret) { 332 drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret); 333 return; 334 } 335 336 /* Wait for hardware/firmware re-init to complete */ 337 ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS, 338 flr_timeout, NULL, false); 339 if (ret) { 340 drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret); 341 return; 342 } 343 344 /* Clear sticky completion status */ 345 xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS); 346 } 347 348 static void xe_driver_flr_fini(struct drm_device *drm, void *arg) 349 { 350 struct xe_device *xe = arg; 351 352 if (xe->needs_flr_on_fini) 353 xe_driver_flr(xe); 354 } 355 356 static void xe_device_sanitize(struct drm_device *drm, void *arg) 357 { 358 struct xe_device *xe = arg; 359 struct xe_gt *gt; 360 u8 id; 361 362 for_each_gt(gt, xe, id) 363 xe_gt_sanitize(gt); 364 } 365 366 static int xe_set_dma_info(struct xe_device *xe) 367 { 368 unsigned int mask_size = xe->info.dma_mask_size; 369 int err; 370 371 dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev)); 372 373 err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); 374 if (err) 375 goto mask_err; 376 377 err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size)); 378 if (err) 379 goto mask_err; 380 381 return 0; 382 383 mask_err: 384 drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err); 385 return err; 386 } 387 388 /* 389 * Initialize MMIO resources that don't require any knowledge about tile count. 390 */ 391 int xe_device_probe_early(struct xe_device *xe) 392 { 393 int err; 394 395 err = xe_mmio_init(xe); 396 if (err) 397 return err; 398 399 err = xe_mmio_root_tile_init(xe); 400 if (err) 401 return err; 402 403 return 0; 404 } 405 406 static int xe_device_set_has_flat_ccs(struct xe_device *xe) 407 { 408 u32 reg; 409 int err; 410 411 if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs) 412 return 0; 413 414 struct xe_gt *gt = xe_root_mmio_gt(xe); 415 416 err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 417 if (err) 418 return err; 419 420 reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER); 421 xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE); 422 423 if (!xe->info.has_flat_ccs) 424 drm_dbg(&xe->drm, 425 "Flat CCS has been disabled in bios, May lead to performance impact"); 426 427 return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); 428 } 429 430 int xe_device_probe(struct xe_device *xe) 431 { 432 struct xe_tile *tile; 433 struct xe_gt *gt; 434 int err; 435 u8 id; 436 437 xe_pat_init_early(xe); 438 439 xe->info.mem_region_mask = 1; 440 err = xe_display_init_nommio(xe); 441 if (err) 442 return err; 443 444 err = xe_set_dma_info(xe); 445 if (err) 446 return err; 447 448 xe_mmio_probe_tiles(xe); 449 450 xe_ttm_sys_mgr_init(xe); 451 452 for_each_gt(gt, xe, id) 453 xe_force_wake_init_gt(gt, gt_to_fw(gt)); 454 455 for_each_tile(tile, xe, id) { 456 err = xe_ggtt_init_early(tile->mem.ggtt); 457 if (err) 458 return err; 459 } 460 461 err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe); 462 if (err) 463 return err; 464 465 for_each_gt(gt, xe, id) { 466 err = xe_pcode_probe(gt); 467 if (err) 468 return err; 469 } 470 471 err = xe_display_init_noirq(xe); 472 if (err) 473 return err; 474 475 err = xe_irq_install(xe); 476 if (err) 477 goto err; 478 479 for_each_gt(gt, xe, id) { 480 err = xe_gt_init_early(gt); 481 if (err) 482 goto err_irq_shutdown; 483 } 484 485 err = xe_device_set_has_flat_ccs(xe); 486 if (err) 487 goto err_irq_shutdown; 488 489 err = xe_mmio_probe_vram(xe); 490 if (err) 491 goto err_irq_shutdown; 492 493 for_each_tile(tile, xe, id) { 494 err = xe_tile_init_noalloc(tile); 495 if (err) 496 goto err_irq_shutdown; 497 } 498 499 /* Allocate and map stolen after potential VRAM resize */ 500 xe_ttm_stolen_mgr_init(xe); 501 502 /* 503 * Now that GT is initialized (TTM in particular), 504 * we can try to init display, and inherit the initial fb. 505 * This is the reason the first allocation needs to be done 506 * inside display. 507 */ 508 err = xe_display_init_noaccel(xe); 509 if (err) 510 goto err_irq_shutdown; 511 512 for_each_gt(gt, xe, id) { 513 err = xe_gt_init(gt); 514 if (err) 515 goto err_irq_shutdown; 516 } 517 518 xe_heci_gsc_init(xe); 519 520 err = xe_display_init(xe); 521 if (err) 522 goto err_irq_shutdown; 523 524 err = drm_dev_register(&xe->drm, 0); 525 if (err) 526 goto err_fini_display; 527 528 xe_display_register(xe); 529 530 xe_debugfs_register(xe); 531 532 xe_hwmon_register(xe); 533 534 err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe); 535 if (err) 536 return err; 537 538 return 0; 539 540 err_fini_display: 541 xe_display_driver_remove(xe); 542 543 err_irq_shutdown: 544 xe_irq_shutdown(xe); 545 err: 546 xe_display_fini(xe); 547 return err; 548 } 549 550 static void xe_device_remove_display(struct xe_device *xe) 551 { 552 xe_display_unregister(xe); 553 554 drm_dev_unplug(&xe->drm); 555 xe_display_driver_remove(xe); 556 } 557 558 void xe_device_remove(struct xe_device *xe) 559 { 560 xe_device_remove_display(xe); 561 562 xe_display_fini(xe); 563 564 xe_heci_gsc_fini(xe); 565 566 xe_irq_shutdown(xe); 567 } 568 569 void xe_device_shutdown(struct xe_device *xe) 570 { 571 } 572 573 void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q) 574 { 575 mutex_lock(&xe->persistent_engines.lock); 576 list_add_tail(&q->persistent.link, &xe->persistent_engines.list); 577 mutex_unlock(&xe->persistent_engines.lock); 578 } 579 580 void xe_device_remove_persistent_exec_queues(struct xe_device *xe, 581 struct xe_exec_queue *q) 582 { 583 mutex_lock(&xe->persistent_engines.lock); 584 if (!list_empty(&q->persistent.link)) 585 list_del(&q->persistent.link); 586 mutex_unlock(&xe->persistent_engines.lock); 587 } 588 589 static void device_kill_persistent_exec_queues(struct xe_device *xe, 590 struct xe_file *xef) 591 { 592 struct xe_exec_queue *q, *next; 593 594 mutex_lock(&xe->persistent_engines.lock); 595 list_for_each_entry_safe(q, next, &xe->persistent_engines.list, 596 persistent.link) 597 if (q->persistent.xef == xef) { 598 xe_exec_queue_kill(q); 599 list_del_init(&q->persistent.link); 600 } 601 mutex_unlock(&xe->persistent_engines.lock); 602 } 603 604 void xe_device_wmb(struct xe_device *xe) 605 { 606 struct xe_gt *gt = xe_root_mmio_gt(xe); 607 608 wmb(); 609 if (IS_DGFX(xe)) 610 xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0); 611 } 612 613 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) 614 { 615 return xe_device_has_flat_ccs(xe) ? 616 DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0; 617 } 618 619 bool xe_device_mem_access_ongoing(struct xe_device *xe) 620 { 621 if (xe_pm_read_callback_task(xe) != NULL) 622 return true; 623 624 return atomic_read(&xe->mem_access.ref); 625 } 626 627 void xe_device_assert_mem_access(struct xe_device *xe) 628 { 629 XE_WARN_ON(!xe_device_mem_access_ongoing(xe)); 630 } 631 632 bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe) 633 { 634 bool active; 635 636 if (xe_pm_read_callback_task(xe) == current) 637 return true; 638 639 active = xe_pm_runtime_get_if_active(xe); 640 if (active) { 641 int ref = atomic_inc_return(&xe->mem_access.ref); 642 643 xe_assert(xe, ref != S32_MAX); 644 } 645 646 return active; 647 } 648 649 void xe_device_mem_access_get(struct xe_device *xe) 650 { 651 int ref; 652 653 /* 654 * This looks racy, but should be fine since the pm_callback_task only 655 * transitions from NULL -> current (and back to NULL again), during the 656 * runtime_resume() or runtime_suspend() callbacks, for which there can 657 * only be a single one running for our device. We only need to prevent 658 * recursively calling the runtime_get or runtime_put from those 659 * callbacks, as well as preventing triggering any access_ongoing 660 * asserts. 661 */ 662 if (xe_pm_read_callback_task(xe) == current) 663 return; 664 665 /* 666 * Since the resume here is synchronous it can be quite easy to deadlock 667 * if we are not careful. Also in practice it might be quite timing 668 * sensitive to ever see the 0 -> 1 transition with the callers locks 669 * held, so deadlocks might exist but are hard for lockdep to ever see. 670 * With this in mind, help lockdep learn about the potentially scary 671 * stuff that can happen inside the runtime_resume callback by acquiring 672 * a dummy lock (it doesn't protect anything and gets compiled out on 673 * non-debug builds). Lockdep then only needs to see the 674 * mem_access_lockdep_map -> runtime_resume callback once, and then can 675 * hopefully validate all the (callers_locks) -> mem_access_lockdep_map. 676 * For example if the (callers_locks) are ever grabbed in the 677 * runtime_resume callback, lockdep should give us a nice splat. 678 */ 679 lock_map_acquire(&xe_device_mem_access_lockdep_map); 680 lock_map_release(&xe_device_mem_access_lockdep_map); 681 682 xe_pm_runtime_get(xe); 683 ref = atomic_inc_return(&xe->mem_access.ref); 684 685 xe_assert(xe, ref != S32_MAX); 686 687 } 688 689 void xe_device_mem_access_put(struct xe_device *xe) 690 { 691 int ref; 692 693 if (xe_pm_read_callback_task(xe) == current) 694 return; 695 696 ref = atomic_dec_return(&xe->mem_access.ref); 697 xe_pm_runtime_put(xe); 698 699 xe_assert(xe, ref >= 0); 700 } 701