// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
 * ram), or S4 (suspend to disk). The main functions here are `xe_pm_suspend`
 * and `xe_pm_resume`. They are the entry points for suspending to and
 * resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low
 * power state D3, controlled by the PCI subsystem and ACPI with the help of
 * the runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep
 * memory alive and allow a quicker, lower-latency resume, or D3cold, where
 * Vcc power is off for better power savings.
 * The Vcc power of a PCI hierarchy can only be controlled at the PCI root
 * port level, while the device driver can be behind multiple bridges/switches
 * and paired with other devices. For this reason, the PCI subsystem cannot
 * perform the transition towards D3cold. The lowest runtime PM state possible
 * from the PCI subsystem is D3hot. Then, if all the paired devices in the
 * same root port are in D3hot, ACPI will assist and run its own methods
 * (_PR3 and _OFF) to perform the transition from D3hot to D3cold. Xe may
 * disallow this transition by calling pci_d3cold_disable(root_pdev) before
 * going to runtime suspend, based on runtime conditions such as VRAM usage,
 * e.g. to guarantee a quick, low-latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows device
 * drivers to indicate when they can be runtime suspended, so the device can
 * be put in D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component
 * provides `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that
 * the PCI subsystem will call on transitions to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be
 * called from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */
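/*
 * For example, a minimal sketch of the outer-bound protection described
 * above, for a hypothetical sysfs show callback. example_show() and
 * example_read_hw_counter() are illustrative only, not part of this driver;
 * any HW or memory access happens between the get and the put, so the inner
 * layers never need to wake the device themselves:
 *
 *	static ssize_t example_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *		u32 val;
 *
 *		xe_pm_runtime_get(xe);
 *		val = example_read_hw_counter(xe);
 *		xe_pm_runtime_put(xe);
 *
 *		return sysfs_emit(buf, "%u\n", val);
 *	}
 */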
#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable && !xe->info.has_sriov;
}

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe);
			goto err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory, which is the memory required for
	 * the GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}
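/*
 * For reference, the D3cold veto described in the DOC section above amounts
 * to toggling the root port before runtime suspend, roughly as sketched
 * below. This is illustrative only; in this driver the decision is computed
 * by xe_pm_d3cold_allowed_toggle() further down and acted upon from the PCI
 * glue code:
 *
 *	struct pci_dev *root_pdev =
 *		pcie_find_root_port(to_pci_dev(xe->drm.dev));
 *
 *	if (root_pdev) {
 *		if (xe->d3cold.allowed)
 *			pci_d3cold_enable(root_pdev);
 *		else
 *			pci_d3cold_disable(root_pdev);
 *	}
 */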
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 * this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as an unreliable assertion, to ensure that we are not
 * in the sleep state while trying to access some memory for instance.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}
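/*
 * A sketch of the assertion-style usage described above. Both helpers below
 * are hypothetical, not part of this driver: warn when a register read is
 * attempted while the device may be asleep, without relying on the check for
 * correctness since the state can change concurrently.
 *
 *	static u32 example_read_reg(struct xe_device *xe, u32 offset)
 *	{
 *		drm_WARN_ON(&xe->drm, xe_pm_runtime_suspended(xe));
 *		return example_mmio_read32(xe, offset);
 *	}
 */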
/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us.
	 * However we still need to be very careful with the locks that this
	 * callback acquires and the locks that are acquired and held by any
	 * callers of xe_pm_runtime_get(). We already have the matching
	 * annotation on that side, but we also need it here. For example
	 * lockdep should be able to tell us if the following scenario is in
	 * theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	/*
	 * Hold the lock for the entire list operation, as xe_ttm_bo_destroy()
	 * and xe_bo_move_notify() also check and delete bo entries from the
	 * user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);

	xe_display_pm_runtime_suspend_late(xe);

out:
	if (err)
		xe_display_pm_runtime_resume(xe);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory, which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the callers locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}
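/*
 * A sketch of the intended ioctl-side pairing (the handler name is
 * illustrative). Note that the reference is put back even when the get
 * fails, since pm_runtime_get_sync() bumps the usage counter regardless of
 * its return value:
 *
 *	static int example_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = example_handle(xe, data);
 *		xe_pm_runtime_put(xe);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */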
/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
	       dev->power.runtime_status == RPM_RESUMING ||
	       pm_suspend_target_state != PM_SUSPEND_ON;
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should always be put back after this function, since it
 * bumps the usage counter regardless.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
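/*
 * A sketch of waking the device from a worker, where failing to resume
 * simply aborts the work. struct example_work, example_work_fn() and
 * example_process() are hypothetical, not part of this driver:
 *
 *	static void example_work_fn(struct work_struct *w)
 *	{
 *		struct example_work *ew = container_of(w, struct example_work, work);
 *		struct xe_device *xe = ew->xe;
 *
 *		if (!xe_pm_runtime_resume_and_get(xe))
 *			return;
 *
 *		example_process(xe, ew);
 *
 *		xe_pm_runtime_put(xe);
 *	}
 */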
/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbound pcie parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during the runtime_pm idle callback.
 * Check for all the D3cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}
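/*
 * Worked example (assuming DEFAULT_VRAM_THRESHOLD from xe_pm.h is 300, i.e.
 * 300 MiB): an idle snapshot with 120 MiB of VRAM in use gives 120 < 300, so
 * d3cold.allowed becomes true and the PCI/ACPI layers may take the device
 * all the way to D3cold. With 2048 MiB in use, 2048 >= 300 turns
 * d3cold.allowed false and the device stops at D3hot, keeping VRAM powered
 * rather than paying the full eviction on suspend and restore on resume.
 */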
/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}