// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS-initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
 * RAM) and S4 (suspend to disk). The main functions here are `xe_pm_suspend`
 * and `xe_pm_resume`; they are the entry points for suspending to and
 * resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low
 * power state D3, controlled by the PCI subsystem and ACPI with the help of
 * the runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power is kept on so memory
 * stays alive for a quicker, lower-latency resume, or D3cold, where Vcc power
 * is off for better power savings.
 * The Vcc of a PCI hierarchy can only be controlled at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot
 * perform the transition to D3cold on its own; the lowest runtime PM state
 * possible from the PCI subsystem is D3hot. Then, once all the paired devices
 * in the same root port are in D3hot, ACPI assists and runs its own methods
 * (_PR3 and _OFF) to perform the transition from D3hot to D3cold. Xe may
 * disallow this transition by calling pci_d3cold_disable(root_pdev) before
 * going to runtime suspend, based on runtime conditions such as VRAM usage,
 * for instance to guarantee a quick, low-latency resume.
 *
 * Runtime PM - This infrastructure, provided by the Linux kernel, allows
 * device drivers to indicate when they can be runtime suspended, so that the
 * device can be put into D3 (if supported), or deeper package sleep states
 * (PC-states) and/or other low-level power states can be reached. The Xe PM
 * component provides the `xe_pm_runtime_suspend` and `xe_pm_runtime_resume`
 * functions that the PCI subsystem calls before transitioning to/from runtime
 * suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver uses to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible these get and put functions need to be called
 * from the higher/outer levels. The main cases that need to be protected from
 * the outer levels are: IOCTL, sysfs, debugfs, dma-buf sharing, GPU execution
 * (see the example below).
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */
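
/*
 * Example: a minimal sketch (not part of the driver) of how an outer entry
 * point such as an IOCTL handler is expected to hold a runtime PM reference
 * around the work it does. xe_foo_ioctl() and do_foo() are hypothetical
 * names used purely for illustration:
 *
 *	int xe_foo_ioctl(struct drm_device *dev, void *data,
 *			 struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = do_foo(xe, data);
 *
 *		xe_pm_runtime_put(xe);
 *		return ret;
 *	}
 */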

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable;
}
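
/*
 * Example: a minimal sketch, assuming a hypothetical shrinker-like path, of
 * how this helper can gate a synchronous wakeup from reclaim context:
 *
 *	if (xe_rpm_reclaim_safe(xe)) {
 *		// resume cannot recurse into reclaim, safe to wake the device
 *		xe_pm_runtime_get(xe);
 *	} else if (!xe_pm_runtime_get_if_active(xe)) {
 *		// device not awake: skip the work rather than risk a deadlock
 *		return 0;
 *	}
 */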

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err_pxp;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto err_display;
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

err_display:
	xe_display_pm_resume(xe);
err_pxp:
	xe_pxp_pm_resume(xe->pxp);
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	xe_pxp_pm_resume(xe->pxp);

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
	 *       this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

static u32 vram_threshold_value(struct xe_device *xe)
{
	/* FIXME: D3Cold temporarily disabled by default on BMG */
	if (xe->info.platform == XE_BATTLEMAGE)
		return 0;

	return DEFAULT_VRAM_THRESHOLD;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	u32 vram_threshold;
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		vram_threshold = vram_threshold_value(xe);
		err = xe_pm_set_vram_threshold(xe, vram_threshold);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as a non-reliable assertion, to ensure that we are not
 * in the sleep state while trying to access some memory for instance.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}
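
/*
 * Example: a minimal sketch of using the above as a best-effort assertion in
 * a path that must not run while the device sleeps:
 *
 *	xe_assert(xe, !xe_pm_runtime_suspended(xe));
 */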

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_runtime_pm_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto out;

	/*
	 * Take the lock for the entire list operation, since xe_ttm_bo_destroy
	 * and xe_bo_move_notify also check for and delete bo entries from the
	 * user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out_resume;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out_resume;
	}

	xe_irq_suspend(xe);

	xe_display_pm_runtime_suspend_late(xe);

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return 0;

out_resume:
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}

	xe_pxp_pm_resume(xe->pxp);

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
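
/*
 * Example: a minimal sketch, with hypothetical names, of an opportunistic
 * path that only does the work when the device is already in use and
 * silently skips it otherwise:
 *
 *	if (!xe_pm_runtime_get_if_in_use(xe))
 *		return;				// idle or suspending: nothing to do
 *
 *	update_foo_counters(xe);		// hypothetical HW access
 *	xe_pm_runtime_put(xe);
 */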

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING ||
		pm_suspend_target_state != PM_SUSPEND_ON;
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should be put back after this function regardless, since it
 * will always bump the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}
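
/*
 * Example: a minimal sketch of an inner helper that relies on an outer
 * caller's xe_pm_runtime_get() but still pins the reference across its own
 * scope. inner_foo_update() is a hypothetical name:
 *
 *	static void inner_foo_update(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get_noresume(xe);	// warns if outer protection is missing
 *
 *		...				// touch HW, device known to be awake
 *
 *		xe_pm_runtime_put(xe);
 *	}
 */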

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
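
/*
 * Example: a minimal sketch, assuming a hypothetical deferred work item
 * (foo_work below), where a synchronous resume is acceptable and a failed
 * wakeup simply drops the work:
 *
 *	static void foo_work_fn(struct work_struct *w)
 *	{
 *		struct xe_device *xe = container_of(w, struct xe_device, foo_work);
 *
 *		if (!xe_pm_runtime_resume_and_get(xe))
 *			return;			// wakeup failed, drop the work
 *
 *		...				// do the deferred HW access
 *
 *		xe_pm_runtime_put(xe);
 *	}
 */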

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbound pcie parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3Cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}
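
/*
 * Example: a sketch of the expected call site, the PCI runtime idle callback
 * (see xe_pci.c), re-evaluating D3Cold eligibility ahead of each runtime
 * suspend:
 *
 *	static int xe_pci_runtime_idle(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *		return 0;
 *	}
 */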

/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}