// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to
 * ram), S4 (suspend to disk). The main functions here are `xe_pm_suspend` and
 * `xe_pm_resume`. They are the entry points for suspending to and resuming
 * from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low
 * power state D3, controlled by the PCI subsystem and ACPI with the help of
 * the runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power is on for keeping
 * memory alive and allowing a quicker, lower latency resume, or D3cold, where
 * Vcc power is off for better power savings.
 * The Vcc power of a PCI hierarchy can only be removed at the PCI root port
 * level, while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3cold; the lowest runtime PM state possible from the
 * PCI subsystem is D3hot. Then, if all the paired devices under the same root
 * port are in D3hot, ACPI will assist and run its own methods (_PR3 and _OFF)
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend, based on runtime conditions such as VRAM usage, for instance to
 * guarantee a quick, low latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * can be put in D3 (if supported), or deeper package sleep states (PC-states)
 * and/or other low level power states can be allowed. The Xe PM component
 * provides `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that
 * the PCI subsystem will call on transitions to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */
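/*
 * A minimal sketch of the outer-level get/put pattern described above,
 * wrapping a hardware access in an IOCTL handler. Illustrative only:
 * my_xe_ioctl() and do_hw_access() are hypothetical names, not functions
 * defined in this driver.
 *
 *	static int my_xe_ioctl(struct drm_device *dev, void *data,
 *			       struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		xe_pm_runtime_get(xe);
 *		ret = do_hw_access(xe, data);
 *		xe_pm_runtime_put(xe);
 *
 *		return ret;
 *	}
 */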

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context,
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable;
}

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err_pxp;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto err_display;
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

err_display:
	xe_display_pm_resume(xe);
err_pxp:
	xe_pxp_pm_resume(xe->pxp);
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory, which is the memory required for
	 * the GT(s) to resume.
	 */
	err = xe_bo_restore_early(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_late(xe);
	if (err)
		goto err;

	xe_pxp_pm_resume(xe->pxp);

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
	 *       this option on integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

static u32 vram_threshold_value(struct xe_device *xe)
{
	/* FIXME: D3Cold temporarily disabled by default on BMG */
	if (xe->info.platform == XE_BATTLEMAGE)
		return 0;

	return DEFAULT_VRAM_THRESHOLD;
}

static int xe_pm_notifier_callback(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
	int err = 0;

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		xe_pm_runtime_get(xe);
		err = xe_bo_evict_all_user(xe);
		if (err) {
			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
			xe_pm_runtime_put(xe);
			break;
		}

		err = xe_bo_notifier_prepare_all_pinned(xe);
		if (err) {
			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
			xe_pm_runtime_put(xe);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		xe_bo_notifier_unprepare_all_pinned(xe);
		xe_pm_runtime_put(xe);
		break;
	}

	if (err)
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	u32 vram_threshold;
	int err;

	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
	err = register_pm_notifier(&xe->pm_notifier);
	if (err)
		return err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			goto err_unregister;

		vram_threshold = vram_threshold_value(xe);
		err = xe_pm_set_vram_threshold(xe, vram_threshold);
		if (err)
			goto err_unregister;
	}

	xe_pm_runtime_init(xe);
	return 0;

err_unregister:
	unregister_pm_notifier(&xe->pm_notifier);
	return err;
}

static void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

/**
 * xe_pm_fini - Finalize PM
 * @xe: xe device instance
 */
void xe_pm_fini(struct xe_device *xe)
{
	if (xe_device_uc_enabled(xe))
		xe_pm_runtime_fini(xe);

	unregister_pm_notifier(&xe->pm_notifier);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as an unreliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
 *
 * Returns true if the PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

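/*
 * A minimal, illustrative sketch of the sort of unreliable assertion the
 * comment above has in mind; my_mmio_read() is a hypothetical helper, not
 * part of this driver:
 *
 *	drm_WARN_ON(&xe->drm, xe_pm_runtime_suspended(xe));
 *	val = my_mmio_read(xe, reg);
 */
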
/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_runtime_pm_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0, which prevents CPU1 making forward progress. With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto out;

	/*
	 * Hold the lock over the entire list operation, as xe_ttm_bo_destroy()
	 * and xe_bo_move_notify() also check and delete bo entries from the
	 * user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out_resume;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out_resume;
	}

	xe_irq_suspend(xe);

	xe_display_pm_runtime_suspend_late(xe);

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return 0;

out_resume:
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory, which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_early(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_late(xe);
		if (err)
			goto out;
	}

	xe_pxp_pm_resume(xe->pxp);

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds). Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}

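/*
 * A sketch of the expected ioctl-entry usage (illustrative; do_ioctl_work()
 * is a hypothetical stand-in for the real handler body). Note that
 * pm_runtime_get_sync() bumps the usage counter even on failure, so the
 * reference is put back on both paths:
 *
 *	ret = xe_pm_runtime_get_ioctl(xe);
 *	if (ret >= 0)
 *		ret = do_ioctl_work(xe, data);
 *	xe_pm_runtime_put(xe);
 */
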
/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken; false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

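/*
 * Illustrative sketch of the conditional-reference pattern this helper
 * enables: optional work can be skipped entirely rather than waking a
 * suspended device. do_optional_hw_housekeeping() is a hypothetical name:
 *
 *	if (!xe_pm_runtime_get_if_in_use(xe))
 *		return;
 *	do_optional_hw_housekeeping(xe);
 *	xe_pm_runtime_put(xe);
 */
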
/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING ||
		pm_suspend_target_state != PM_SUSPEND_ON;
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should always be put back after this function, since it will
 * always bump the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbound PCIe parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent PCI bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during the runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	/* Allow D3Cold only while VRAM usage is below the threshold */
	xe->d3cold.allowed = total_vram_used_mb < xe->d3cold.vram_threshold;

	mutex_unlock(&xe->d3cold.lock);
}

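/*
 * Worked example of the threshold semantics above (illustrative): with the
 * comparison "used < threshold", a threshold of 0 MiB can never be satisfied,
 * so d3cold.allowed stays false - which is how vram_threshold_value() disables
 * D3Cold by default on BMG. Conversely, a device with 100 MiB of VRAM in use
 * and a threshold of 300 MiB would have D3Cold allowed on the next idle.
 */
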
/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}