// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
 * S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`;
 * they are the entry points for suspending to and resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI with help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep memory
 * alive and allow a quicker, low latency resume, or D3Cold, where Vcc power is
 * off for better power savings.
 * Vcc control of the PCI hierarchy can only happen at the PCI root port level,
 * while the device driver can be behind multiple bridges/switches and paired
 * with other devices. For this reason, the PCI subsystem cannot perform the
 * transition towards D3Cold on its own. The lowest runtime PM state possible
 * from the PCI subsystem is D3hot. Then, if all the paired devices under the
 * same root port are in D3hot, ACPI will assist and run its own methods
 * (_PR3 and _OFF) to perform the transition from D3hot to D3Cold. Xe may
 * disallow this transition by calling pci_d3cold_disable(root_pdev) before
 * going to runtime suspend, based on runtime conditions such as VRAM usage,
 * in order to guarantee a quick and low latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows the
 * device drivers to indicate when they can be runtime suspended, so the device
 * could be put in D3 (if supported), or allow deeper package sleep states
 * (PC-states), and/or other low level power states. The Xe PM component
 * provides `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that
 * the PCI subsystem will call on the transitions to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver will use to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */
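
/*
 * A minimal sketch of the outer-level protection described above, using the
 * helpers defined later in this file. The ioctl handler and the
 * do_the_actual_work() helper below are hypothetical and only illustrate the
 * intended pattern: take the runtime PM reference at the outermost level,
 * before any memory management locks, and drop it once the work is done.
 * Note that the reference is dropped unconditionally, because the usage
 * counter is bumped even when the synchronous resume fails.
 *
 *	static int xe_foo_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = do_the_actual_work(xe, data);
 *		xe_pm_runtime_put(xe);
 *
 *		return ret;
 *	}
 */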

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable;
}

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err_pxp;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto err_display;
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

err_display:
	xe_display_pm_resume(xe);
err_pxp:
	xe_pxp_pm_resume(xe->pxp);
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	xe_pxp_pm_resume(xe->pxp);

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
	 *       this option for integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended, as it might be racing with the runtime state transitions.
 * It can only be used as a best-effort assertion, for instance to ensure that
 * we are not in the sleep state while trying to access some memory.
 *
 * Returns true if the PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However,
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_pm_runtime_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since the rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0, which prevents CPU1 from making forward progress.  With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto out;

	/*
	 * Hold the lock for the entire list operation, as xe_ttm_bo_destroy()
	 * and xe_bo_move_notify() also check for and delete bo entries from the
	 * user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out_resume;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out_resume;
	}

	xe_irq_suspend(xe);

	xe_display_pm_runtime_suspend_late(xe);

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return 0;

out_resume:
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}

	xe_pxp_pm_resume(xe->pxp);

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the caller's locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds).  Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example, if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been taken,
 * and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING ||
		pm_suspend_target_state != PM_SUSPEND_ON;
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places that are surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference must still be put back after this call, since it always bumps
 * the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}
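
/*
 * A minimal sketch of the inner vs. outer usage described above. Both
 * functions in this example are hypothetical; the point is that the outer
 * level owns the resuming reference, while the inner level only bumps the
 * usage counter without ever triggering a resume (and therefore without
 * taking the locks the resume path needs).
 *
 *	static void xe_inner_helper(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get_noresume(xe);	// outer caller keeps us awake
 *
 *		// ... touch registers or VRAM ...
 *
 *		xe_pm_runtime_put(xe);
 *	}
 *
 *	static void xe_outer_path(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get(xe);		// may resume synchronously
 *		xe_inner_helper(xe);
 *		xe_pm_runtime_put(xe);
 *	}
 */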

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM.\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}

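/*
 * A sketch of the expected caller of the toggle above. The runtime_pm idle
 * callback lives in the PCI glue code, not here; the function below only
 * illustrates the pattern of re-evaluating the D3Cold conditions on every
 * idle event, right before the runtime suspend callback may run:
 *
 *	static int xe_pci_runtime_idle(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *
 *		return 0;
 *	}
 */
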
/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}