xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision 44343e8b250abb2f6bfd615493ca07a7f11f3cc2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_pm.h"
7 
8 #include <linux/fault-inject.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/suspend.h>
11 
12 #include <drm/drm_managed.h>
13 #include <drm/ttm/ttm_placement.h>
14 
15 #include "display/xe_display.h"
16 #include "xe_bo.h"
17 #include "xe_bo_evict.h"
18 #include "xe_device.h"
19 #include "xe_ggtt.h"
20 #include "xe_gt.h"
21 #include "xe_gt_idle.h"
22 #include "xe_i2c.h"
23 #include "xe_irq.h"
24 #include "xe_pcode.h"
25 #include "xe_pxp.h"
26 #include "xe_sriov_vf_ccs.h"
27 #include "xe_trace.h"
28 #include "xe_wa.h"
29 
30 /**
31  * DOC: Xe Power Management
32  *
33  * Xe PM implements the main routines for both system level suspend states and
34  * for the opportunistic runtime suspend states.
35  *
36  * System Level Suspend (S-States) - In general this is OS-initiated suspend
37  * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to RAM)
38  * and S4 (suspend to disk). The main functions here are `xe_pm_suspend` and
39  * `xe_pm_resume`, the entry points for suspending to and resuming from these states.
40  *
41  * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
42  * state D3, controlled by the PCI subsystem and ACPI with help from the
43  * runtime_pm infrastructure.
44  * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep memory
45  * alive and allow a quicker, low latency resume, or D3cold, where Vcc power is
46  * off for better power savings.
47  * The Vcc power of the PCI hierarchy can only be controlled at the PCI root port
48  * level, while the device driver can be behind multiple bridges/switches and
49  * paired with other devices. For this reason, the PCI subsystem cannot perform
50  * the transition towards D3cold. The lowest runtime PM state possible from the
51  * PCI subsystem is D3hot. Then, if all the paired devices under the same root
52  * port are in D3hot, ACPI will assist and run its own methods (_PR3 and _OFF)
53  * to perform the transition from D3hot to D3cold. Xe may disallow this
54  * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
55  * suspend, based on runtime conditions such as VRAM usage, for instance to
56  * guarantee a quick, low latency resume.
57  *
58  * Runtime PM - This infrastructure provided by the Linux kernel allows
59  * device drivers to indicate when they can be runtime suspended, so the device
60  * can be put into D3 (if supported), or allow deeper package sleep states
61  * (PC-states), and/or other low level power states. The Xe PM component provides
62  * the `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
63  * subsystem calls on the transitions to and from runtime suspend.
64  *
65  * Also, Xe PM provides get and put functions that the Xe driver uses to
66  * indicate activity. In order to avoid locking complications with the memory
67  * management, whenever possible, these get and put functions need to be called
68  * from the higher/outer levels.
69  * The main cases that need to be protected from the outer levels are: IOCTL,
70  * sysfs, debugfs, dma-buf sharing, GPU execution (see the sketch below this comment).
71  *
72  * This component is not responsible for GT idleness (RC6) or GT frequency
73  * management (RPS).
74  */
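
/*
 * Illustrative sketch, not part of the upstream file: the outer-level
 * protection described above, shown around a hypothetical ioctl handler.
 * Only the handler name and its inner work are made up; the PM helpers are
 * the ones implemented below:
 *
 *	static int xe_foo_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = ...;	(the actual ioctl work, with the device awake)
 *
 *		xe_pm_runtime_put(xe);
 *		return ret;
 *	}
 */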
75 
76 #ifdef CONFIG_LOCKDEP
77 static struct lockdep_map xe_pm_runtime_d3cold_map = {
78 	.name = "xe_rpm_d3cold_map"
79 };
80 
81 static struct lockdep_map xe_pm_runtime_nod3cold_map = {
82 	.name = "xe_rpm_nod3cold_map"
83 };
84 #endif
85 
86 /**
87  * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
88  * @xe: The xe device.
89  *
90  * Return: true if it is safe to runtime resume from reclaim context,
91  * false otherwise.
92  */
93 bool xe_rpm_reclaim_safe(const struct xe_device *xe)
94 {
95 	return !xe->d3cold.capable;
96 }
97 
98 static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
99 {
100 	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
101 			 &xe_pm_runtime_nod3cold_map :
102 			 &xe_pm_runtime_d3cold_map);
103 }
104 
105 static void xe_rpm_lockmap_release(const struct xe_device *xe)
106 {
107 	lock_map_release(xe_rpm_reclaim_safe(xe) ?
108 			 &xe_pm_runtime_nod3cold_map :
109 			 &xe_pm_runtime_d3cold_map);
110 }
111 
112 /**
113  * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
114  * @xe: xe device instance
115  *
116  * Return: 0 on success
117  */
118 int xe_pm_suspend(struct xe_device *xe)
119 {
120 	struct xe_gt *gt;
121 	u8 id;
122 	int err;
123 
124 	drm_dbg(&xe->drm, "Suspending device\n");
125 	trace_xe_pm_suspend(xe, __builtin_return_address(0));
126 
127 	err = xe_pxp_pm_suspend(xe->pxp);
128 	if (err)
129 		goto err;
130 
131 	for_each_gt(gt, xe, id)
132 		xe_gt_suspend_prepare(gt);
133 
134 	xe_display_pm_suspend(xe);
135 
136 	/* FIXME: Super racy... */
137 	err = xe_bo_evict_all(xe);
138 	if (err)
139 		goto err_display;
140 
141 	for_each_gt(gt, xe, id) {
142 		err = xe_gt_suspend(gt);
143 		if (err)
144 			goto err_display;
145 	}
146 
147 	xe_irq_suspend(xe);
148 
149 	xe_display_pm_suspend_late(xe);
150 
151 	xe_i2c_pm_suspend(xe);
152 
153 	drm_dbg(&xe->drm, "Device suspended\n");
154 	return 0;
155 
156 err_display:
157 	xe_display_pm_resume(xe);
158 	xe_pxp_pm_resume(xe->pxp);
159 err:
160 	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
161 	return err;
162 }
163 
164 /**
165  * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
166  * @xe: xe device instance
167  *
168  * Return: 0 on success, negative error code on failure.
169  */
170 int xe_pm_resume(struct xe_device *xe)
171 {
172 	struct xe_tile *tile;
173 	struct xe_gt *gt;
174 	u8 id;
175 	int err;
176 
177 	drm_dbg(&xe->drm, "Resuming device\n");
178 	trace_xe_pm_resume(xe, __builtin_return_address(0));
179 
180 	for_each_gt(gt, xe, id)
181 		xe_gt_idle_disable_c6(gt);
182 
183 	for_each_tile(tile, xe, id)
184 		xe_wa_apply_tile_workarounds(tile);
185 
186 	err = xe_pcode_ready(xe, true);
187 	if (err)
188 		return err;
189 
190 	xe_display_pm_resume_early(xe);
191 
192 	/*
193 	 * This only restores pinned memory which is the memory required for the
194 	 * GT(s) to resume.
195 	 */
196 	err = xe_bo_restore_early(xe);
197 	if (err)
198 		goto err;
199 
200 	xe_i2c_pm_resume(xe, xe->d3cold.allowed);
201 
202 	xe_irq_resume(xe);
203 
204 	for_each_gt(gt, xe, id)
205 		xe_gt_resume(gt);
206 
207 	xe_display_pm_resume(xe);
208 
209 	err = xe_bo_restore_late(xe);
210 	if (err)
211 		goto err;
212 
213 	xe_pxp_pm_resume(xe->pxp);
214 
215 	if (IS_SRIOV_VF(xe))
216 		xe_sriov_vf_ccs_register_context(xe);
217 
218 	drm_dbg(&xe->drm, "Device resumed\n");
219 	return 0;
220 err:
221 	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
222 	return err;
223 }
224 
225 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
226 {
227 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
228 	struct pci_dev *root_pdev;
229 
230 	root_pdev = pcie_find_root_port(pdev);
231 	if (!root_pdev)
232 		return false;
233 
234 	/* D3Cold requires PME capability */
235 	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
236 		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
237 		return false;
238 	}
239 
240 	/* D3Cold requires _PR3 power resource */
241 	if (!pci_pr3_present(root_pdev)) {
242 		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
243 		return false;
244 	}
245 
246 	return true;
247 }
248 
249 static void xe_pm_runtime_init(struct xe_device *xe)
250 {
251 	struct device *dev = xe->drm.dev;
252 
253 	/* Our current VFs do not support RPM, so disable it */
254 	if (IS_SRIOV_VF(xe))
255 		return;
256 
257 	/*
258 	 * Disable the system suspend direct complete optimization.
259 	 * We need to ensure that the regular device suspend/resume functions
260 	 * are called since our runtime_pm cannot guarantee local memory
261 	 * eviction for d3cold.
262 	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
263 	 *       this option to integrated graphics as well.
264 	 */
265 	if (IS_DGFX(xe))
266 		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
267 
268 	pm_runtime_use_autosuspend(dev);
269 	pm_runtime_set_autosuspend_delay(dev, 1000);
270 	pm_runtime_set_active(dev);
271 	pm_runtime_allow(dev);
272 	pm_runtime_mark_last_busy(dev);
273 	pm_runtime_put(dev);
274 }
275 
276 int xe_pm_init_early(struct xe_device *xe)
277 {
278 	int err;
279 
280 	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
281 
282 	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
283 	if (err)
284 		return err;
285 
286 	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
287 	if (err)
288 		return err;
289 
290 	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
291 	return 0;
292 }
293 ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
294 
295 static u32 vram_threshold_value(struct xe_device *xe)
296 {
297 	/* FIXME: D3Cold temporarily disabled by default on BMG */
298 	if (xe->info.platform == XE_BATTLEMAGE)
299 		return 0;
300 
301 	return DEFAULT_VRAM_THRESHOLD;
302 }
303 
304 static int xe_pm_notifier_callback(struct notifier_block *nb,
305 				   unsigned long action, void *data)
306 {
307 	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
308 	int err = 0;
309 
310 	switch (action) {
311 	case PM_HIBERNATION_PREPARE:
312 	case PM_SUSPEND_PREPARE:
313 		xe_pm_runtime_get(xe);
314 		err = xe_bo_evict_all_user(xe);
315 		if (err) {
316 			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
317 			xe_pm_runtime_put(xe);
318 			break;
319 		}
320 
321 		err = xe_bo_notifier_prepare_all_pinned(xe);
322 		if (err) {
323 			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
324 			xe_pm_runtime_put(xe);
325 		}
326 		break;
327 	case PM_POST_HIBERNATION:
328 	case PM_POST_SUSPEND:
329 		xe_bo_notifier_unprepare_all_pinned(xe);
330 		xe_pm_runtime_put(xe);
331 		break;
332 	}
333 
334 	if (err)
335 		return NOTIFY_BAD;
336 
337 	return NOTIFY_DONE;
338 }
339 
340 /**
341  * xe_pm_init - Initialize Xe Power Management
342  * @xe: xe device instance
343  *
344  * This component is responsible for System and Device sleep states.
345  *
346  * Returns 0 for success, negative error code otherwise.
347  */
348 int xe_pm_init(struct xe_device *xe)
349 {
350 	u32 vram_threshold;
351 	int err;
352 
353 	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
354 	err = register_pm_notifier(&xe->pm_notifier);
355 	if (err)
356 		return err;
357 
358 	/* For now suspend/resume is only allowed with GuC */
359 	if (!xe_device_uc_enabled(xe))
360 		return 0;
361 
362 	if (xe->d3cold.capable) {
363 		vram_threshold = vram_threshold_value(xe);
364 		err = xe_pm_set_vram_threshold(xe, vram_threshold);
365 		if (err)
366 			goto err_unregister;
367 	}
368 
369 	xe_pm_runtime_init(xe);
370 	return 0;
371 
372 err_unregister:
373 	unregister_pm_notifier(&xe->pm_notifier);
374 	return err;
375 }
376 
377 static void xe_pm_runtime_fini(struct xe_device *xe)
378 {
379 	struct device *dev = xe->drm.dev;
380 
381 	/* Our current VFs do not support RPM, so disable it */
382 	if (IS_SRIOV_VF(xe))
383 		return;
384 
385 	pm_runtime_get_sync(dev);
386 	pm_runtime_forbid(dev);
387 }
388 
389 /**
390  * xe_pm_fini - Finalize PM
391  * @xe: xe device instance
392  */
393 void xe_pm_fini(struct xe_device *xe)
394 {
395 	if (xe_device_uc_enabled(xe))
396 		xe_pm_runtime_fini(xe);
397 
398 	unregister_pm_notifier(&xe->pm_notifier);
399 }
400 
401 static void xe_pm_write_callback_task(struct xe_device *xe,
402 				      struct task_struct *task)
403 {
404 	WRITE_ONCE(xe->pm_callback_task, task);
405 
406 	/*
407 	 * Just in case it's somehow possible for our writes to be reordered to
408 	 * the extent that something else re-uses the task written in
409 	 * pm_callback_task. For example after returning from the callback, but
410 	 * before the reordered write that resets pm_callback_task back to NULL.
411 	 */
412 	smp_mb(); /* pairs with xe_pm_read_callback_task */
413 }
414 
415 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
416 {
417 	smp_mb(); /* pairs with xe_pm_write_callback_task */
418 
419 	return READ_ONCE(xe->pm_callback_task);
420 }
421 
422 /**
423  * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
424  * @xe: xe device instance
425  *
426  * This does not provide any guarantee that the device is going to remain
427  * suspended as it might be racing with the runtime state transitions.
428  * It can only be used as a best-effort assertion, for instance to ensure that
429  * we are not in the sleep state while trying to access some memory.
430  *
431  * Returns true if PCI device is suspended, false otherwise.
432  */
433 bool xe_pm_runtime_suspended(struct xe_device *xe)
434 {
435 	return pm_runtime_suspended(xe->drm.dev);
436 }
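
/*
 * Illustrative sketch (hypothetical helper, assuming the xe_assert() macro
 * from xe_assert.h): the intended best-effort use of xe_pm_runtime_suspended()
 * described above, asserting that an inner path runs with the device awake:
 *
 *	static void xe_foo_touch_hw(struct xe_device *xe)
 *	{
 *		xe_assert(xe, !xe_pm_runtime_suspended(xe));
 *		... access registers / memory ...
 *	}
 */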
437 
438 /**
439  * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
440  * @xe: xe device instance
441  *
442  * Returns 0 for success, negative error code otherwise.
443  */
444 int xe_pm_runtime_suspend(struct xe_device *xe)
445 {
446 	struct xe_bo *bo, *on;
447 	struct xe_gt *gt;
448 	u8 id;
449 	int err = 0;
450 
451 	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
452 	/* Disable access_ongoing asserts and prevent recursive pm calls */
453 	xe_pm_write_callback_task(xe, current);
454 
455 	/*
456 	 * The actual xe_pm_runtime_put() is always async underneath, so
457 	 * exactly where that is called should make no difference to us. However
458 	 * we still need to be very careful with the locks that this callback
459 	 * acquires and the locks that are acquired and held by any callers of
460 	 * xe_runtime_pm_get(). We already have the matching annotation
461 	 * on that side, but we also need it here. For example lockdep should be
462 	 * able to tell us if the following scenario is in theory possible:
463 	 *
464 	 * CPU0                          | CPU1 (kworker)
465 	 * lock(A)                       |
466 	 *                               | xe_pm_runtime_suspend()
467 	 *                               |      lock(A)
468 	 * xe_pm_runtime_get()           |
469 	 *
470 	 * This will clearly deadlock since rpm core needs to wait for
471 	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
472 	 * on CPU0 which prevents CPU1 making forward progress.  With the
473 	 * annotation here and in xe_pm_runtime_get() lockdep will see
474 	 * the potential lock inversion and give us a nice splat.
475 	 */
476 	xe_rpm_lockmap_acquire(xe);
477 
478 	err = xe_pxp_pm_suspend(xe->pxp);
479 	if (err)
480 		goto out;
481 
482 	/*
483 	 * Hold the lock over the entire list operation, as xe_ttm_bo_destroy and
484 	 * xe_bo_move_notify also check for and delete bo entries from the user fault list.
485 	 */
486 	mutex_lock(&xe->mem_access.vram_userfault.lock);
487 	list_for_each_entry_safe(bo, on,
488 				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
489 		xe_bo_runtime_pm_release_mmap_offset(bo);
490 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
491 
492 	xe_display_pm_runtime_suspend(xe);
493 
494 	if (xe->d3cold.allowed) {
495 		err = xe_bo_evict_all(xe);
496 		if (err)
497 			goto out_resume;
498 	}
499 
500 	for_each_gt(gt, xe, id) {
501 		err = xe_gt_suspend(gt);
502 		if (err)
503 			goto out_resume;
504 	}
505 
506 	xe_irq_suspend(xe);
507 
508 	xe_display_pm_runtime_suspend_late(xe);
509 
510 	xe_i2c_pm_suspend(xe);
511 
512 	xe_rpm_lockmap_release(xe);
513 	xe_pm_write_callback_task(xe, NULL);
514 	return 0;
515 
516 out_resume:
517 	xe_display_pm_runtime_resume(xe);
518 	xe_pxp_pm_resume(xe->pxp);
519 out:
520 	xe_rpm_lockmap_release(xe);
521 	xe_pm_write_callback_task(xe, NULL);
522 	return err;
523 }
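
/*
 * Illustrative sketch of the inversion that the annotation above lets lockdep
 * catch (the lock name is hypothetical): if a caller holds a lock that this
 * suspend callback also needs, a synchronous wake-up can deadlock:
 *
 *	mutex_lock(&xe->foo_lock);
 *	xe_pm_runtime_get(xe);		(may wait for xe_pm_runtime_suspend() ...)
 *	...				(... which itself takes &xe->foo_lock)
 *	xe_pm_runtime_put(xe);
 *	mutex_unlock(&xe->foo_lock);
 *
 * With xe_rpm_lockmap_acquire()/release() in both paths, lockdep reports this
 * ordering instead of relying on hitting the race at runtime.
 */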
524 
525 /**
526  * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
527  * @xe: xe device instance
528  *
529  * Returns 0 for success, negative error code otherwise.
530  */
531 int xe_pm_runtime_resume(struct xe_device *xe)
532 {
533 	struct xe_gt *gt;
534 	u8 id;
535 	int err = 0;
536 
537 	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
538 	/* Disable access_ongoing asserts and prevent recursive pm calls */
539 	xe_pm_write_callback_task(xe, current);
540 
541 	xe_rpm_lockmap_acquire(xe);
542 
543 	for_each_gt(gt, xe, id)
544 		xe_gt_idle_disable_c6(gt);
545 
546 	if (xe->d3cold.allowed) {
547 		err = xe_pcode_ready(xe, true);
548 		if (err)
549 			goto out;
550 
551 		xe_display_pm_resume_early(xe);
552 
553 		/*
554 		 * This only restores pinned memory which is the memory
555 		 * required for the GT(s) to resume.
556 		 */
557 		err = xe_bo_restore_early(xe);
558 		if (err)
559 			goto out;
560 	}
561 
562 	xe_i2c_pm_resume(xe, xe->d3cold.allowed);
563 
564 	xe_irq_resume(xe);
565 
566 	for_each_gt(gt, xe, id)
567 		xe_gt_resume(gt);
568 
569 	xe_display_pm_runtime_resume(xe);
570 
571 	if (xe->d3cold.allowed) {
572 		err = xe_bo_restore_late(xe);
573 		if (err)
574 			goto out;
575 	}
576 
577 	xe_pxp_pm_resume(xe->pxp);
578 
579 	if (IS_SRIOV_VF(xe))
580 		xe_sriov_vf_ccs_register_context(xe);
581 
582 out:
583 	xe_rpm_lockmap_release(xe);
584 	xe_pm_write_callback_task(xe, NULL);
585 	return err;
586 }
587 
588 /*
589  * For places where resume is synchronous it can be quite easy to deadlock
590  * if we are not careful. Also in practice it might be quite timing
591  * sensitive to ever see the 0 -> 1 transition with the caller's locks
592  * held, so deadlocks might exist but are hard for lockdep to ever see.
593  * With this in mind, help lockdep learn about the potentially scary
594  * stuff that can happen inside the runtime_resume callback by acquiring
595  * a dummy lock (it doesn't protect anything and gets compiled out on
596  * non-debug builds).  Lockdep then only needs to see the
597  * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
598  * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
599  * For example if the (callers_locks) are ever grabbed in the
600  * runtime_resume callback, lockdep should give us a nice splat.
601  */
602 static void xe_rpm_might_enter_cb(const struct xe_device *xe)
603 {
604 	xe_rpm_lockmap_acquire(xe);
605 	xe_rpm_lockmap_release(xe);
606 }
607 
608 /*
609  * Prime the lockdep maps for known locking orders that need to
610  * be supported but that may not always occur on all systems.
611  */
612 static void xe_pm_runtime_lockdep_prime(void)
613 {
614 	struct dma_resv lockdep_resv;
615 
616 	dma_resv_init(&lockdep_resv);
617 	lock_map_acquire(&xe_pm_runtime_d3cold_map);
618 	/* D3Cold takes the dma_resv locks to evict bos */
619 	dma_resv_lock(&lockdep_resv, NULL);
620 	dma_resv_unlock(&lockdep_resv);
621 	lock_map_release(&xe_pm_runtime_d3cold_map);
622 
623 	/* Shrinkers might like to wake up the device under reclaim. */
624 	fs_reclaim_acquire(GFP_KERNEL);
625 	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
626 	lock_map_release(&xe_pm_runtime_nod3cold_map);
627 	fs_reclaim_release(GFP_KERNEL);
628 }
629 
630 /**
631  * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
632  * @xe: xe device instance
633  */
634 void xe_pm_runtime_get(struct xe_device *xe)
635 {
636 	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
637 	pm_runtime_get_noresume(xe->drm.dev);
638 
639 	if (xe_pm_read_callback_task(xe) == current)
640 		return;
641 
642 	xe_rpm_might_enter_cb(xe);
643 	pm_runtime_resume(xe->drm.dev);
644 }
645 
646 /**
647  * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
648  * @xe: xe device instance
649  */
650 void xe_pm_runtime_put(struct xe_device *xe)
651 {
652 	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
653 	if (xe_pm_read_callback_task(xe) == current) {
654 		pm_runtime_put_noidle(xe->drm.dev);
655 	} else {
656 		pm_runtime_mark_last_busy(xe->drm.dev);
657 		pm_runtime_put(xe->drm.dev);
658 	}
659 }
660 
661 /**
662  * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
663  * @xe: xe device instance
664  *
665  * Returns: Any number greater than or equal to 0 for success, negative error
666  * code otherwise.
667  */
668 int xe_pm_runtime_get_ioctl(struct xe_device *xe)
669 {
670 	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
671 	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
672 		return -ELOOP;
673 
674 	xe_rpm_might_enter_cb(xe);
675 	return pm_runtime_get_sync(xe->drm.dev);
676 }
677 
678 /**
679  * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
680  * @xe: xe device instance
681  *
682  * Return: True if device is awake (regardless of the previous number of references)
683  * and a new reference was taken, false otherwise.
684  */
685 bool xe_pm_runtime_get_if_active(struct xe_device *xe)
686 {
687 	return pm_runtime_get_if_active(xe->drm.dev) > 0;
688 }
689 
690 /**
691  * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
692  * @xe: xe device instance
693  *
694  * Return: True if device is awake, a previous reference had been already taken,
695  * and a new reference was now taken, false otherwise.
696  */
697 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
698 {
699 	if (xe_pm_read_callback_task(xe) == current) {
700 		/* The device is awake, grab the ref and move on */
701 		pm_runtime_get_noresume(xe->drm.dev);
702 		return true;
703 	}
704 
705 	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
706 }
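
/*
 * Illustrative sketch (hypothetical sampling helper) contrasting the two
 * conditional getters above for an opportunistic path that must never wake
 * the device:
 *
 *	if (!xe_pm_runtime_get_if_active(xe))
 *		return;			(device not awake, skip the work)
 *
 *	xe_foo_sample_counters(xe);	(hypothetical work, device stays awake)
 *	xe_pm_runtime_put(xe);
 *
 * xe_pm_runtime_get_if_in_use() additionally requires that another reference
 * is already held, so it only piggy-backs on activity that is guaranteed to
 * keep the device awake anyway.
 */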
707 
708 /*
709  * Very unreliable! Should only be used to suppress the false positive case
710  * in the missing outer rpm protection warning.
711  */
712 static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
713 {
714 #ifdef CONFIG_PM
715 	struct device *dev = xe->drm.dev;
716 
717 	return dev->power.runtime_status == RPM_SUSPENDING ||
718 		dev->power.runtime_status == RPM_RESUMING ||
719 		pm_suspend_in_progress();
720 #else
721 	return false;
722 #endif
723 }
724 
725 /**
726  * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
727  * @xe: xe device instance
728  *
729  * This function should be used in inner places where it is surely already
730  * protected by outer-bound callers of `xe_pm_runtime_get`.
731  * It will warn if not protected.
732  * The reference should be put back after this function regardless, since it
733  * will always bump the usage counter.
734  */
735 void xe_pm_runtime_get_noresume(struct xe_device *xe)
736 {
737 	bool ref;
738 
739 	ref = xe_pm_runtime_get_if_in_use(xe);
740 
741 	if (!ref) {
742 		pm_runtime_get_noresume(xe->drm.dev);
743 		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
744 			 "Missing outer runtime PM protection\n");
745 	}
746 }
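
/*
 * Illustrative sketch (hypothetical inner helper): the layering intended for
 * xe_pm_runtime_get_noresume(), where the real reference is taken by an outer
 * caller and the inner helper only bumps the usage counter:
 *
 *	static void xe_foo_inner(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get_noresume(xe);	(warns if no outer protection)
 *		...				(device cannot runtime suspend here)
 *		xe_pm_runtime_put(xe);
 *	}
 */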
747 
748 /**
749  * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
750  * @xe: xe device instance
751  *
752  * Returns: True if device is awake and the reference was taken, false otherwise.
753  */
754 bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
755 {
756 	if (xe_pm_read_callback_task(xe) == current) {
757 		/* The device is awake, grab the ref and move on */
758 		pm_runtime_get_noresume(xe->drm.dev);
759 		return true;
760 	}
761 
762 	xe_rpm_might_enter_cb(xe);
763 	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
764 }
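
/*
 * Illustrative sketch (hypothetical worker body): a non-ioctl path that needs
 * the device awake but must cope with the resume failing:
 *
 *	if (!xe_pm_runtime_resume_and_get(xe))
 *		return;			(resume failed, no reference was taken)
 *
 *	...				(device is awake here)
 *	xe_pm_runtime_put(xe);
 */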
765 
766 /**
767  * xe_pm_assert_unbounded_bridge - Disable PM on unbound PCIe parent bridge
768  * @xe: xe device instance
769  */
770 void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
771 {
772 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
773 	struct pci_dev *bridge = pci_upstream_bridge(pdev);
774 
775 	if (!bridge)
776 		return;
777 
778 	if (!bridge->driver) {
779 		drm_warn(&xe->drm, "unbound parent PCI bridge, device won't support any PM\n");
780 		device_set_pm_not_required(&pdev->dev);
781 	}
782 }
783 
784 /**
785  * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
786  * @xe: xe device instance
787  * @threshold: VRAM size in MiB for the D3cold threshold
788  *
789  * Return:
790  * * 0		- success
791  * * -EINVAL	- invalid argument
792  */
793 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
794 {
795 	struct ttm_resource_manager *man;
796 	u32 vram_total_mb = 0;
797 	int i;
798 
799 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
800 		man = ttm_manager_type(&xe->ttm, i);
801 		if (man)
802 			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
803 	}
804 
805 	drm_dbg(&xe->drm, "Total vram %u MiB\n", vram_total_mb);
806 
807 	if (threshold > vram_total_mb)
808 		return -EINVAL;
809 
810 	mutex_lock(&xe->d3cold.lock);
811 	xe->d3cold.vram_threshold = threshold;
812 	mutex_unlock(&xe->d3cold.lock);
813 
814 	return 0;
815 }
816 
817 /**
818  * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
819  * @xe: xe device instance
820  *
821  * To be called during runtime_pm idle callback.
822  * Check for all the D3Cold conditions ahead of runtime suspend.
823  */
824 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
825 {
826 	struct ttm_resource_manager *man;
827 	u32 total_vram_used_mb = 0;
828 	u64 vram_used;
829 	int i;
830 
831 	if (!xe->d3cold.capable) {
832 		xe->d3cold.allowed = false;
833 		return;
834 	}
835 
836 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
837 		man = ttm_manager_type(&xe->ttm, i);
838 		if (man) {
839 			vram_used = ttm_resource_manager_usage(man);
840 			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
841 		}
842 	}
843 
844 	mutex_lock(&xe->d3cold.lock);
845 
846 	if (total_vram_used_mb < xe->d3cold.vram_threshold)
847 		xe->d3cold.allowed = true;
848 	else
849 		xe->d3cold.allowed = false;
850 
851 	mutex_unlock(&xe->d3cold.lock);
852 }
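
/*
 * Illustrative sketch of the expected caller: the PCI runtime idle callback
 * re-evaluates the D3cold conditions right before the device may be moved
 * towards runtime suspend. The callback name and the pdev_to_xe_device()
 * lookup are shown as assumptions, not as the exact upstream wiring:
 *
 *	static int xe_pci_runtime_idle(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *
 *		return 0;
 *	}
 */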
853 
854 /**
855  * xe_pm_module_init() - Perform xe_pm specific module initialization.
856  *
857  * Return: 0 on success. Currently doesn't fail.
858  */
859 int __init xe_pm_module_init(void)
860 {
861 	xe_pm_runtime_lockdep_prime();
862 	return 0;
863 }
864