xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision 38fc73b8c7d692a099ddda37b700eeb330a03ff1)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_pm.h"
7 
8 #include <linux/fault-inject.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/suspend.h>
11 
12 #include <drm/drm_managed.h>
13 #include <drm/ttm/ttm_placement.h>
14 
15 #include "display/xe_display.h"
16 #include "xe_bo.h"
17 #include "xe_bo_evict.h"
18 #include "xe_device.h"
19 #include "xe_ggtt.h"
20 #include "xe_gt.h"
21 #include "xe_guc.h"
22 #include "xe_i2c.h"
23 #include "xe_irq.h"
24 #include "xe_pcode.h"
25 #include "xe_pxp.h"
26 #include "xe_sriov_vf_ccs.h"
27 #include "xe_trace.h"
28 #include "xe_wa.h"
29 
30 /**
31  * DOC: Xe Power Management
32  *
33  * Xe PM implements the main routines for both system level suspend states and
34  * for the opportunistic runtime suspend states.
35  *
36  * System Level Suspend (S-States) - In general this is OS initiated suspend
37  * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to RAM),
38  * S4 (suspend to disk). The main entry points here are `xe_pm_suspend` and
39  * `xe_pm_resume`; they handle the suspend to and resume from these states.
40  *
41  * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
42  * state D3, controlled by the PCI subsystem and ACPI with help from the
43  * runtime_pm infrastructure.
44  * PCI D3 is special and can mean either D3hot, where Vcc power stays on to keep
45  * memory alive and allow a quicker, low latency resume, or D3cold, where Vcc
46  * power is off for better power savings.
47  * Vcc for the PCI hierarchy can only be controlled at the PCI root port
48  * level, while the device driver can be behind multiple bridges/switches and
49  * paired with other devices. For this reason, the PCI subsystem cannot perform
50  * the transition towards D3cold on its own. The lowest runtime PM state the PCI
51  * subsystem can reach is D3hot. Then, if all the paired devices under the same
52  * root port are in D3hot, ACPI assists and runs its own methods (_PR3 and _OFF)
53  * to perform the transition from D3hot to D3cold. Xe may disallow this
54  * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
55  * suspend. That decision is based on runtime conditions such as VRAM usage, so
56  * that a quick and low latency resume remains possible, for instance.
57  *
58  * Runtime PM - This infrastructure provided by the Linux kernel allows the
59  * device drivers to indicate when they can be runtime suspended, so the device
60  * can be put into D3 (if supported), or allow deeper package sleep states
61  * (PC-states), and/or other low level power states. The Xe PM component provides
62  * `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
63  * subsystem will call on the transitions to/from runtime suspend.
64  *
65  * Also, Xe PM provides get and put functions that the Xe driver will use to
66  * indicate activity. In order to avoid locking complications with the memory
67  * management, whenever possible, these get and put functions need to be called
68  * from the higher/outer levels.
69  * The main cases that need to be protected from the outer levels are: IOCTL,
70  * sysfs, debugfs, dma-buf sharing, GPU execution.
71  *
72  * This component is not responsible for GT idleness (RC6) nor GT frequency
73  * management (RPS).
74  */
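
/*
 * Illustrative sketch: the outer protection pattern described above, applied
 * to a hypothetical IOCTL handler. Only xe_pm_runtime_get_ioctl() and
 * xe_pm_runtime_put() come from this file; the handler name, do_foo() and the
 * to_xe_device() conversion are assumed placeholders.
 *
 *	static int xe_foo_ioctl(struct drm_device *dev, void *data,
 *				struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = do_foo(xe, data);
 *
 *		xe_pm_runtime_put(xe);
 *		return ret;
 *	}
 */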
75 
76 #ifdef CONFIG_LOCKDEP
77 static struct lockdep_map xe_pm_runtime_d3cold_map = {
78 	.name = "xe_rpm_d3cold_map"
79 };
80 
81 static struct lockdep_map xe_pm_runtime_nod3cold_map = {
82 	.name = "xe_rpm_nod3cold_map"
83 };
84 #endif
85 
86 /**
87  * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
88  * @xe: The xe device.
89  *
90  * Return: true if it is safe to runtime resume from reclaim context,
91  * false otherwise.
92  */
93 bool xe_rpm_reclaim_safe(const struct xe_device *xe)
94 {
95 	return !xe->d3cold.capable;
96 }
97 
98 static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
99 {
100 	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
101 			 &xe_pm_runtime_nod3cold_map :
102 			 &xe_pm_runtime_d3cold_map);
103 }
104 
105 static void xe_rpm_lockmap_release(const struct xe_device *xe)
106 {
107 	lock_map_release(xe_rpm_reclaim_safe(xe) ?
108 			 &xe_pm_runtime_nod3cold_map :
109 			 &xe_pm_runtime_d3cold_map);
110 }
111 
112 /**
113  * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
114  * @xe: xe device instance
115  *
116  * Return: 0 on success
117  */
118 int xe_pm_suspend(struct xe_device *xe)
119 {
120 	struct xe_gt *gt;
121 	u8 id;
122 	int err;
123 
124 	drm_dbg(&xe->drm, "Suspending device\n");
125 	trace_xe_pm_suspend(xe, __builtin_return_address(0));
126 
127 	err = xe_pxp_pm_suspend(xe->pxp);
128 	if (err)
129 		goto err;
130 
131 	for_each_gt(gt, xe, id)
132 		xe_gt_suspend_prepare(gt);
133 
134 	xe_display_pm_suspend(xe);
135 
136 	/* FIXME: Super racy... */
137 	err = xe_bo_evict_all(xe);
138 	if (err)
139 		goto err_display;
140 
141 	for_each_gt(gt, xe, id) {
142 		err = xe_gt_suspend(gt);
143 		if (err)
144 			goto err_display;
145 	}
146 
147 	xe_irq_suspend(xe);
148 
149 	xe_display_pm_suspend_late(xe);
150 
151 	xe_i2c_pm_suspend(xe);
152 
153 	drm_dbg(&xe->drm, "Device suspended\n");
154 	return 0;
155 
156 err_display:
157 	xe_display_pm_resume(xe);
158 	xe_pxp_pm_resume(xe->pxp);
159 err:
160 	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
161 	return err;
162 }
163 
164 /**
165  * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
166  * @xe: xe device instance
167  *
168  * Return: 0 on success
169  */
170 int xe_pm_resume(struct xe_device *xe)
171 {
172 	struct xe_tile *tile;
173 	struct xe_gt *gt;
174 	u8 id;
175 	int err;
176 
177 	drm_dbg(&xe->drm, "Resuming device\n");
178 	trace_xe_pm_resume(xe, __builtin_return_address(0));
179 
180 	for_each_tile(tile, xe, id)
181 		xe_wa_apply_tile_workarounds(tile);
182 
183 	err = xe_pcode_ready(xe, true);
184 	if (err)
185 		return err;
186 
187 	xe_display_pm_resume_early(xe);
188 
189 	/*
190 	 * This only restores pinned memory which is the memory required for the
191 	 * GT(s) to resume.
192 	 */
193 	err = xe_bo_restore_early(xe);
194 	if (err)
195 		goto err;
196 
197 	xe_i2c_pm_resume(xe, xe->d3cold.allowed);
198 
199 	xe_irq_resume(xe);
200 
201 	for_each_gt(gt, xe, id)
202 		xe_gt_resume(gt);
203 
204 	xe_display_pm_resume(xe);
205 
206 	err = xe_bo_restore_late(xe);
207 	if (err)
208 		goto err;
209 
210 	xe_pxp_pm_resume(xe->pxp);
211 
212 	if (IS_SRIOV_VF(xe))
213 		xe_sriov_vf_ccs_register_context(xe);
214 
215 	drm_dbg(&xe->drm, "Device resumed\n");
216 	return 0;
217 err:
218 	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
219 	return err;
220 }
221 
222 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
223 {
224 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
225 	struct pci_dev *root_pdev;
226 
227 	root_pdev = pcie_find_root_port(pdev);
228 	if (!root_pdev)
229 		return false;
230 
231 	/* D3Cold requires PME capability */
232 	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
233 		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
234 		return false;
235 	}
236 
237 	/* D3Cold requires _PR3 power resource */
238 	if (!pci_pr3_present(root_pdev)) {
239 		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
240 		return false;
241 	}
242 
243 	return true;
244 }
245 
246 static void xe_pm_runtime_init(struct xe_device *xe)
247 {
248 	struct device *dev = xe->drm.dev;
249 
250 	/* Our current VFs do not support RPM, so disable it */
251 	if (IS_SRIOV_VF(xe))
252 		return;
253 
254 	/*
255 	 * Disable the system suspend direct complete optimization.
256 	 * We need to ensure that the regular device suspend/resume functions
257 	 * are called since our runtime_pm cannot guarantee local memory
258 	 * eviction for d3cold.
259 	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
260 	 *       this option to integrated graphics as well.
261 	 */
262 	if (IS_DGFX(xe))
263 		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
264 
265 	pm_runtime_use_autosuspend(dev);
266 	pm_runtime_set_autosuspend_delay(dev, 1000);
267 	pm_runtime_set_active(dev);
268 	pm_runtime_allow(dev);
269 	pm_runtime_mark_last_busy(dev);
270 	pm_runtime_put(dev);
271 }
272 
273 int xe_pm_init_early(struct xe_device *xe)
274 {
275 	int err;
276 
277 	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
278 
279 	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
280 	if (err)
281 		return err;
282 
283 	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
284 	if (err)
285 		return err;
286 
287 	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
288 	return 0;
289 }
290 ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
291 
292 static u32 vram_threshold_value(struct xe_device *xe)
293 {
294 	/* FIXME: D3Cold temporarily disabled by default on BMG */
295 	if (xe->info.platform == XE_BATTLEMAGE)
296 		return 0;
297 
298 	return DEFAULT_VRAM_THRESHOLD;
299 }
300 
301 static int xe_pm_notifier_callback(struct notifier_block *nb,
302 				   unsigned long action, void *data)
303 {
304 	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
305 	int err = 0;
306 
307 	switch (action) {
308 	case PM_HIBERNATION_PREPARE:
309 	case PM_SUSPEND_PREPARE:
310 		xe_pm_runtime_get(xe);
311 		err = xe_bo_evict_all_user(xe);
312 		if (err) {
313 			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
314 			xe_pm_runtime_put(xe);
315 			break;
316 		}
317 
318 		err = xe_bo_notifier_prepare_all_pinned(xe);
319 		if (err) {
320 			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
321 			xe_pm_runtime_put(xe);
322 		}
323 		break;
324 	case PM_POST_HIBERNATION:
325 	case PM_POST_SUSPEND:
326 		xe_bo_notifier_unprepare_all_pinned(xe);
327 		xe_pm_runtime_put(xe);
328 		break;
329 	}
330 
331 	if (err)
332 		return NOTIFY_BAD;
333 
334 	return NOTIFY_DONE;
335 }
336 
337 /**
338  * xe_pm_init - Initialize Xe Power Management
339  * @xe: xe device instance
340  *
341  * This component is responsible for System and Device sleep states.
342  *
343  * Returns 0 for success, negative error code otherwise.
344  */
345 int xe_pm_init(struct xe_device *xe)
346 {
347 	u32 vram_threshold;
348 	int err;
349 
350 	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
351 	err = register_pm_notifier(&xe->pm_notifier);
352 	if (err)
353 		return err;
354 
355 	/* For now suspend/resume is only allowed with GuC */
356 	if (!xe_device_uc_enabled(xe))
357 		return 0;
358 
359 	if (xe->d3cold.capable) {
360 		vram_threshold = vram_threshold_value(xe);
361 		err = xe_pm_set_vram_threshold(xe, vram_threshold);
362 		if (err)
363 			goto err_unregister;
364 	}
365 
366 	xe_pm_runtime_init(xe);
367 	return 0;
368 
369 err_unregister:
370 	unregister_pm_notifier(&xe->pm_notifier);
371 	return err;
372 }
373 
374 static void xe_pm_runtime_fini(struct xe_device *xe)
375 {
376 	struct device *dev = xe->drm.dev;
377 
378 	/* Our current VFs do not support RPM, so disable it */
379 	if (IS_SRIOV_VF(xe))
380 		return;
381 
382 	pm_runtime_get_sync(dev);
383 	pm_runtime_forbid(dev);
384 }
385 
386 /**
387  * xe_pm_fini - Finalize PM
388  * @xe: xe device instance
389  */
390 void xe_pm_fini(struct xe_device *xe)
391 {
392 	if (xe_device_uc_enabled(xe))
393 		xe_pm_runtime_fini(xe);
394 
395 	unregister_pm_notifier(&xe->pm_notifier);
396 }
397 
398 static void xe_pm_write_callback_task(struct xe_device *xe,
399 				      struct task_struct *task)
400 {
401 	WRITE_ONCE(xe->pm_callback_task, task);
402 
403 	/*
404 	 * Just in case it's somehow possible for our writes to be reordered to
405 	 * the extent that something else re-uses the task written in
406 	 * pm_callback_task. For example after returning from the callback, but
407 	 * before the reordered write that resets pm_callback_task back to NULL.
408 	 */
409 	smp_mb(); /* pairs with xe_pm_read_callback_task */
410 }
411 
412 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
413 {
414 	smp_mb(); /* pairs with xe_pm_write_callback_task */
415 
416 	return READ_ONCE(xe->pm_callback_task);
417 }
418 
419 /**
420  * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
421  * @xe: xe device instance
422  *
423  * This does not provide any guarantee that the device is going to remain
424  * suspended as it might be racing with the runtime state transitions.
425  * It can be used only as an unreliable assertion, to ensure that we are not in
426  * the sleep state while trying to access some memory for instance.
427  *
428  * Returns true if PCI device is suspended, false otherwise.
429  */
430 bool xe_pm_runtime_suspended(struct xe_device *xe)
431 {
432 	return pm_runtime_suspended(xe->drm.dev);
433 }
434 
435 /**
436  * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
437  * @xe: xe device instance
438  *
439  * Returns 0 for success, negative error code otherwise.
440  */
441 int xe_pm_runtime_suspend(struct xe_device *xe)
442 {
443 	struct xe_bo *bo, *on;
444 	struct xe_gt *gt;
445 	u8 id;
446 	int err = 0;
447 
448 	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
449 	/* Disable access_ongoing asserts and prevent recursive pm calls */
450 	xe_pm_write_callback_task(xe, current);
451 
452 	/*
453 	 * The actual xe_pm_runtime_put() is always async underneath, so
454 	 * exactly where that is called should make no difference to us. However
455 	 * we still need to be very careful with the locks that this callback
456 	 * acquires and the locks that are acquired and held by any callers of
457 	 * xe_pm_runtime_get(). We already have the matching annotation
458 	 * on that side, but we also need it here. For example lockdep should be
459 	 * able to tell us if the following scenario is in theory possible:
460 	 *
461 	 * CPU0                          | CPU1 (kworker)
462 	 * lock(A)                       |
463 	 *                               | xe_pm_runtime_suspend()
464 	 *                               |      lock(A)
465 	 * xe_pm_runtime_get()           |
466 	 *
467 	 * This will clearly deadlock since rpm core needs to wait for
468 	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
469 	 * on CPU0 which prevents CPU1 making forward progress.  With the
470 	 * annotation here and in xe_pm_runtime_get() lockdep will see
471 	 * the potential lock inversion and give us a nice splat.
472 	 */
473 	xe_rpm_lockmap_acquire(xe);
474 
475 	err = xe_pxp_pm_suspend(xe->pxp);
476 	if (err)
477 		goto out;
478 
479 	/*
480 	 * Hold the lock across the entire list operation, as xe_ttm_bo_destroy and
481 	 * xe_bo_move_notify also check for and delete the bo entry from the user fault list.
482 	 */
483 	mutex_lock(&xe->mem_access.vram_userfault.lock);
484 	list_for_each_entry_safe(bo, on,
485 				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
486 		xe_bo_runtime_pm_release_mmap_offset(bo);
487 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
488 
489 	xe_display_pm_runtime_suspend(xe);
490 
491 	if (xe->d3cold.allowed) {
492 		err = xe_bo_evict_all(xe);
493 		if (err)
494 			goto out_resume;
495 	}
496 
497 	for_each_gt(gt, xe, id) {
498 		err = xe_gt_suspend(gt);
499 		if (err)
500 			goto out_resume;
501 	}
502 
503 	xe_irq_suspend(xe);
504 
505 	xe_display_pm_runtime_suspend_late(xe);
506 
507 	xe_i2c_pm_suspend(xe);
508 
509 	xe_rpm_lockmap_release(xe);
510 	xe_pm_write_callback_task(xe, NULL);
511 	return 0;
512 
513 out_resume:
514 	xe_display_pm_runtime_resume(xe);
515 	xe_pxp_pm_resume(xe->pxp);
516 out:
517 	xe_rpm_lockmap_release(xe);
518 	xe_pm_write_callback_task(xe, NULL);
519 	return err;
520 }
521 
522 /**
523  * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
524  * @xe: xe device instance
525  *
526  * Returns 0 for success, negative error code otherwise.
527  */
528 int xe_pm_runtime_resume(struct xe_device *xe)
529 {
530 	struct xe_gt *gt;
531 	u8 id;
532 	int err = 0;
533 
534 	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
535 	/* Disable access_ongoing asserts and prevent recursive pm calls */
536 	xe_pm_write_callback_task(xe, current);
537 
538 	xe_rpm_lockmap_acquire(xe);
539 
540 	if (xe->d3cold.allowed) {
541 		err = xe_pcode_ready(xe, true);
542 		if (err)
543 			goto out;
544 
545 		xe_display_pm_resume_early(xe);
546 
547 		/*
548 		 * This only restores pinned memory which is the memory
549 		 * required for the GT(s) to resume.
550 		 */
551 		err = xe_bo_restore_early(xe);
552 		if (err)
553 			goto out;
554 	}
555 
556 	xe_i2c_pm_resume(xe, xe->d3cold.allowed);
557 
558 	xe_irq_resume(xe);
559 
560 	for_each_gt(gt, xe, id)
561 		xe_gt_resume(gt);
562 
563 	xe_display_pm_runtime_resume(xe);
564 
565 	if (xe->d3cold.allowed) {
566 		err = xe_bo_restore_late(xe);
567 		if (err)
568 			goto out;
569 	}
570 
571 	xe_pxp_pm_resume(xe->pxp);
572 
573 	if (IS_SRIOV_VF(xe))
574 		xe_sriov_vf_ccs_register_context(xe);
575 
576 out:
577 	xe_rpm_lockmap_release(xe);
578 	xe_pm_write_callback_task(xe, NULL);
579 	return err;
580 }
581 
582 /*
583  * For places where resume is synchronous it can be quite easy to deadlock
584  * if we are not careful. Also in practice it might be quite timing
585  * sensitive to ever see the 0 -> 1 transition with the caller's locks
586  * held, so deadlocks might exist but are hard for lockdep to ever see.
587  * With this in mind, help lockdep learn about the potentially scary
588  * stuff that can happen inside the runtime_resume callback by acquiring
589  * a dummy lock (it doesn't protect anything and gets compiled out on
590  * non-debug builds).  Lockdep then only needs to see the
591  * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
592  * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
593  * For example if the (callers_locks) are ever grabbed in the
594  * runtime_resume callback, lockdep should give us a nice splat.
595  */
596 static void xe_rpm_might_enter_cb(const struct xe_device *xe)
597 {
598 	xe_rpm_lockmap_acquire(xe);
599 	xe_rpm_lockmap_release(xe);
600 }
601 
602 /*
603  * Prime the lockdep maps for known locking orders that need to
604  * be supported but that may not always occur on all systems.
605  */
606 static void xe_pm_runtime_lockdep_prime(void)
607 {
608 	struct dma_resv lockdep_resv;
609 
610 	dma_resv_init(&lockdep_resv);
611 	lock_map_acquire(&xe_pm_runtime_d3cold_map);
612 	/* D3Cold takes the dma_resv locks to evict bos */
613 	dma_resv_lock(&lockdep_resv, NULL);
614 	dma_resv_unlock(&lockdep_resv);
615 	lock_map_release(&xe_pm_runtime_d3cold_map);
616 
617 	/* Shrinkers might like to wake up the device under reclaim. */
618 	fs_reclaim_acquire(GFP_KERNEL);
619 	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
620 	lock_map_release(&xe_pm_runtime_nod3cold_map);
621 	fs_reclaim_release(GFP_KERNEL);
622 }
623 
624 /**
625  * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
626  * @xe: xe device instance
627  */
628 void xe_pm_runtime_get(struct xe_device *xe)
629 {
630 	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
631 	pm_runtime_get_noresume(xe->drm.dev);
632 
633 	if (xe_pm_read_callback_task(xe) == current)
634 		return;
635 
636 	xe_rpm_might_enter_cb(xe);
637 	pm_runtime_resume(xe->drm.dev);
638 }
639 
640 /**
641  * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
642  * @xe: xe device instance
643  */
644 void xe_pm_runtime_put(struct xe_device *xe)
645 {
646 	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
647 	if (xe_pm_read_callback_task(xe) == current) {
648 		pm_runtime_put_noidle(xe->drm.dev);
649 	} else {
650 		pm_runtime_mark_last_busy(xe->drm.dev);
651 		pm_runtime_put(xe->drm.dev);
652 	}
653 }
654 
655 /**
656  * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
657  * @xe: xe device instance
658  *
659  * Returns: Any number greater than or equal to 0 for success, negative error
660  * code otherwise.
661  */
662 int xe_pm_runtime_get_ioctl(struct xe_device *xe)
663 {
664 	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
665 	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
666 		return -ELOOP;
667 
668 	xe_rpm_might_enter_cb(xe);
669 	return pm_runtime_get_sync(xe->drm.dev);
670 }
671 
672 /**
673  * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
674  * @xe: xe device instance
675  *
676  * Return: True if device is awake (regardless of the previous number of references)
677  * and a new reference was taken, false otherwise.
678  */
679 bool xe_pm_runtime_get_if_active(struct xe_device *xe)
680 {
681 	return pm_runtime_get_if_active(xe->drm.dev) > 0;
682 }
683 
684 /**
685  * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
686  * @xe: xe device instance
687  *
688  * Return: True if device is awake, a previous reference had already been taken,
689  * and a new reference was now taken, false otherwise.
690  */
691 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
692 {
693 	if (xe_pm_read_callback_task(xe) == current) {
694 		/* The device is awake, grab the ref and move on */
695 		pm_runtime_get_noresume(xe->drm.dev);
696 		return true;
697 	}
698 
699 	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
700 }
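
/*
 * Illustrative sketch: using the conditional getters above from a path that
 * must never trigger a resume, e.g. opportunistic sampling; when the getter
 * returns false the device is asleep and the work is simply skipped.
 * sample_counters() is a hypothetical placeholder for the actual work.
 *
 *	if (!xe_pm_runtime_get_if_active(xe))
 *		return;
 *
 *	sample_counters(xe);
 *	xe_pm_runtime_put(xe);
 */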
701 
702 /*
703  * Very unreliable! Should only be used to suppress the false positive case
704  * in the missing outer rpm protection warning.
705  */
706 static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
707 {
708 #ifdef CONFIG_PM
709 	struct device *dev = xe->drm.dev;
710 
711 	return dev->power.runtime_status == RPM_SUSPENDING ||
712 		dev->power.runtime_status == RPM_RESUMING ||
713 		pm_suspend_in_progress();
714 #else
715 	return false;
716 #endif
717 }
718 
719 /**
720  * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
721  * @xe: xe device instance
722  *
723  * This function should be used in inner places where it is surely already
724  * protected by outer-bound callers of `xe_pm_runtime_get`.
725  * It will warn if not protected.
726  * The reference should be put back after this function in any case, since it
727  * will always bump the usage counter.
728  */
729 void xe_pm_runtime_get_noresume(struct xe_device *xe)
730 {
731 	bool ref;
732 
733 	ref = xe_pm_runtime_get_if_in_use(xe);
734 
735 	if (!ref) {
736 		pm_runtime_get_noresume(xe->drm.dev);
737 		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
738 			 "Missing outer runtime PM protection\n");
739 	}
740 }
741 
742 /**
743  * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
744  * @xe: xe device instance
745  *
746  * Returns: True if device is awake and the reference was taken, false otherwise.
747  */
748 bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
749 {
750 	if (xe_pm_read_callback_task(xe) == current) {
751 		/* The device is awake, grab the ref and move on */
752 		pm_runtime_get_noresume(xe->drm.dev);
753 		return true;
754 	}
755 
756 	xe_rpm_might_enter_cb(xe);
757 	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
758 }
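
/*
 * Illustrative sketch: the expected pattern for outer, non-IOCTL paths (a
 * hypothetical worker or debugfs handler, for instance) that may need to wake
 * the device but can bail out gracefully. do_work() and the -EAGAIN choice
 * are placeholders.
 *
 *	if (!xe_pm_runtime_resume_and_get(xe))
 *		return -EAGAIN;
 *
 *	do_work(xe);
 *	xe_pm_runtime_put(xe);
 */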
759 
760 /**
761  * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
762  * @xe: xe device instance
763  */
764 void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
765 {
766 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
767 	struct pci_dev *bridge = pci_upstream_bridge(pdev);
768 
769 	if (!bridge)
770 		return;
771 
772 	if (!bridge->driver) {
773 		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM.\n");
774 		device_set_pm_not_required(&pdev->dev);
775 	}
776 }
777 
778 /**
779  * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
780  * @xe: xe device instance
781  * @threshold: VRAM size in MiB for the D3cold threshold
782  *
783  * Return:
784  * * 0		- success
785  * * -EINVAL	- invalid argument
786  */
787 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
788 {
789 	struct ttm_resource_manager *man;
790 	u32 vram_total_mb = 0;
791 	int i;
792 
793 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
794 		man = ttm_manager_type(&xe->ttm, i);
795 		if (man)
796 			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
797 	}
798 
799 	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
800 
801 	if (threshold > vram_total_mb)
802 		return -EINVAL;
803 
804 	mutex_lock(&xe->d3cold.lock);
805 	xe->d3cold.vram_threshold = threshold;
806 	mutex_unlock(&xe->d3cold.lock);
807 
808 	return 0;
809 }
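
/*
 * Illustrative sketch: how a sysfs-style store handler could feed the setter
 * above. The attribute wiring and the pdev_to_xe_device() conversion are
 * assumed here; only xe_pm_set_vram_threshold() is taken from this file.
 *
 *	static ssize_t vram_d3cold_threshold_store(struct device *dev,
 *						   struct device_attribute *attr,
 *						   const char *buf, size_t count)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *		u32 threshold;
 *		int ret;
 *
 *		ret = kstrtou32(buf, 0, &threshold);
 *		if (ret)
 *			return ret;
 *
 *		ret = xe_pm_set_vram_threshold(xe, threshold);
 *		return ret ?: count;
 *	}
 */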
810 
811 /**
812  * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
813  * @xe: xe device instance
814  *
815  * To be called from the runtime_pm idle callback.
816  * It checks all the D3Cold conditions ahead of runtime suspend.
817  */
818 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
819 {
820 	struct ttm_resource_manager *man;
821 	u32 total_vram_used_mb = 0;
822 	u64 vram_used;
823 	int i;
824 
825 	if (!xe->d3cold.capable) {
826 		xe->d3cold.allowed = false;
827 		return;
828 	}
829 
830 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
831 		man = ttm_manager_type(&xe->ttm, i);
832 		if (man) {
833 			vram_used = ttm_resource_manager_usage(man);
834 			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
835 		}
836 	}
837 
838 	mutex_lock(&xe->d3cold.lock);
839 
840 	if (total_vram_used_mb < xe->d3cold.vram_threshold)
841 		xe->d3cold.allowed = true;
842 	else
843 		xe->d3cold.allowed = false;
844 
845 	mutex_unlock(&xe->d3cold.lock);
846 }
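
/*
 * Illustrative sketch: how the runtime_pm idle callback in the PCI glue code
 * might consume the toggle above before allowing autosuspend. The callback
 * shown here is an assumption; the real one lives outside this file and may
 * differ.
 *
 *	static int xe_pci_runtime_idle(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *
 *		return 0;
 *	}
 */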
847 
848 /**
849  * xe_pm_module_init() - Perform xe_pm specific module initialization.
850  *
851  * Return: 0 on success. Currently doesn't fail.
852  */
853 int __init xe_pm_module_init(void)
854 {
855 	xe_pm_runtime_lockdep_prime();
856 	return 0;
857 }
858