xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision fb7399cf2d0b33825b8039f95c45395c7deba25c)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_pm.h"
7 
8 #include <linux/fault-inject.h>
9 #include <linux/pm_runtime.h>
10 #include <linux/suspend.h>
11 
12 #include <drm/drm_managed.h>
13 #include <drm/ttm/ttm_placement.h>
14 
15 #include "display/xe_display.h"
16 #include "xe_bo.h"
17 #include "xe_bo_evict.h"
18 #include "xe_device.h"
19 #include "xe_ggtt.h"
20 #include "xe_gt.h"
21 #include "xe_guc.h"
22 #include "xe_irq.h"
23 #include "xe_pcode.h"
24 #include "xe_pxp.h"
25 #include "xe_trace.h"
26 #include "xe_wa.h"
27 
28 /**
29  * DOC: Xe Power Management
30  *
31  * Xe PM implements the main routines for both system level suspend states and
32  * for the opportunistic runtime suspend states.
33  *
34  * System Level Suspend (S-States) - In general this is OS-initiated suspend
35  * driven by ACPI to reach S0ix (a.k.a. S2idle, freeze), S3 (suspend to RAM), or
36  * S4 (hibernate to disk). The main entry points here are `xe_pm_suspend` and
37  * `xe_pm_resume`, which handle the suspend to and resume from these states.
38  *
39  * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
40  * state D3, controlled by the PCI subsystem and ACPI, with help from the
41  * runtime_pm infrastructure.
42  * PCI D3 is special: it can mean D3hot, where Vcc power stays on to keep memory
43  * alive and allow a quicker, lower latency resume, or D3cold, where Vcc power is
44  * off for better power savings.
45  * Vcc for the PCI hierarchy can only be controlled at the PCI root port level,
46  * while the device driver can be behind multiple bridges/switches and paired
47  * with other devices. For this reason, the PCI subsystem cannot perform the
48  * transition towards D3cold on its own; the lowest runtime PM state the PCI
49  * subsystem can reach is D3hot. Then, if all the paired devices in the same root
50  * port are in D3hot, ACPI will assist and run its own methods (_PR3 and _OFF)
51  * to perform the transition from D3hot to D3cold. Xe may disallow this
52  * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
53  * suspend. That decision is based on runtime conditions such as VRAM usage, for
54  * instance to guarantee a quick and low latency resume.
55  *
56  * Runtime PM - This infrastructure, provided by the Linux kernel, allows
57  * device drivers to indicate when they can be runtime suspended, so the device
58  * can be put into D3 (if supported), allowing deeper package sleep states
59  * (PC-states) and/or other low level power states. The Xe PM component provides
60  * the `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
61  * subsystem will call on the transition to/from runtime suspend.
62  *
63  * Also, Xe PM provides get and put functions that the Xe driver uses to
64  * indicate activity. In order to avoid locking complications with the memory
65  * management, these get and put functions need to be called from the
66  * higher/outer levels whenever possible.
67  * The main cases that need to be protected from the outer levels are: IOCTL,
68  * sysfs, debugfs, dma-buf sharing, GPU execution. See the usage sketch below.
69  *
70  * This component is not responsible for GT idleness (RC6) or GT frequency
71  * management (RPS).
72  */
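
/*
 * Example (illustrative sketch only, not part of the original source):
 * protecting an outer-level entry point with the runtime PM get/put pair.
 * The function and helper names below are hypothetical.
 *
 *	static u32 xe_foo_query(struct xe_device *xe)
 *	{
 *		u32 val;
 *
 *		xe_pm_runtime_get(xe);		// wake the device if needed
 *		val = xe_foo_read_hw(xe);	// hypothetical HW access helper
 *		xe_pm_runtime_put(xe);		// mark last busy and drop the reference
 *
 *		return val;
 *	}
 *
 * Inner layers reached from such a protected path can then rely on
 * xe_pm_runtime_get_noresume() instead of waking the device themselves.
 */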
73 
74 #ifdef CONFIG_LOCKDEP
75 static struct lockdep_map xe_pm_runtime_d3cold_map = {
76 	.name = "xe_rpm_d3cold_map"
77 };
78 
79 static struct lockdep_map xe_pm_runtime_nod3cold_map = {
80 	.name = "xe_rpm_nod3cold_map"
81 };
82 #endif
83 
84 /**
85  * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
86  * @xe: The xe device.
87  *
88  * Return: true if it is safe to runtime resume from reclaim context,
89  * false otherwise.
90  */
91 bool xe_rpm_reclaim_safe(const struct xe_device *xe)
92 {
93 	return !xe->d3cold.capable;
94 }
95 
96 static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
97 {
98 	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
99 			 &xe_pm_runtime_nod3cold_map :
100 			 &xe_pm_runtime_d3cold_map);
101 }
102 
103 static void xe_rpm_lockmap_release(const struct xe_device *xe)
104 {
105 	lock_map_release(xe_rpm_reclaim_safe(xe) ?
106 			 &xe_pm_runtime_nod3cold_map :
107 			 &xe_pm_runtime_d3cold_map);
108 }
109 
110 /**
111  * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
112  * @xe: xe device instance
113  *
114  * Return: 0 on success
115  */
116 int xe_pm_suspend(struct xe_device *xe)
117 {
118 	struct xe_gt *gt;
119 	u8 id;
120 	int err;
121 
122 	drm_dbg(&xe->drm, "Suspending device\n");
123 	trace_xe_pm_suspend(xe, __builtin_return_address(0));
124 
125 	err = xe_pxp_pm_suspend(xe->pxp);
126 	if (err)
127 		goto err;
128 
129 	for_each_gt(gt, xe, id)
130 		xe_gt_suspend_prepare(gt);
131 
132 	xe_display_pm_suspend(xe);
133 
134 	/* FIXME: Super racy... */
135 	err = xe_bo_evict_all(xe);
136 	if (err)
137 		goto err_pxp;
138 
139 	for_each_gt(gt, xe, id) {
140 		err = xe_gt_suspend(gt);
141 		if (err)
142 			goto err_display;
143 	}
144 
145 	xe_irq_suspend(xe);
146 
147 	xe_display_pm_suspend_late(xe);
148 
149 	drm_dbg(&xe->drm, "Device suspended\n");
150 	return 0;
151 
152 err_display:
153 	xe_display_pm_resume(xe);
154 err_pxp:
155 	xe_pxp_pm_resume(xe->pxp);
156 err:
157 	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
158 	return err;
159 }
160 
161 /**
162  * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
163  * @xe: xe device instance
164  *
165  * Return: 0 on success
166  */
167 int xe_pm_resume(struct xe_device *xe)
168 {
169 	struct xe_tile *tile;
170 	struct xe_gt *gt;
171 	u8 id;
172 	int err;
173 
174 	drm_dbg(&xe->drm, "Resuming device\n");
175 	trace_xe_pm_resume(xe, __builtin_return_address(0));
176 
177 	for_each_tile(tile, xe, id)
178 		xe_wa_apply_tile_workarounds(tile);
179 
180 	err = xe_pcode_ready(xe, true);
181 	if (err)
182 		return err;
183 
184 	xe_display_pm_resume_early(xe);
185 
186 	/*
187 	 * This only restores pinned memory which is the memory required for the
188 	 * GT(s) to resume.
189 	 */
190 	err = xe_bo_restore_early(xe);
191 	if (err)
192 		goto err;
193 
194 	xe_irq_resume(xe);
195 
196 	for_each_gt(gt, xe, id)
197 		xe_gt_resume(gt);
198 
199 	xe_display_pm_resume(xe);
200 
201 	err = xe_bo_restore_late(xe);
202 	if (err)
203 		goto err;
204 
205 	xe_pxp_pm_resume(xe->pxp);
206 
207 	drm_dbg(&xe->drm, "Device resumed\n");
208 	return 0;
209 err:
210 	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
211 	return err;
212 }
213 
214 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
215 {
216 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
217 	struct pci_dev *root_pdev;
218 
219 	root_pdev = pcie_find_root_port(pdev);
220 	if (!root_pdev)
221 		return false;
222 
223 	/* D3Cold requires PME capability */
224 	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
225 		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
226 		return false;
227 	}
228 
229 	/* D3Cold requires _PR3 power resource */
230 	if (!pci_pr3_present(root_pdev)) {
231 		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
232 		return false;
233 	}
234 
235 	return true;
236 }
237 
238 static void xe_pm_runtime_init(struct xe_device *xe)
239 {
240 	struct device *dev = xe->drm.dev;
241 
242 	/*
243 	 * Disable the system suspend direct complete optimization.
244 	 * We need to ensure that the regular device suspend/resume functions
245 	 * are called since our runtime_pm cannot guarantee local memory
246 	 * eviction for d3cold.
247 	 * TODO: Check HDA audio dependencies claimed by i915, and then extend
248 	 *       this option to integrated graphics as well.
249 	 */
250 	if (IS_DGFX(xe))
251 		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
252 
253 	pm_runtime_use_autosuspend(dev);
254 	pm_runtime_set_autosuspend_delay(dev, 1000);
255 	pm_runtime_set_active(dev);
256 	pm_runtime_allow(dev);
257 	pm_runtime_mark_last_busy(dev);
258 	pm_runtime_put(dev);
259 }
260 
261 int xe_pm_init_early(struct xe_device *xe)
262 {
263 	int err;
264 
265 	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
266 
267 	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
268 	if (err)
269 		return err;
270 
271 	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
272 	if (err)
273 		return err;
274 
275 	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
276 	return 0;
277 }
278 ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
279 
280 static u32 vram_threshold_value(struct xe_device *xe)
281 {
282 	/* FIXME: D3Cold temporarily disabled by default on BMG */
283 	if (xe->info.platform == XE_BATTLEMAGE)
284 		return 0;
285 
286 	return DEFAULT_VRAM_THRESHOLD;
287 }
288 
289 static int xe_pm_notifier_callback(struct notifier_block *nb,
290 				   unsigned long action, void *data)
291 {
292 	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
293 	int err = 0;
294 
295 	switch (action) {
296 	case PM_HIBERNATION_PREPARE:
297 	case PM_SUSPEND_PREPARE:
298 		xe_pm_runtime_get(xe);
299 		err = xe_bo_evict_all_user(xe);
300 		if (err) {
301 			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
302 			xe_pm_runtime_put(xe);
303 			break;
304 		}
305 
306 		err = xe_bo_notifier_prepare_all_pinned(xe);
307 		if (err) {
308 			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
309 			xe_pm_runtime_put(xe);
310 		}
311 		break;
312 	case PM_POST_HIBERNATION:
313 	case PM_POST_SUSPEND:
314 		xe_bo_notifier_unprepare_all_pinned(xe);
315 		xe_pm_runtime_put(xe);
316 		break;
317 	}
318 
319 	if (err)
320 		return NOTIFY_BAD;
321 
322 	return NOTIFY_DONE;
323 }
324 
325 /**
326  * xe_pm_init - Initialize Xe Power Management
327  * @xe: xe device instance
328  *
329  * This component is responsible for System and Device sleep states.
330  *
331  * Returns 0 for success, negative error code otherwise.
332  */
333 int xe_pm_init(struct xe_device *xe)
334 {
335 	u32 vram_threshold;
336 	int err;
337 
338 	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
339 	err = register_pm_notifier(&xe->pm_notifier);
340 	if (err)
341 		return err;
342 
343 	/* For now suspend/resume is only allowed with GuC */
344 	if (!xe_device_uc_enabled(xe))
345 		return 0;
346 
347 	if (xe->d3cold.capable) {
348 		vram_threshold = vram_threshold_value(xe);
349 		err = xe_pm_set_vram_threshold(xe, vram_threshold);
350 		if (err)
351 			goto err_unregister;
352 	}
353 
354 	xe_pm_runtime_init(xe);
355 	return 0;
356 
357 err_unregister:
358 	unregister_pm_notifier(&xe->pm_notifier);
359 	return err;
360 }
361 
362 static void xe_pm_runtime_fini(struct xe_device *xe)
363 {
364 	struct device *dev = xe->drm.dev;
365 
366 	pm_runtime_get_sync(dev);
367 	pm_runtime_forbid(dev);
368 }
369 
370 /**
371  * xe_pm_fini - Finalize PM
372  * @xe: xe device instance
373  */
374 void xe_pm_fini(struct xe_device *xe)
375 {
376 	if (xe_device_uc_enabled(xe))
377 		xe_pm_runtime_fini(xe);
378 
379 	unregister_pm_notifier(&xe->pm_notifier);
380 }
381 
382 static void xe_pm_write_callback_task(struct xe_device *xe,
383 				      struct task_struct *task)
384 {
385 	WRITE_ONCE(xe->pm_callback_task, task);
386 
387 	/*
388 	 * Just in case it's somehow possible for our writes to be reordered to
389 	 * the extent that something else re-uses the task written in
390 	 * pm_callback_task. For example after returning from the callback, but
391 	 * before the reordered write that resets pm_callback_task back to NULL.
392 	 */
393 	smp_mb(); /* pairs with xe_pm_read_callback_task */
394 }
395 
396 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
397 {
398 	smp_mb(); /* pairs with xe_pm_write_callback_task */
399 
400 	return READ_ONCE(xe->pm_callback_task);
401 }
402 
403 /**
404  * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
405  * @xe: xe device instance
406  *
407  * This does not provide any guarantee that the device is going to remain
408  * suspended, as it might be racing with the runtime state transitions.
409  * It can only be used as a non-reliable assertion, to ensure that we are not in
410  * the sleep state while trying to access some memory, for instance.
411  *
412  * Returns true if the PCI device is suspended, false otherwise.
413  */
414 bool xe_pm_runtime_suspended(struct xe_device *xe)
415 {
416 	return pm_runtime_suspended(xe->drm.dev);
417 }
418 
419 /**
420  * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
421  * @xe: xe device instance
422  *
423  * Returns 0 for success, negative error code otherwise.
424  */
425 int xe_pm_runtime_suspend(struct xe_device *xe)
426 {
427 	struct xe_bo *bo, *on;
428 	struct xe_gt *gt;
429 	u8 id;
430 	int err = 0;
431 
432 	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
433 	/* Disable access_ongoing asserts and prevent recursive pm calls */
434 	xe_pm_write_callback_task(xe, current);
435 
436 	/*
437 	 * The actual xe_pm_runtime_put() is always async underneath, so
438 	 * exactly where that is called should make no difference to us. However
439 	 * we still need to be very careful with the locks that this callback
440 	 * acquires and the locks that are acquired and held by any callers of
441 	 * xe_pm_runtime_get(). We already have the matching annotation
442 	 * on that side, but we also need it here. For example lockdep should be
443 	 * able to tell us if the following scenario is in theory possible:
444 	 *
445 	 * CPU0                          | CPU1 (kworker)
446 	 * lock(A)                       |
447 	 *                               | xe_pm_runtime_suspend()
448 	 *                               |      lock(A)
449 	 * xe_pm_runtime_get()           |
450 	 *
451 	 * This will clearly deadlock, since the rpm core needs to wait for
452 	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
453 	 * on CPU0, which prevents CPU1 from making forward progress. With the
454 	 * annotation here and in xe_pm_runtime_get(), lockdep will see
455 	 * the potential lock inversion and give us a nice splat.
456 	 */
457 	xe_rpm_lockmap_acquire(xe);
458 
459 	err = xe_pxp_pm_suspend(xe->pxp);
460 	if (err)
461 		goto out;
462 
463 	/*
464 	 * Hold the lock for the entire list walk: xe_ttm_bo_destroy and
465 	 * xe_bo_move_notify also check for and delete bo entries from the user fault list.
466 	 */
467 	mutex_lock(&xe->mem_access.vram_userfault.lock);
468 	list_for_each_entry_safe(bo, on,
469 				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
470 		xe_bo_runtime_pm_release_mmap_offset(bo);
471 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
472 
473 	xe_display_pm_runtime_suspend(xe);
474 
475 	if (xe->d3cold.allowed) {
476 		err = xe_bo_evict_all(xe);
477 		if (err)
478 			goto out_resume;
479 	}
480 
481 	for_each_gt(gt, xe, id) {
482 		err = xe_gt_suspend(gt);
483 		if (err)
484 			goto out_resume;
485 	}
486 
487 	xe_irq_suspend(xe);
488 
489 	xe_display_pm_runtime_suspend_late(xe);
490 
491 	xe_rpm_lockmap_release(xe);
492 	xe_pm_write_callback_task(xe, NULL);
493 	return 0;
494 
495 out_resume:
496 	xe_display_pm_runtime_resume(xe);
497 	xe_pxp_pm_resume(xe->pxp);
498 out:
499 	xe_rpm_lockmap_release(xe);
500 	xe_pm_write_callback_task(xe, NULL);
501 	return err;
502 }
503 
504 /**
505  * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
506  * @xe: xe device instance
507  *
508  * Returns 0 for success, negative error code otherwise.
509  */
510 int xe_pm_runtime_resume(struct xe_device *xe)
511 {
512 	struct xe_gt *gt;
513 	u8 id;
514 	int err = 0;
515 
516 	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
517 	/* Disable access_ongoing asserts and prevent recursive pm calls */
518 	xe_pm_write_callback_task(xe, current);
519 
520 	xe_rpm_lockmap_acquire(xe);
521 
522 	if (xe->d3cold.allowed) {
523 		err = xe_pcode_ready(xe, true);
524 		if (err)
525 			goto out;
526 
527 		xe_display_pm_resume_early(xe);
528 
529 		/*
530 		 * This only restores pinned memory which is the memory
531 		 * required for the GT(s) to resume.
532 		 */
533 		err = xe_bo_restore_early(xe);
534 		if (err)
535 			goto out;
536 	}
537 
538 	xe_irq_resume(xe);
539 
540 	for_each_gt(gt, xe, id)
541 		xe_gt_resume(gt);
542 
543 	xe_display_pm_runtime_resume(xe);
544 
545 	if (xe->d3cold.allowed) {
546 		err = xe_bo_restore_late(xe);
547 		if (err)
548 			goto out;
549 	}
550 
551 	xe_pxp_pm_resume(xe->pxp);
552 
553 out:
554 	xe_rpm_lockmap_release(xe);
555 	xe_pm_write_callback_task(xe, NULL);
556 	return err;
557 }
558 
559 /*
560  * For places where resume is synchronous it can be quite easy to deadlock
561  * if we are not careful. Also, in practice it might be quite timing
562  * sensitive to ever see the 0 -> 1 transition with the caller's locks
563  * held, so deadlocks might exist but are hard for lockdep to ever see.
564  * With this in mind, help lockdep learn about the potentially scary
565  * stuff that can happen inside the runtime_resume callback by acquiring
566  * a dummy lock (it doesn't protect anything and gets compiled out on
567  * non-debug builds).  Lockdep then only needs to see the
568  * xe_pm_runtime_xxx_map -> runtime_resume callback ordering once, and then
569  * can hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map
570  * orderings. For example, if the (callers_locks) are ever grabbed in the
571  * runtime_resume callback, lockdep should give us a nice splat.
572  */
573 static void xe_rpm_might_enter_cb(const struct xe_device *xe)
574 {
575 	xe_rpm_lockmap_acquire(xe);
576 	xe_rpm_lockmap_release(xe);
577 }
578 
579 /*
580  * Prime the lockdep maps for known locking orders that need to
581  * be supported but that may not always occur on all systems.
582  */
583 static void xe_pm_runtime_lockdep_prime(void)
584 {
585 	struct dma_resv lockdep_resv;
586 
587 	dma_resv_init(&lockdep_resv);
588 	lock_map_acquire(&xe_pm_runtime_d3cold_map);
589 	/* D3Cold takes the dma_resv locks to evict bos */
590 	dma_resv_lock(&lockdep_resv, NULL);
591 	dma_resv_unlock(&lockdep_resv);
592 	lock_map_release(&xe_pm_runtime_d3cold_map);
593 
594 	/* Shrinkers might like to wake up the device under reclaim. */
595 	fs_reclaim_acquire(GFP_KERNEL);
596 	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
597 	lock_map_release(&xe_pm_runtime_nod3cold_map);
598 	fs_reclaim_release(GFP_KERNEL);
599 }
600 
601 /**
602  * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
603  * @xe: xe device instance
604  */
605 void xe_pm_runtime_get(struct xe_device *xe)
606 {
607 	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
608 	pm_runtime_get_noresume(xe->drm.dev);
609 
610 	if (xe_pm_read_callback_task(xe) == current)
611 		return;
612 
613 	xe_rpm_might_enter_cb(xe);
614 	pm_runtime_resume(xe->drm.dev);
615 }
616 
617 /**
618  * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
619  * @xe: xe device instance
620  */
621 void xe_pm_runtime_put(struct xe_device *xe)
622 {
623 	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
624 	if (xe_pm_read_callback_task(xe) == current) {
625 		pm_runtime_put_noidle(xe->drm.dev);
626 	} else {
627 		pm_runtime_mark_last_busy(xe->drm.dev);
628 		pm_runtime_put(xe->drm.dev);
629 	}
630 }
631 
632 /**
633  * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
634  * @xe: xe device instance
635  *
636  * Returns: Any number greater than or equal to 0 for success, negative error
637  * code otherwise.
638  */
639 int xe_pm_runtime_get_ioctl(struct xe_device *xe)
640 {
641 	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
642 	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
643 		return -ELOOP;
644 
645 	xe_rpm_might_enter_cb(xe);
646 	return pm_runtime_get_sync(xe->drm.dev);
647 }
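
/*
 * Example (illustrative sketch): how an ioctl-style wrapper might use
 * xe_pm_runtime_get_ioctl(). The wrapper and worker names are hypothetical.
 *
 *	static long xe_foo_ioctl(struct xe_device *xe, void *data)
 *	{
 *		long ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = xe_foo_ioctl_body(xe, data);	// hypothetical
 *		xe_pm_runtime_put(xe);	// the usage counter was bumped even on failure
 *
 *		return ret;
 *	}
 */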
648 
649 /**
650  * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
651  * @xe: xe device instance
652  *
653  * Return: True if the device is awake (regardless of the previous number of
654  * references) and a new reference was taken, false otherwise.
655  */
656 bool xe_pm_runtime_get_if_active(struct xe_device *xe)
657 {
658 	return pm_runtime_get_if_active(xe->drm.dev) > 0;
659 }
660 
661 /**
662  * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
663  * @xe: xe device instance
664  *
665  * Return: True if the device is awake, a reference had already been taken
666  * previously, and a new reference was now taken; false otherwise.
667  */
668 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
669 {
670 	if (xe_pm_read_callback_task(xe) == current) {
671 		/* The device is awake, grab the ref and move on */
672 		pm_runtime_get_noresume(xe->drm.dev);
673 		return true;
674 	}
675 
676 	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
677 }
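
/*
 * Example (illustrative sketch): an opportunistic path that only touches the
 * hardware when the device is already in use and must never wake it up. The
 * helper name is hypothetical.
 *
 *	if (xe_pm_runtime_get_if_in_use(xe)) {
 *		xe_foo_update_counters(xe);	// hypothetical; safe, device is awake
 *		xe_pm_runtime_put(xe);
 *	}
 */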
678 
679 /*
680  * Very unreliable! Should only be used to suppress the false positive case
681  * in the missing outer rpm protection warning.
682  */
683 static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
684 {
685 #ifdef CONFIG_PM
686 	struct device *dev = xe->drm.dev;
687 
688 	return dev->power.runtime_status == RPM_SUSPENDING ||
689 		dev->power.runtime_status == RPM_RESUMING ||
690 		pm_suspend_in_progress();
691 #else
692 	return false;
693 #endif
694 }
695 
696 /**
697  * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
698  * @xe: xe device instance
699  *
700  * This function should be used in inner places where it is surely already
701  * protected by outer-bound callers of `xe_pm_runtime_get`.
702  * It will warn if not protected.
703  * The reference should still be put back after this function, since it
704  * always bumps the usage counter regardless. See the sketch below.
705  */
706 void xe_pm_runtime_get_noresume(struct xe_device *xe)
707 {
708 	bool ref;
709 
710 	ref = xe_pm_runtime_get_if_in_use(xe);
711 
712 	if (!ref) {
713 		pm_runtime_get_noresume(xe->drm.dev);
714 		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
715 			 "Missing outer runtime PM protection\n");
716 	}
717 }
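
/*
 * Example (illustrative sketch): an inner helper that is always reached from
 * an outer path which already holds a runtime PM reference, e.g. the ioctl
 * sketch above. The helper names are hypothetical.
 *
 *	static void xe_foo_inner(struct xe_device *xe)
 *	{
 *		xe_pm_runtime_get_noresume(xe);	// warns if the outer ref is missing
 *		xe_foo_touch_hw(xe);		// hypothetical HW access
 *		xe_pm_runtime_put(xe);		// counter was bumped, so always put
 *	}
 */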
718 
719 /**
720  * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
721  * @xe: xe device instance
722  *
723  * Returns: True if device is awake and the reference was taken, false otherwise.
724  */
725 bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
726 {
727 	if (xe_pm_read_callback_task(xe) == current) {
728 		/* The device is awake, grab the ref and move on */
729 		pm_runtime_get_noresume(xe->drm.dev);
730 		return true;
731 	}
732 
733 	xe_rpm_might_enter_cb(xe);
734 	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
735 }
736 
737 /**
738  * xe_pm_assert_unbounded_bridge - Disable PM on an unbound PCIe parent bridge
739  * @xe: xe device instance
740  */
741 void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
742 {
743 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
744 	struct pci_dev *bridge = pci_upstream_bridge(pdev);
745 
746 	if (!bridge)
747 		return;
748 
749 	if (!bridge->driver) {
750 		drm_warn(&xe->drm, "unbound parent PCI bridge, device won't support any PM\n");
751 		device_set_pm_not_required(&pdev->dev);
752 	}
753 }
754 
755 /**
756  * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
757  * @xe: xe device instance
758  * @threshold: VRAM size in MiB for the D3cold threshold
759  *
760  * Returns 0 for success, negative error code otherwise.
761  */
762 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
763 {
764 	struct ttm_resource_manager *man;
765 	u32 vram_total_mb = 0;
766 	int i;
767 
768 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
769 		man = ttm_manager_type(&xe->ttm, i);
770 		if (man)
771 			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
772 	}
773 
774 	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
775 
776 	if (threshold > vram_total_mb)
777 		return -EINVAL;
778 
779 	mutex_lock(&xe->d3cold.lock);
780 	xe->d3cold.vram_threshold = threshold;
781 	mutex_unlock(&xe->d3cold.lock);
782 
783 	return 0;
784 }
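
/*
 * Example (illustrative sketch): adjusting the D3Cold threshold at runtime,
 * e.g. from a sysfs store handler. The threshold is in MiB and must not
 * exceed the total VRAM size; the calling context here is hypothetical.
 *
 *	err = xe_pm_set_vram_threshold(xe, 300);	// allow D3Cold below 300 MiB of VRAM use
 *	if (err)
 *		return err;
 */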
785 
786 /**
787  * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
788  * @xe: xe device instance
789  *
790  * To be called from the runtime_pm idle callback.
791  * Checks all the D3Cold conditions ahead of runtime suspend.
792  */
793 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
794 {
795 	struct ttm_resource_manager *man;
796 	u32 total_vram_used_mb = 0;
797 	u64 vram_used;
798 	int i;
799 
800 	if (!xe->d3cold.capable) {
801 		xe->d3cold.allowed = false;
802 		return;
803 	}
804 
805 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
806 		man = ttm_manager_type(&xe->ttm, i);
807 		if (man) {
808 			vram_used = ttm_resource_manager_usage(man);
809 			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
810 		}
811 	}
812 
813 	mutex_lock(&xe->d3cold.lock);
814 
815 	if (total_vram_used_mb < xe->d3cold.vram_threshold)
816 		xe->d3cold.allowed = true;
817 	else
818 		xe->d3cold.allowed = false;
819 
820 	mutex_unlock(&xe->d3cold.lock);
821 }
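
/*
 * Example (illustrative sketch): refreshing the D3Cold decision from a
 * runtime_idle callback, so it is evaluated ahead of every runtime suspend.
 * The callback name is hypothetical; pdev_to_xe_device() is assumed to be the
 * usual pci_dev to xe_device conversion helper.
 *
 *	static int xe_foo_runtime_idle(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *
 *		return 0;
 *	}
 */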
822 
823 /**
824  * xe_pm_module_init() - Perform xe_pm specific module initialization.
825  *
826  * Return: 0 on success. Currently doesn't fail.
827  */
828 int __init xe_pm_module_init(void)
829 {
830 	xe_pm_runtime_lockdep_prime();
831 	return 0;
832 }
833