xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision 2bb3fc536d692d43cd55396ecff73c7691eeae85)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_pm.h"
7 
8 #include <linux/pm_runtime.h>
9 
10 #include <drm/drm_managed.h>
11 #include <drm/ttm/ttm_placement.h>
12 
13 #include "display/xe_display.h"
14 #include "xe_bo.h"
15 #include "xe_bo_evict.h"
16 #include "xe_device.h"
17 #include "xe_device_sysfs.h"
18 #include "xe_ggtt.h"
19 #include "xe_gt.h"
20 #include "xe_guc.h"
21 #include "xe_irq.h"
22 #include "xe_pcode.h"
23 #include "xe_trace.h"
24 #include "xe_wa.h"
25 
26 /**
27  * DOC: Xe Power Management
28  *
29  * Xe PM implements the main routines for both system level suspend states and
30  * for the opportunistic runtime suspend states.
31  *
32  * System Level Suspend (S-States) - In general this is OS-initiated suspend
33  * driven by ACPI to reach S0ix (a.k.a. S2idle, freeze), S3 (suspend to RAM) or
34  * S4 (suspend to disk). The main functions here are `xe_pm_suspend` and
35  * `xe_pm_resume`; they are the entry points for these suspend/resume transitions.
36  *
37  * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
38  * state D3, controlled by the PCI subsystem and ACPI, with help from the
39  * runtime_pm infrastructure.
40  * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep memory
41  * alive and allow a quicker, lower-latency resume, or D3cold, where Vcc power is
42  * off for better power savings.
43  * Vcc power for the PCI hierarchy can only be cut at the PCI root port level,
44  * while the device driver can sit behind multiple bridges/switches and be
45  * paired with other devices. For this reason, the PCI subsystem cannot perform
46  * the transition towards D3cold; the lowest runtime PM state possible from the
47  * PCI subsystem is D3hot. Then, if all the paired devices under the same root
48  * port are in D3hot, ACPI assists and runs its own methods (_PR3 and _OFF)
49  * to perform the transition from D3hot to D3cold. Xe may disallow this
50  * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
51  * suspend, based on runtime conditions such as VRAM usage, e.g. to guarantee a
52  * quick, low-latency resume.
53  *
54  * Runtime PM - This infrastructure, provided by the Linux kernel, allows
55  * device drivers to indicate when they can be runtime suspended, so the device
56  * can be put into D3 (if supported), allowing deeper package sleep states
57  * (PC-states) and/or other low-level power states. The Xe PM component provides
58  * the `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the
59  * PCI subsystem calls on the transitions to/from runtime suspend.
60  *
61  * Also, Xe PM provides get and put functions that the Xe driver uses to
62  * indicate activity. In order to avoid locking complications with the memory
63  * management, whenever possible, these get and put functions need to be called
64  * from the higher/outer levels.
65  * The main cases that need to be protected from the outer levels are: IOCTL,
66  * sysfs, debugfs, dma-buf sharing, GPU execution (see the example sketch below).
67  *
68  * This component is not responsible for GT idleness (RC6) nor GT frequency
69  * management (RPS).
70  */
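/*
 * Illustrative sketch (not part of the driver): how an outer-level entry
 * point such as an ioctl handler is expected to bracket its work with the
 * runtime PM helpers defined below. The function name and the "do work"
 * placeholder are hypothetical; only the xe_pm_runtime_* calls are real.
 */
static int __maybe_unused xe_pm_example_ioctl_body(struct xe_device *xe)
{
	int ret;

	/* Wake the device (or fail) before touching any HW or VRAM state. */
	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret < 0)
		return ret;

	/* ... do the actual ioctl work while the device is held awake ... */

	/* Drop the reference; the device may runtime suspend again later. */
	xe_pm_runtime_put(xe);

	return 0;
}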
71 
72 #ifdef CONFIG_LOCKDEP
73 static struct lockdep_map xe_pm_runtime_d3cold_map = {
74 	.name = "xe_rpm_d3cold_map"
75 };
76 
77 static struct lockdep_map xe_pm_runtime_nod3cold_map = {
78 	.name = "xe_rpm_nod3cold_map"
79 };
80 #endif
81 
82 static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
83 {
84 	return !xe->d3cold.capable && !xe->info.has_sriov;
85 }
86 
87 static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
88 {
89 	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
90 			 &xe_pm_runtime_nod3cold_map :
91 			 &xe_pm_runtime_d3cold_map);
92 }
93 
94 static void xe_rpm_lockmap_release(const struct xe_device *xe)
95 {
96 	lock_map_release(xe_rpm_reclaim_safe(xe) ?
97 			 &xe_pm_runtime_nod3cold_map :
98 			 &xe_pm_runtime_d3cold_map);
99 }
100 
101 /**
102  * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
103  * @xe: xe device instance
104  *
105  * Return: 0 on success, negative error code otherwise.
106  */
107 int xe_pm_suspend(struct xe_device *xe)
108 {
109 	struct xe_gt *gt;
110 	u8 id;
111 	int err;
112 
113 	drm_dbg(&xe->drm, "Suspending device\n");
114 	trace_xe_pm_suspend(xe, __builtin_return_address(0));
115 
116 	for_each_gt(gt, xe, id)
117 		xe_gt_suspend_prepare(gt);
118 
119 	xe_display_pm_suspend(xe, false);
120 
121 	/* FIXME: Super racy... */
122 	err = xe_bo_evict_all(xe);
123 	if (err)
124 		goto err;
125 
126 	for_each_gt(gt, xe, id) {
127 		err = xe_gt_suspend(gt);
128 		if (err) {
129 			xe_display_pm_resume(xe, false);
130 			goto err;
131 		}
132 	}
133 
134 	xe_irq_suspend(xe);
135 
136 	xe_display_pm_suspend_late(xe);
137 
138 	drm_dbg(&xe->drm, "Device suspended\n");
139 	return 0;
140 err:
141 	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
142 	return err;
143 }
144 
145 /**
146  * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
147  * @xe: xe device instance
148  *
149  * Return: 0 on success, negative error code otherwise.
150  */
151 int xe_pm_resume(struct xe_device *xe)
152 {
153 	struct xe_tile *tile;
154 	struct xe_gt *gt;
155 	u8 id;
156 	int err;
157 
158 	drm_dbg(&xe->drm, "Resuming device\n");
159 	trace_xe_pm_resume(xe, __builtin_return_address(0));
160 
161 	for_each_tile(tile, xe, id)
162 		xe_wa_apply_tile_workarounds(tile);
163 
164 	err = xe_pcode_ready(xe, true);
165 	if (err)
166 		return err;
167 
168 	xe_display_pm_resume_early(xe);
169 
170 	/*
171 	 * This only restores pinned memory which is the memory required for the
172 	 * GT(s) to resume.
173 	 */
174 	err = xe_bo_restore_kernel(xe);
175 	if (err)
176 		goto err;
177 
178 	xe_irq_resume(xe);
179 
180 	for_each_gt(gt, xe, id)
181 		xe_gt_resume(gt);
182 
183 	xe_display_pm_resume(xe, false);
184 
185 	err = xe_bo_restore_user(xe);
186 	if (err)
187 		goto err;
188 
189 	drm_dbg(&xe->drm, "Device resumed\n");
190 	return 0;
191 err:
192 	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
193 	return err;
194 }
195 
196 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
197 {
198 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
199 	struct pci_dev *root_pdev;
200 
201 	root_pdev = pcie_find_root_port(pdev);
202 	if (!root_pdev)
203 		return false;
204 
205 	/* D3Cold requires PME capability */
206 	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
207 		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
208 		return false;
209 	}
210 
211 	/* D3Cold requires _PR3 power resource */
212 	if (!pci_pr3_present(root_pdev)) {
213 		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
214 		return false;
215 	}
216 
217 	return true;
218 }
219 
220 static void xe_pm_runtime_init(struct xe_device *xe)
221 {
222 	struct device *dev = xe->drm.dev;
223 
224 	/*
225 	 * Disable the system suspend direct complete optimization.
226 	 * We need to ensure that the regular device suspend/resume functions
227 	 * are called since our runtime_pm cannot guarantee local memory
228 	 * eviction for d3cold.
229 	 * TODO: Check the HDA audio dependencies claimed by i915, and then extend
230 	 *       this option to integrated graphics as well.
231 	 */
232 	if (IS_DGFX(xe))
233 		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
234 
235 	pm_runtime_use_autosuspend(dev);
236 	pm_runtime_set_autosuspend_delay(dev, 1000);
237 	pm_runtime_set_active(dev);
238 	pm_runtime_allow(dev);
239 	pm_runtime_mark_last_busy(dev);
240 	pm_runtime_put(dev);
241 }
242 
243 int xe_pm_init_early(struct xe_device *xe)
244 {
245 	int err;
246 
247 	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
248 
249 	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
250 	if (err)
251 		return err;
252 
253 	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
254 	if (err)
255 		return err;
256 
257 	return 0;
258 }
259 
260 /**
261  * xe_pm_init - Initialize Xe Power Management
262  * @xe: xe device instance
263  *
264  * This component is responsible for System and Device sleep states.
265  *
266  * Returns 0 for success, negative error code otherwise.
267  */
268 int xe_pm_init(struct xe_device *xe)
269 {
270 	int err;
271 
272 	/* For now suspend/resume is only allowed with GuC */
273 	if (!xe_device_uc_enabled(xe))
274 		return 0;
275 
276 	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
277 
278 	if (xe->d3cold.capable) {
279 		err = xe_device_sysfs_init(xe);
280 		if (err)
281 			return err;
282 
283 		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
284 		if (err)
285 			return err;
286 	}
287 
288 	xe_pm_runtime_init(xe);
289 
290 	return 0;
291 }
292 
293 /**
294  * xe_pm_runtime_fini - Finalize Runtime PM
295  * @xe: xe device instance
296  */
297 void xe_pm_runtime_fini(struct xe_device *xe)
298 {
299 	struct device *dev = xe->drm.dev;
300 
301 	pm_runtime_get_sync(dev);
302 	pm_runtime_forbid(dev);
303 }
304 
305 static void xe_pm_write_callback_task(struct xe_device *xe,
306 				      struct task_struct *task)
307 {
308 	WRITE_ONCE(xe->pm_callback_task, task);
309 
310 	/*
311 	 * Just in case it's somehow possible for our writes to be reordered to
312 	 * the extent that something else re-uses the task written in
313 	 * pm_callback_task. For example after returning from the callback, but
314 	 * before the reordered write that resets pm_callback_task back to NULL.
315 	 */
316 	smp_mb(); /* pairs with xe_pm_read_callback_task */
317 }
318 
319 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
320 {
321 	smp_mb(); /* pairs with xe_pm_write_callback_task */
322 
323 	return READ_ONCE(xe->pm_callback_task);
324 }
325 
326 /**
327  * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
328  * @xe: xe device instance
329  *
330  * This does not provide any guarantee that the device is going to remain
331  * suspended as it might be racing with the runtime state transitions.
332  * It can be used only as a best-effort assertion, e.g. to check that we are not
333  * in the sleep state while trying to access some memory (see the sketch below).
334  *
335  * Returns true if PCI device is suspended, false otherwise.
336  */
337 bool xe_pm_runtime_suspended(struct xe_device *xe)
338 {
339 	return pm_runtime_suspended(xe->drm.dev);
340 }
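/*
 * Illustrative sketch (not part of the driver): given the best-effort nature
 * of the check above, it fits assertions rather than flow control. The
 * helper name below is hypothetical.
 */
static void __maybe_unused xe_pm_example_assert_awake(struct xe_device *xe)
{
	/* Catch paths touching the device without an outer runtime PM ref. */
	drm_WARN_ON(&xe->drm, xe_pm_runtime_suspended(xe));
}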
341 
342 /**
343  * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
344  * @xe: xe device instance
345  *
346  * Returns 0 for success, negative error code otherwise.
347  */
348 int xe_pm_runtime_suspend(struct xe_device *xe)
349 {
350 	struct xe_bo *bo, *on;
351 	struct xe_gt *gt;
352 	u8 id;
353 	int err = 0;
354 
355 	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
356 	/* Disable access_ongoing asserts and prevent recursive pm calls */
357 	xe_pm_write_callback_task(xe, current);
358 
359 	/*
360 	 * The actual xe_pm_runtime_put() is always async underneath, so
361 	 * exactly where that is called should make no difference to us. However,
362 	 * we still need to be very careful with the locks that this callback
363 	 * acquires and the locks that are acquired and held by any callers of
364 	 * xe_runtime_pm_get(). We already have the matching annotation
365 	 * on that side, but we also need it here. For example lockdep should be
366 	 * able to tell us if the following scenario is in theory possible:
367 	 *
368 	 * CPU0                          | CPU1 (kworker)
369 	 * lock(A)                       |
370 	 *                               | xe_pm_runtime_suspend()
371 	 *                               |      lock(A)
372 	 * xe_pm_runtime_get()           |
373 	 *
374 	 * This will clearly deadlock since rpm core needs to wait for
375 	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
376 	 * on CPU0 which prevents CPU1 making forward progress.  With the
377 	 * annotation here and in xe_pm_runtime_get() lockdep will see
378 	 * the potential lock inversion and give us a nice splat.
379 	 */
380 	xe_rpm_lockmap_acquire(xe);
381 
382 	/*
383 	 * Hold the lock over the entire list operation: xe_ttm_bo_destroy and
384 	 * xe_bo_move_notify also check for and delete bo entries from this list.
385 	 */
386 	mutex_lock(&xe->mem_access.vram_userfault.lock);
387 	list_for_each_entry_safe(bo, on,
388 				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
389 		xe_bo_runtime_pm_release_mmap_offset(bo);
390 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
391 
392 	xe_display_pm_runtime_suspend(xe);
393 
394 	if (xe->d3cold.allowed) {
395 		xe_display_pm_suspend(xe, true);
396 
397 		err = xe_bo_evict_all(xe);
398 		if (err)
399 			goto out;
400 	}
401 
402 	for_each_gt(gt, xe, id) {
403 		err = xe_gt_suspend(gt);
404 		if (err)
405 			goto out;
406 	}
407 
408 	xe_irq_suspend(xe);
409 
410 	if (xe->d3cold.allowed)
411 		xe_display_pm_suspend_late(xe);
412 out:
413 	if (err)
414 		xe_display_pm_resume(xe, true);
415 	xe_rpm_lockmap_release(xe);
416 	xe_pm_write_callback_task(xe, NULL);
417 	return err;
418 }
419 
420 /**
421  * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
422  * @xe: xe device instance
423  *
424  * Returns 0 for success, negative error code otherwise.
425  */
426 int xe_pm_runtime_resume(struct xe_device *xe)
427 {
428 	struct xe_gt *gt;
429 	u8 id;
430 	int err = 0;
431 
432 	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
433 	/* Disable access_ongoing asserts and prevent recursive pm calls */
434 	xe_pm_write_callback_task(xe, current);
435 
436 	xe_rpm_lockmap_acquire(xe);
437 
438 	if (xe->d3cold.allowed) {
439 		err = xe_pcode_ready(xe, true);
440 		if (err)
441 			goto out;
442 
443 		xe_display_pm_resume_early(xe);
444 
445 		/*
446 		 * This only restores pinned memory which is the memory
447 		 * required for the GT(s) to resume.
448 		 */
449 		err = xe_bo_restore_kernel(xe);
450 		if (err)
451 			goto out;
452 	}
453 
454 	xe_irq_resume(xe);
455 
456 	for_each_gt(gt, xe, id)
457 		xe_gt_resume(gt);
458 
459 	xe_display_pm_runtime_resume(xe);
460 
461 	if (xe->d3cold.allowed) {
462 		err = xe_bo_restore_user(xe);
463 		if (err)
464 			goto out;
465 	}
466 
467 out:
468 	xe_rpm_lockmap_release(xe);
469 	xe_pm_write_callback_task(xe, NULL);
470 	return err;
471 }
472 
473 /*
474  * For places where resume is synchronous it can be quite easy to deadlock
475  * if we are not careful. Also in practice it might be quite timing
476  * sensitive to ever see the 0 -> 1 transition with the caller's locks
477  * held, so deadlocks might exist but are hard for lockdep to ever see.
478  * With this in mind, help lockdep learn about the potentially scary
479  * stuff that can happen inside the runtime_resume callback by acquiring
480  * a dummy lock (it doesn't protect anything and gets compiled out on
481  * non-debug builds).  Lockdep then only needs to see the
482  * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
483  * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
484  * For example if the (callers_locks) are ever grabbed in the
485  * runtime_resume callback, lockdep should give us a nice splat.
486  */
487 static void xe_rpm_might_enter_cb(const struct xe_device *xe)
488 {
489 	xe_rpm_lockmap_acquire(xe);
490 	xe_rpm_lockmap_release(xe);
491 }
492 
493 /*
494  * Prime the lockdep maps for known locking orders that need to
495  * be supported but that may not always occur on all systems.
496  */
497 static void xe_pm_runtime_lockdep_prime(void)
498 {
499 	struct dma_resv lockdep_resv;
500 
501 	dma_resv_init(&lockdep_resv);
502 	lock_map_acquire(&xe_pm_runtime_d3cold_map);
503 	/* D3Cold takes the dma_resv locks to evict bos */
504 	dma_resv_lock(&lockdep_resv, NULL);
505 	dma_resv_unlock(&lockdep_resv);
506 	lock_map_release(&xe_pm_runtime_d3cold_map);
507 
508 	/* Shrinkers might like to wake up the device under reclaim. */
509 	fs_reclaim_acquire(GFP_KERNEL);
510 	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
511 	lock_map_release(&xe_pm_runtime_nod3cold_map);
512 	fs_reclaim_release(GFP_KERNEL);
513 }
514 
515 /**
516  * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
517  * @xe: xe device instance
518  */
519 void xe_pm_runtime_get(struct xe_device *xe)
520 {
521 	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
522 	pm_runtime_get_noresume(xe->drm.dev);
523 
524 	if (xe_pm_read_callback_task(xe) == current)
525 		return;
526 
527 	xe_rpm_might_enter_cb(xe);
528 	pm_runtime_resume(xe->drm.dev);
529 }
530 
531 /**
532  * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
533  * @xe: xe device instance
534  */
535 void xe_pm_runtime_put(struct xe_device *xe)
536 {
537 	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
538 	if (xe_pm_read_callback_task(xe) == current) {
539 		pm_runtime_put_noidle(xe->drm.dev);
540 	} else {
541 		pm_runtime_mark_last_busy(xe->drm.dev);
542 		pm_runtime_put(xe->drm.dev);
543 	}
544 }
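/*
 * Illustrative sketch (not part of the driver): sysfs/debugfs style paths
 * that cannot propagate a wake-up error typically pair the unconditional
 * get/put above. The callback shown here is hypothetical.
 */
static void __maybe_unused xe_pm_example_sysfs_access(struct xe_device *xe)
{
	xe_pm_runtime_get(xe);

	/* ... read or write the HW-backed attribute ... */

	xe_pm_runtime_put(xe);
}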
545 
546 /**
547  * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
548  * @xe: xe device instance
549  *
550  * Returns: Any number greater than or equal to 0 for success, negative error
551  * code otherwise.
552  */
553 int xe_pm_runtime_get_ioctl(struct xe_device *xe)
554 {
555 	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
556 	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
557 		return -ELOOP;
558 
559 	xe_rpm_might_enter_cb(xe);
560 	return pm_runtime_get_sync(xe->drm.dev);
561 }
562 
563 /**
564  * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
565  * @xe: xe device instance
566  *
567  * Return: True if the device is awake (regardless of the previous number of references)
568  * and a new reference was taken, false otherwise.
569  */
570 bool xe_pm_runtime_get_if_active(struct xe_device *xe)
571 {
572 	return pm_runtime_get_if_active(xe->drm.dev) > 0;
573 }
574 
575 /**
576  * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
577  * @xe: xe device instance
578  *
579  * Return: True if the device is awake, a previous reference had already been taken,
580  * and a new reference was now taken, false otherwise.
581  */
582 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
583 {
584 	if (xe_pm_read_callback_task(xe) == current) {
585 		/* The device is awake, grab the ref and move on */
586 		pm_runtime_get_noresume(xe->drm.dev);
587 		return true;
588 	}
589 
590 	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
591 }
592 
593 /**
594  * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
595  * @xe: xe device instance
596  *
597  * This function should be used in inner places where it is surely already
598  * protected by outer-bound callers of `xe_pm_runtime_get`.
599  * It will warn if not protected.
600  * The reference should still be put back after this function, since it
601  * always bumps the usage counter, even when it warns.
602  */
603 void xe_pm_runtime_get_noresume(struct xe_device *xe)
604 {
605 	bool ref;
606 
607 	ref = xe_pm_runtime_get_if_in_use(xe);
608 
609 	if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
610 		pm_runtime_get_noresume(xe->drm.dev);
611 }
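/*
 * Illustrative sketch (not part of the driver): an inner helper that is only
 * reached from outer entry points already holding a runtime PM reference can
 * use the noresume variant above. The helper name and the placeholder work
 * are hypothetical.
 */
static void __maybe_unused xe_pm_example_inner_helper(struct xe_device *xe)
{
	/* Warns (but still bumps the usage counter) if the outer ref is missing. */
	xe_pm_runtime_get_noresume(xe);

	/* ... touch state that must not race with runtime suspend ... */

	xe_pm_runtime_put(xe);
}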
612 
613 /**
614  * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
615  * @xe: xe device instance
616  *
617  * Returns: True if device is awake and the reference was taken, false otherwise.
618  */
619 bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
620 {
621 	if (xe_pm_read_callback_task(xe) == current) {
622 		/* The device is awake, grab the ref and move on */
623 		pm_runtime_get_noresume(xe->drm.dev);
624 		return true;
625 	}
626 
627 	xe_rpm_might_enter_cb(xe);
628 	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
629 }
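/*
 * Illustrative sketch (not part of the driver): paths that can tolerate the
 * device staying asleep, e.g. opportunistic background work, can use the
 * resume-and-get variant above and back off on failure. The function name
 * and the placeholder work are hypothetical.
 */
static void __maybe_unused xe_pm_example_opportunistic_work(struct xe_device *xe)
{
	if (!xe_pm_runtime_resume_and_get(xe))
		return;	/* no reference is held on failure, nothing to put */

	/* ... do the deferred work while holding the reference ... */

	xe_pm_runtime_put(xe);
}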
630 
631 /**
632  * xe_pm_assert_unbounded_bridge - Disable PM if the parent PCIe bridge is unbound
633  * @xe: xe device instance
634  */
635 void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
636 {
637 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
638 	struct pci_dev *bridge = pci_upstream_bridge(pdev);
639 
640 	if (!bridge)
641 		return;
642 
643 	if (!bridge->driver) {
644 		drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM.\n");
645 		device_set_pm_not_required(&pdev->dev);
646 	}
647 }
648 
649 /**
650  * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
651  * @xe: xe device instance
652  * @threshold: VRAM size in MiB for the D3cold threshold
653  *
654  * Returns 0 for success, negative error code otherwise.
655  */
656 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
657 {
658 	struct ttm_resource_manager *man;
659 	u32 vram_total_mb = 0;
660 	int i;
661 
662 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
663 		man = ttm_manager_type(&xe->ttm, i);
664 		if (man)
665 			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
666 	}
667 
668 	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
669 
670 	if (threshold > vram_total_mb)
671 		return -EINVAL;
672 
673 	mutex_lock(&xe->d3cold.lock);
674 	xe->d3cold.vram_threshold = threshold;
675 	mutex_unlock(&xe->d3cold.lock);
676 
677 	return 0;
678 }
679 
680 /**
681  * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
682  * @xe: xe device instance
683  *
684  * To be called during the runtime_pm idle callback.
685  * Checks all the D3Cold conditions ahead of runtime suspend.
686  */
687 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
688 {
689 	struct ttm_resource_manager *man;
690 	u32 total_vram_used_mb = 0;
691 	u64 vram_used;
692 	int i;
693 
694 	if (!xe->d3cold.capable) {
695 		xe->d3cold.allowed = false;
696 		return;
697 	}
698 
699 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
700 		man = ttm_manager_type(&xe->ttm, i);
701 		if (man) {
702 			vram_used = ttm_resource_manager_usage(man);
703 			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
704 		}
705 	}
706 
707 	mutex_lock(&xe->d3cold.lock);
708 
709 	if (total_vram_used_mb < xe->d3cold.vram_threshold)
710 		xe->d3cold.allowed = true;
711 	else
712 		xe->d3cold.allowed = false;
713 
714 	mutex_unlock(&xe->d3cold.lock);
715 
716 	drm_dbg(&xe->drm,
717 		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
718 }
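/*
 * Illustrative sketch (not part of the driver): how a runtime_pm idle
 * callback is expected to use the toggle above before the PM core proceeds
 * towards runtime suspend. In the real driver this lives in the PCI glue
 * code; the reduced callback body here is hypothetical.
 */
static int __maybe_unused xe_pm_example_runtime_idle(struct xe_device *xe)
{
	xe_pm_d3cold_allowed_toggle(xe);

	return 0;	/* let the PM core continue with runtime suspend */
}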
719 
720 /**
721  * xe_pm_module_init() - Perform xe_pm specific module initialization.
722  *
723  * Return: 0 on success. Currently doesn't fail.
724  */
725 int __init xe_pm_module_init(void)
726 {
727 	xe_pm_runtime_lockdep_prime();
728 	return 0;
729 }
730