xref: /linux/drivers/gpu/drm/xe/xe_pm.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_trace.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states and
 * for the opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general this is OS initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
 * S4 (suspend to disk). The main functions here are `xe_pm_suspend` and
 * `xe_pm_resume`. They are the entry points for suspending to and resuming from
 * these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI with the help of the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power stays on to keep memory
 * alive and allow a quicker, lower latency resume, or D3cold, where Vcc power is
 * off for better power savings.
 * Vcc for the PCI hierarchy can only be controlled at the PCI root port level,
 * while the device driver can be behind multiple bridges/switches and paired
 * with other devices. For this reason, the PCI subsystem cannot perform the
 * transition towards D3cold on its own; the lowest runtime PM state possible
 * from the PCI subsystem is D3hot. Then, if all the paired devices under the
 * same root port are in D3hot, ACPI assists and runs its own methods (_PR3 and
 * _OFF) to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend, based on runtime conditions such as VRAM usage, for instance to
 * guarantee a quick, low latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows device
 * drivers to indicate when they can be runtime suspended, so the device can be
 * put in D3 (if supported), or deeper package sleep states (PC-states) and/or
 * other low level power states can be allowed. The Xe PM component provides
 * the `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
 * subsystem calls on the transitions to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver uses to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, and GPU execution, as shown in the sketch
 * below.
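 *
 * A minimal illustrative sketch of that outer-level pattern (sketch only; the
 * work in the middle is hypothetical and real ioctl handlers use
 * xe_pm_runtime_get_ioctl() instead):
 *
 *	xe_pm_runtime_get(xe);
 *	... access HW registers or VRAM, submit work ...
 *	xe_pm_runtime_put(xe);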
 *
 * This component is not responsible for GT idleness (RC6) nor GT frequency
 * management (RPS).
 */

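/*
 * Dummy lockdep maps used to teach lockdep about the locking that may happen
 * inside the runtime PM callbacks; see xe_rpm_might_enter_cb() and
 * xe_pm_runtime_lockdep_prime() below.
 */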
#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable && !xe->info.has_sriov;
}

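/*
 * Pick the lockdep map matching the worst case the runtime PM callbacks may
 * hit: the d3cold map when runtime resume is not reclaim safe (eviction and
 * restore may take dma_resv locks), the nod3cold map when it is.
 */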
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racy... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe);
			goto err;
		}
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}

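/*
 * D3cold is only reported as capable when the root port can signal PME# from
 * D3cold and exposes an ACPI _PR3 power resource to actually cut Vcc;
 * otherwise the device cannot go below D3hot.
 */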
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

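/*
 * Opt the device into runtime PM: mark it active, enable autosuspend with a
 * 1 second delay and drop a usage reference so it may suspend once idle.
 */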
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
	 *       this option on integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

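/**
 * xe_pm_init_early - Earliest PM initialization
 * @xe: xe device instance
 *
 * Set up the bookkeeping (VRAM userfault list and the mutexes protecting it
 * and the d3cold state) that the later PM entry points rely on.
 *
 * Returns 0 for success, negative error code otherwise.
 */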
int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}

/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

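/*
 * Record the task that is currently executing a runtime PM callback, so that
 * get/put calls made from inside the callback itself can be detected and
 * short-circuited instead of recursing into the rpm core.
 */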
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

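/* Return the task currently running a runtime PM callback, or NULL if none. */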
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can only be used as an unreliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
 *
 * Returns true if the PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_runtime_pm_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since the rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress.  With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	/*
	 * Hold the lock over the entire list operation, as xe_ttm_bo_destroy
	 * and xe_bo_move_notify also check for and delete the bo entry from
	 * the user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);
out:
	if (err)
		xe_display_pm_runtime_resume(xe);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the callers locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds).  Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
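 *
 * An illustrative usage sketch only (the handler body is hypothetical); the
 * reference is put back unconditionally because the usage counter is bumped
 * even when the underlying resume fails:
 *
 *	ret = xe_pm_runtime_get_ioctl(xe);
 *	if (ret >= 0)
 *		ret = do_the_ioctl_work(xe);
 *	xe_pm_runtime_put(xe);
 *	return ret;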
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING;
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should be put back after this function regardless, since it
 * will always bump the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on an unbound PCIe parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during the runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}

/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}