// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pm.h"

#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>

#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_wa.h"

/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system-level suspend states and
 * opportunistic runtime suspend states.
 *
 * System Level Suspend (S-States) - In general, this is OS-initiated suspend
 * driven by ACPI for achieving S0ix (a.k.a. S2idle, freeze), S3 (suspend to ram),
 * or S4 (disk). The main functions here are `xe_pm_suspend` and `xe_pm_resume`.
 * They are the main entry points for suspending to and resuming from these states.
 *
 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
 * state D3, controlled by the PCI subsystem and ACPI with help from the
 * runtime_pm infrastructure.
 * PCI D3 is special and can mean D3hot, where Vcc power is kept on so memory
 * stays alive and resume is quicker and lower latency, or D3Cold, where Vcc
 * power is off for better power savings.
 * Vcc of the PCI hierarchy can only be controlled at the PCI root port level,
 * while the device driver can be behind multiple bridges/switches and
 * paired with other devices. For this reason, the PCI subsystem cannot perform
 * the transition towards D3Cold. The lowest runtime PM state possible from the
 * PCI subsystem is D3hot. Then, if all the paired devices under the same root
 * port are in D3hot, ACPI will assist and run its own methods (_PR3 and _OFF)
 * to perform the transition from D3hot to D3cold. Xe may disallow this
 * transition by calling pci_d3cold_disable(root_pdev) before going to runtime
 * suspend, based on runtime conditions such as VRAM usage, for instance to
 * guarantee a quick and low-latency resume.
 *
 * Runtime PM - This infrastructure provided by the Linux kernel allows
 * device drivers to indicate when they can be runtime suspended, so the device
 * can be put in D3 (if supported), allowing deeper package sleep states
 * (PC-states) and/or other low-level power states. The Xe PM component provides
 * the `xe_pm_runtime_suspend` and `xe_pm_runtime_resume` functions that the PCI
 * subsystem calls when transitioning to/from runtime suspend.
 *
 * Also, Xe PM provides get and put functions that the Xe driver uses to
 * indicate activity. In order to avoid locking complications with the memory
 * management, whenever possible, these get and put functions need to be called
 * from the higher/outer levels.
 * The main cases that need to be protected from the outer levels are: IOCTL,
 * sysfs, debugfs, dma-buf sharing, and GPU execution.
 *
 * This component is not responsible for GT idleness (RC6) or GT frequency
 * management (RPS).
 */
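
/*
 * Illustrative sketch, not part of the original file: an outer-level entry
 * point (sysfs, debugfs, a worker, ...) is expected to bracket its device
 * access with the get/put helpers below. The helper example_read_hw() is a
 * hypothetical placeholder for the actual hardware access:
 *
 *	xe_pm_runtime_get(xe);
 *	val = example_read_hw(xe);
 *	xe_pm_runtime_put(xe);
 *
 * IOCTL paths use xe_pm_runtime_get_ioctl() instead; see the example further
 * below.
 */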

#ifdef CONFIG_LOCKDEP
static struct lockdep_map xe_pm_runtime_d3cold_map = {
	.name = "xe_rpm_d3cold_map"
};

static struct lockdep_map xe_pm_runtime_nod3cold_map = {
	.name = "xe_rpm_nod3cold_map"
};
#endif

/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable;
}

static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto err;

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* FIXME: Super racey... */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err_display;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto err_display;
	}

	xe_irq_suspend(xe);

	xe_display_pm_suspend_late(xe);

	xe_i2c_pm_suspend(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;

err_display:
	xe_display_pm_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}

/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/*
	 * This only restores pinned memory which is the memory required for the
	 * GT(s) to resume.
	 */
	err = xe_bo_restore_early(xe);
	if (err)
		goto err;

	xe_i2c_pm_resume(xe, xe->d3cold.allowed);

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_late(xe);
	if (err)
		goto err;

	xe_pxp_pm_resume(xe->pxp);

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}
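
/*
 * Illustrative sketch, not taken from this file: xe_pm_suspend() and
 * xe_pm_resume() are meant to be wired into the driver's dev_pm_ops by the
 * PCI glue code. The wrapper names and their exact contents below are
 * assumptions for illustration only:
 *
 *	static int xe_pci_suspend(struct device *dev)
 *	{
 *		return xe_pm_suspend(pdev_to_xe_device(to_pci_dev(dev)));
 *	}
 *
 *	static int xe_pci_resume(struct device *dev)
 *	{
 *		return xe_pm_resume(pdev_to_xe_device(to_pci_dev(dev)));
 *	}
 *
 *	static const struct dev_pm_ops xe_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
 *	};
 */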

static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}

static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/*
	 * Disable the system suspend direct complete optimization.
	 * We need to ensure that the regular device suspend/resume functions
	 * are called since our runtime_pm cannot guarantee local memory
	 * eviction for d3cold.
	 * TODO: Check HDA audio dependencies claimed by i915, and then enforce
	 *       this option to integrated graphics as well.
	 */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}

int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
	return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */

static u32 vram_threshold_value(struct xe_device *xe)
{
	/* FIXME: D3Cold temporarily disabled by default on BMG */
	if (xe->info.platform == XE_BATTLEMAGE)
		return 0;

	return DEFAULT_VRAM_THRESHOLD;
}

static void xe_pm_wake_rebind_workers(struct xe_device *xe)
{
	struct xe_vm *vm, *next;

	mutex_lock(&xe->rebind_resume_lock);
	list_for_each_entry_safe(vm, next, &xe->rebind_resume_list,
				 preempt.pm_activate_link) {
		list_del_init(&vm->preempt.pm_activate_link);
		xe_vm_resume_rebind_worker(vm);
	}
	mutex_unlock(&xe->rebind_resume_lock);
}

static int xe_pm_notifier_callback(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
	int err = 0;

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		reinit_completion(&xe->pm_block);
		xe_pm_runtime_get(xe);
		err = xe_bo_evict_all_user(xe);
		if (err)
			drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);

		err = xe_bo_notifier_prepare_all_pinned(xe);
		if (err)
			drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
		/*
		 * Keep the runtime pm reference until post hibernation / post suspend to
		 * avoid a runtime suspend interfering with evicted objects or backup
		 * allocations.
		 */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		complete_all(&xe->pm_block);
		xe_pm_wake_rebind_workers(xe);
		xe_bo_notifier_unprepare_all_pinned(xe);
		xe_pm_runtime_put(xe);
		break;
	}

	return NOTIFY_DONE;
}

/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	u32 vram_threshold;
	int err;

	xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
	err = register_pm_notifier(&xe->pm_notifier);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->rebind_resume_lock);
	if (err)
		goto err_unregister;

	init_completion(&xe->pm_block);
	complete_all(&xe->pm_block);
	INIT_LIST_HEAD(&xe->rebind_resume_list);

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	if (xe->d3cold.capable) {
		vram_threshold = vram_threshold_value(xe);
		err = xe_pm_set_vram_threshold(xe, vram_threshold);
		if (err)
			goto err_unregister;
	}

	xe_pm_runtime_init(xe);
	return 0;

err_unregister:
	unregister_pm_notifier(&xe->pm_notifier);
	return err;
}

static void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}

/**
 * xe_pm_fini - Finalize PM
 * @xe: xe device instance
 */
void xe_pm_fini(struct xe_device *xe)
{
	if (xe_device_uc_enabled(xe))
		xe_pm_runtime_fini(xe);

	unregister_pm_notifier(&xe->pm_notifier);
}

static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/*
	 * Just in case it's somehow possible for our writes to be reordered to
	 * the extent that something else re-uses the task written in
	 * pm_callback_task. For example after returning from the callback, but
	 * before the reordered write that resets pm_callback_task back to NULL.
	 */
	smp_mb(); /* pairs with xe_pm_read_callback_task */
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb(); /* pairs with xe_pm_write_callback_task */

	return READ_ONCE(xe->pm_callback_task);
}

/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as a non-reliable assertion, to ensure that we are not in
 * the sleep state while trying to access some memory for instance.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}

/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * The actual xe_pm_runtime_put() is always async underneath, so
	 * exactly where that is called should make no difference to us. However,
	 * we still need to be very careful with the locks that this callback
	 * acquires and the locks that are acquired and held by any callers of
	 * xe_runtime_pm_get(). We already have the matching annotation
	 * on that side, but we also need it here. For example lockdep should be
	 * able to tell us if the following scenario is in theory possible:
	 *
	 * CPU0                          | CPU1 (kworker)
	 * lock(A)                       |
	 *                               | xe_pm_runtime_suspend()
	 *                               |      lock(A)
	 * xe_pm_runtime_get()           |
	 *
	 * This will clearly deadlock since rpm core needs to wait for
	 * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
	 * on CPU0 which prevents CPU1 making forward progress.  With the
	 * annotation here and in xe_pm_runtime_get() lockdep will see
	 * the potential lock inversion and give us a nice splat.
	 */
	xe_rpm_lockmap_acquire(xe);

	err = xe_pxp_pm_suspend(xe->pxp);
	if (err)
		goto out;

	/*
	 * Hold the lock for the entire list op since xe_ttm_bo_destroy and
	 * xe_bo_move_notify also check and delete bo entries from the user fault list.
	 */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out_resume;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out_resume;
	}

	xe_irq_suspend(xe);

	xe_display_pm_runtime_suspend_late(xe);

	xe_i2c_pm_suspend(xe);

	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return 0;

out_resume:
	xe_display_pm_runtime_resume(xe);
	xe_pxp_pm_resume(xe->pxp);
out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/*
		 * This only restores pinned memory which is the memory
		 * required for the GT(s) to resume.
		 */
		err = xe_bo_restore_early(xe);
		if (err)
			goto out;
	}

	xe_i2c_pm_resume(xe, xe->d3cold.allowed);

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_late(xe);
		if (err)
			goto out;
	}

	xe_pxp_pm_resume(xe->pxp);

out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}

/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the callers locks
 * held, so deadlocks might exist but are hard for lockdep to ever see.
 * With this in mind, help lockdep learn about the potentially scary
 * stuff that can happen inside the runtime_resume callback by acquiring
 * a dummy lock (it doesn't protect anything and gets compiled out on
 * non-debug builds).  Lockdep then only needs to see the
 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
 * For example if the (callers_locks) are ever grabbed in the
 * runtime_resume callback, lockdep should give us a nice splat.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}

/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */
static void xe_pm_runtime_lockdep_prime(void)
{
	struct dma_resv lockdep_resv;

	dma_resv_init(&lockdep_resv);
	lock_map_acquire(&xe_pm_runtime_d3cold_map);
	/* D3Cold takes the dma_resv locks to evict bos */
	dma_resv_lock(&lockdep_resv, NULL);
	dma_resv_unlock(&lockdep_resv);
	lock_map_release(&xe_pm_runtime_d3cold_map);

	/* Shrinkers might like to wake up the device under reclaim. */
	fs_reclaim_acquire(GFP_KERNEL);
	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
	lock_map_release(&xe_pm_runtime_nod3cold_map);
	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}

/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}

/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}
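
/*
 * Illustrative sketch, not part of the original file: an ioctl path is
 * expected to take the reference up front and drop it again when done. Since
 * the underlying pm_runtime_get_sync() bumps the usage counter even on
 * failure, the put is done unconditionally. The handler name and
 * example_do_work() are hypothetical:
 *
 *	static int example_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file)
 *	{
 *		struct xe_device *xe = to_xe_device(dev);
 *		int ret;
 *
 *		ret = xe_pm_runtime_get_ioctl(xe);
 *		if (ret >= 0)
 *			ret = example_do_work(xe, data);
 *		xe_pm_runtime_put(xe);
 *
 *		return ret;
 *	}
 */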

/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}

/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been taken,
 * and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}

/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING ||
		pm_suspend_in_progress();
#else
	return false;
#endif
}

/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of `xe_pm_runtime_get`.
 * It will warn if not protected.
 * The reference should be put back after use regardless, since this function
 * always bumps the usage counter.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}

/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
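
/*
 * Illustrative sketch, not part of the original file: unlike
 * xe_pm_runtime_get(), this variant can fail, so callers are expected to
 * check the return value and only put the reference when it was actually
 * taken. example_do_work() is a hypothetical device access:
 *
 *	if (!xe_pm_runtime_resume_and_get(xe))
 *		return;
 *	example_do_work(xe);
 *	xe_pm_runtime_put(xe);
 */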

/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM.\n");
		device_set_pm_not_required(&pdev->dev);
	}
}

/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Return:
 * * 0		- success
 * * -EINVAL	- invalid argument
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}
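
/*
 * Illustrative sketch, not part of the original file: the threshold is
 * typically driven from userspace, e.g. via a sysfs store callback along
 * these lines. The attribute/store names and their placement are assumptions:
 *
 *	static ssize_t vram_d3cold_threshold_store(struct device *dev,
 *						   struct device_attribute *attr,
 *						   const char *buf, size_t count)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *		u32 threshold;
 *		int ret;
 *
 *		ret = kstrtou32(buf, 0, &threshold);
 *		if (ret)
 *			return ret;
 *
 *		ret = xe_pm_set_vram_threshold(xe, threshold);
 *		return ret ?: count;
 *	}
 */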

/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);
}
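
/*
 * Illustrative sketch, not part of the original file: the toggle is meant to
 * be run from the PCI glue code's runtime idle callback, before the runtime
 * suspend callback is invoked. The wrapper name and its exact contents are
 * assumptions for illustration only:
 *
 *	static int xe_pci_runtime_idle(struct device *dev)
 *	{
 *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
 *
 *		xe_pm_d3cold_allowed_toggle(xe);
 *
 *		return 0;
 *	}
 */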

/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();
	return 0;
}