Lines matching refs: xe

86  * @xe: The xe device.
91 bool xe_rpm_reclaim_safe(const struct xe_device *xe)
93 return !xe->d3cold.capable && !xe->info.has_sriov;
96 static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
98 lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
103 static void xe_rpm_lockmap_release(const struct xe_device *xe)
105 lock_map_release(xe_rpm_reclaim_safe(xe) ?
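
A minimal sketch of the annotation pattern behind the two truncated lock_map calls above: a pair of dummy lockdep maps, with the reclaim-safe one chosen whenever xe_rpm_reclaim_safe() reports that D3cold and SR-IOV are not in play. The map and helper names below are placeholders, not the driver's actual symbols, and the sketch assumes it sits next to xe_rpm_reclaim_safe() in xe_pm.c.

#include <linux/lockdep.h>

#if IS_ENABLED(CONFIG_LOCKDEP)
/* Placeholder maps; the real driver keeps its own pair for the same purpose. */
static struct lockdep_map example_rpm_d3cold_map = {
	.name = "example_rpm_d3cold_map",
};

static struct lockdep_map example_rpm_nod3cold_map = {
	.name = "example_rpm_nod3cold_map",
};
#endif

static void example_rpm_lockmap_acquire(const struct xe_device *xe)
{
	/* Pick the map whose locking rules match this device's RPM depth. */
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &example_rpm_nod3cold_map :
			 &example_rpm_d3cold_map);
}

static void example_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &example_rpm_nod3cold_map :
			 &example_rpm_d3cold_map);
}
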
112 * @xe: xe device instance
116 int xe_pm_suspend(struct xe_device *xe)
122 drm_dbg(&xe->drm, "Suspending device\n");
123 trace_xe_pm_suspend(xe, __builtin_return_address(0));
125 for_each_gt(gt, xe, id)
128 xe_display_pm_suspend(xe);
131 err = xe_bo_evict_all(xe);
135 for_each_gt(gt, xe, id) {
138 xe_display_pm_resume(xe);
143 xe_irq_suspend(xe);
145 xe_display_pm_suspend_late(xe);
147 drm_dbg(&xe->drm, "Device suspended\n");
150 drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
156 * @xe: xe device instance
160 int xe_pm_resume(struct xe_device *xe)
167 drm_dbg(&xe->drm, "Resuming device\n");
168 trace_xe_pm_resume(xe, __builtin_return_address(0));
170 for_each_tile(tile, xe, id)
173 err = xe_pcode_ready(xe, true);
177 xe_display_pm_resume_early(xe);
183 err = xe_bo_restore_kernel(xe);
187 xe_irq_resume(xe);
189 for_each_gt(gt, xe, id)
192 xe_display_pm_resume(xe);
194 err = xe_bo_restore_user(xe);
198 drm_dbg(&xe->drm, "Device resumed\n");
201 drm_dbg(&xe->drm, "Device resume failed %d\n", err);
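
xe_pm_suspend() and xe_pm_resume() are not called by users directly; the PCI glue hooks them into dev_pm_ops. A hedged sketch of that wiring, assuming thin wrapper callbacks and that the probe path stored the xe_device as PCI drvdata (the wrapper and ops names are illustrative; the real glue lives in xe_pci.c and differs in detail):

#include <linux/pci.h>
#include <linux/pm.h>

static int example_pci_suspend(struct device *dev)
{
	struct xe_device *xe = pci_get_drvdata(to_pci_dev(dev));

	return xe_pm_suspend(xe);
}

static int example_pci_resume(struct device *dev)
{
	struct xe_device *xe = pci_get_drvdata(to_pci_dev(dev));

	return xe_pm_resume(xe);
}

static const struct dev_pm_ops example_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(example_pci_suspend, example_pci_resume)
};
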
205 static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
207 struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
216 drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
222 drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
229 static void xe_pm_runtime_init(struct xe_device *xe)
231 struct device *dev = xe->drm.dev;
241 if (IS_DGFX(xe))
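
The runtime-PM bring-up around these lines follows the usual autosuspend recipe. A sketch of that recipe with an assumed 1 s delay; the IS_DGFX() special case visible in the listing is omitted here, and the function name is a placeholder:

#include <linux/pm_runtime.h>

static void example_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* assumed delay */
	pm_runtime_set_active(dev);	/* device is powered after probe */
	pm_runtime_allow(dev);		/* opt the device in to runtime PM */

	/* Drop the probe-time reference so the device may autosuspend. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}
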
252 int xe_pm_init_early(struct xe_device *xe)
256 INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
258 err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
262 err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
270 static u32 vram_threshold_value(struct xe_device *xe)
273 if (xe->info.platform == XE_BATTLEMAGE)
281 * @xe: xe device instance
287 int xe_pm_init(struct xe_device *xe)
293 if (!xe_device_uc_enabled(xe))
296 xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
298 if (xe->d3cold.capable) {
299 err = xe_device_sysfs_init(xe);
303 vram_threshold = vram_threshold_value(xe);
304 err = xe_pm_set_vram_threshold(xe, vram_threshold);
309 xe_pm_runtime_init(xe);
316 * @xe: xe device instance
318 void xe_pm_runtime_fini(struct xe_device *xe)
320 struct device *dev = xe->drm.dev;
326 static void xe_pm_write_callback_task(struct xe_device *xe,
329 WRITE_ONCE(xe->pm_callback_task, task);
340 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
344 return READ_ONCE(xe->pm_callback_task);
349 * @xe: xe device instance
358 bool xe_pm_runtime_suspended(struct xe_device *xe)
360 return pm_runtime_suspended(xe->drm.dev);
365 * @xe: xe device instance
369 int xe_pm_runtime_suspend(struct xe_device *xe)
376 trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
378 xe_pm_write_callback_task(xe, current);
401 xe_rpm_lockmap_acquire(xe);
407 mutex_lock(&xe->mem_access.vram_userfault.lock);
409 &xe->mem_access.vram_userfault.list, vram_userfault_link)
411 mutex_unlock(&xe->mem_access.vram_userfault.lock);
413 xe_display_pm_runtime_suspend(xe);
415 if (xe->d3cold.allowed) {
416 err = xe_bo_evict_all(xe);
421 for_each_gt(gt, xe, id) {
427 xe_irq_suspend(xe);
429 xe_display_pm_runtime_suspend_late(xe);
433 xe_display_pm_runtime_resume(xe);
434 xe_rpm_lockmap_release(xe);
435 xe_pm_write_callback_task(xe, NULL);
441 * @xe: xe device instance
445 int xe_pm_runtime_resume(struct xe_device *xe)
451 trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
453 xe_pm_write_callback_task(xe, current);
455 xe_rpm_lockmap_acquire(xe);
457 if (xe->d3cold.allowed) {
458 err = xe_pcode_ready(xe, true);
462 xe_display_pm_resume_early(xe);
468 err = xe_bo_restore_kernel(xe);
473 xe_irq_resume(xe);
475 for_each_gt(gt, xe, id)
478 xe_display_pm_runtime_resume(xe);
480 if (xe->d3cold.allowed) {
481 err = xe_bo_restore_user(xe);
487 xe_rpm_lockmap_release(xe);
488 xe_pm_write_callback_task(xe, NULL);
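
These two callbacks are the runtime-PM counterparts of the system-sleep pair and in practice are wired into the same dev_pm_ops. An illustrative sketch with placeholder wrapper names (the real glue is in xe_pci.c):

static int example_pci_runtime_suspend(struct device *dev)
{
	return xe_pm_runtime_suspend(pci_get_drvdata(to_pci_dev(dev)));
}

static int example_pci_runtime_resume(struct device *dev)
{
	return xe_pm_runtime_resume(pci_get_drvdata(to_pci_dev(dev)));
}

static const struct dev_pm_ops example_runtime_pm_ops = {
	RUNTIME_PM_OPS(example_pci_runtime_suspend,
		       example_pci_runtime_resume, NULL)
};
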
506 static void xe_rpm_might_enter_cb(const struct xe_device *xe)
508 xe_rpm_lockmap_acquire(xe);
509 xe_rpm_lockmap_release(xe);
536 * @xe: xe device instance
538 void xe_pm_runtime_get(struct xe_device *xe)
540 trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
541 pm_runtime_get_noresume(xe->drm.dev);
543 if (xe_pm_read_callback_task(xe) == current)
546 xe_rpm_might_enter_cb(xe);
547 pm_runtime_resume(xe->drm.dev);
552 * @xe: xe device instance
554 void xe_pm_runtime_put(struct xe_device *xe)
556 trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
557 if (xe_pm_read_callback_task(xe) == current) {
558 pm_runtime_put_noidle(xe->drm.dev);
560 pm_runtime_mark_last_busy(xe->drm.dev);
561 pm_runtime_put(xe->drm.dev);
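
Callers bracket hardware access with this get/put pair. A minimal sketch; the register-touching helper is hypothetical:

static void example_touch_hw(struct xe_device *xe)
{
	xe_pm_runtime_get(xe);		/* wakes the device if suspended */

	/* ... access registers, submit work, etc. ... */

	xe_pm_runtime_put(xe);		/* allow autosuspend once idle */
}
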
567 * @xe: xe device instance
572 int xe_pm_runtime_get_ioctl(struct xe_device *xe)
574 trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
575 if (WARN_ON(xe_pm_read_callback_task(xe) == current))
578 xe_rpm_might_enter_cb(xe);
579 return pm_runtime_get_sync(xe->drm.dev);
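
In the ioctl path the _get_ioctl() variant is used so a failed wake propagates an errno to userspace. A hedged sketch of a caller (the ioctl body is hypothetical); note the reference is still dropped on failure, since the underlying pm_runtime_get_sync() raises the usage count regardless of the result:

static int example_ioctl(struct xe_device *xe)
{
	int ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret < 0) {
		xe_pm_runtime_put(xe);	/* balance the usage count */
		return ret;
	}

	/* ... do the ioctl work with the device awake ... */

	xe_pm_runtime_put(xe);
	return 0;
}
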
584 * @xe: xe device instance
589 bool xe_pm_runtime_get_if_active(struct xe_device *xe)
591 return pm_runtime_get_if_active(xe->drm.dev) > 0;
596 * @xe: xe device instance
601 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
603 if (xe_pm_read_callback_task(xe) == current) {
605 pm_runtime_get_noresume(xe->drm.dev);
609 return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
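
The _get_if_active()/_get_if_in_use() variants take a reference only when the device is already awake, which suits paths that must never trigger a resume. A small sketch with a hypothetical caller:

static void example_opportunistic_flush(struct xe_device *xe)
{
	if (!xe_pm_runtime_get_if_active(xe))
		return;		/* device is suspended; nothing to do */

	/* ... lightweight hardware poke ... */

	xe_pm_runtime_put(xe);
}
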
616 static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
619 struct device *dev = xe->drm.dev;
631 * @xe: xe device instance
639 void xe_pm_runtime_get_noresume(struct xe_device *xe)
643 ref = xe_pm_runtime_get_if_in_use(xe);
646 pm_runtime_get_noresume(xe->drm.dev);
647 drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
654 * @xe: xe device instance
658 bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
660 if (xe_pm_read_callback_task(xe) == current) {
662 pm_runtime_get_noresume(xe->drm.dev);
666 xe_rpm_might_enter_cb(xe);
667 return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
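
xe_pm_runtime_resume_and_get() returns a bool, so callers that can tolerate failure simply skip the work; on failure no reference is kept, mirroring pm_runtime_resume_and_get(). Sketch with a hypothetical caller:

static void example_best_effort_work(struct xe_device *xe)
{
	if (!xe_pm_runtime_resume_and_get(xe))
		return;		/* resume failed, no reference held */

	/* ... work that needs the device awake ... */

	xe_pm_runtime_put(xe);
}
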
672 * @xe: xe device instance
674 void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
676 struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
683 drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM support.\n");
690 * @xe: xe device instance
695 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
702 man = ttm_manager_type(&xe->ttm, i);
707 drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
712 mutex_lock(&xe->d3cold.lock);
713 xe->d3cold.vram_threshold = threshold;
714 mutex_unlock(&xe->d3cold.lock);
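
The threshold setter returns a negative errno when the requested value is rejected (for example, a value larger than total VRAM), so callers such as a sysfs store handler should check it. A hedged sketch; the handler shape and names are illustrative:

#include <linux/kernel.h>

static ssize_t example_vram_threshold_store(struct xe_device *xe,
					    const char *buf, size_t count)
{
	u32 threshold_mb;
	int err;

	err = kstrtou32(buf, 0, &threshold_mb);
	if (err)
		return err;

	err = xe_pm_set_vram_threshold(xe, threshold_mb);
	if (err)
		return err;

	return count;
}
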
721 * @xe: xe device instance
726 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
733 if (!xe->d3cold.capable) {
734 xe->d3cold.allowed = false;
739 man = ttm_manager_type(&xe->ttm, i);
746 mutex_lock(&xe->d3cold.lock);
748 if (total_vram_used_mb < xe->d3cold.vram_threshold)
749 xe->d3cold.allowed = true;
751 xe->d3cold.allowed = false;
753 mutex_unlock(&xe->d3cold.lock);
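
The threshold above and this toggle work as a pair: before the device is allowed to power down, the PCI glue re-evaluates whether VRAM usage is under the threshold and flips d3cold.allowed accordingly. An illustrative call site with a placeholder callback name (the real decision point sits in the xe_pci.c runtime-PM glue):

static int example_runtime_idle(struct device *dev)
{
	struct xe_device *xe = pci_get_drvdata(to_pci_dev(dev));

	xe_pm_d3cold_allowed_toggle(xe);

	return 0;
}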