/**
 * DOC: Xe Power Management
 *
 * Xe PM implements the main routines for both system level suspend states
 * and for the opportunistic runtime suspend states.
 *
 * When every device paired under the same root port is in D3hot, ACPI can
 * run its own methods (_PR3 and _OFF) to perform the transition from D3hot
 * to D3cold. Xe may disallow this transition based on runtime conditions
 * such as VRAM usage, in order to guarantee a quick and low latency resume.
 *
 * Runtime PM lets the device reach D3 (if supported), deeper package sleep
 * states (PC-states), and/or other low level power states. Xe PM component
 * provides the runtime suspend and resume callbacks that the PCI subsystem
 * invokes around these transitions.
 *
 * Also, Xe PM provides get and put functions that Xe driver will use to
 * indicate activity. To avoid locking complications with the memory
 * management, these should be called from the higher/outer levels whenever
 * possible: IOCTL, sysfs, debugfs, dma-buf sharing, GPU execution.
 */
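/*
 * A minimal usage sketch (illustrative, not part of xe_pm.c): an outer-level
 * entry point brackets its hardware access with the get/put helpers defined
 * below. The helper touch_hw() is hypothetical.
 */
static void example_outer_level_access(struct xe_device *xe)
{
	xe_pm_runtime_get(xe);	/* wake the device, hold a wakeref */
	touch_hw(xe);		/* hypothetical hardware access */
	xe_pm_runtime_put(xe);	/* drop the wakeref; device may idle off */
}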
/**
 * xe_rpm_reclaim_safe() - Whether runtime suspend can be done safely in reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime suspend the device from reclaim
 * context.
 */
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
	return !xe->d3cold.capable && !xe->info.has_sriov;
}
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}

static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
	lock_map_release(xe_rpm_reclaim_safe(xe) ?
			 &xe_pm_runtime_nod3cold_map :
			 &xe_pm_runtime_d3cold_map);
}
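/*
 * Note on the two lockdep maps above (assumed here to match definitions
 * earlier in this file): the d3cold-capable path may allocate memory on
 * resume and is therefore not reclaim-safe, while the path taken when
 * neither d3cold nor SR-IOV is in play is safe to enter from reclaim
 * context and gets its own map.
 */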
/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3/S4
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_suspend(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Suspending device\n");
	trace_xe_pm_suspend(xe, __builtin_return_address(0));

	for_each_gt(gt, xe, id)
		xe_gt_suspend_prepare(gt);

	xe_display_pm_suspend(xe);

	/* Evict buffer objects out of VRAM before powering down */
	err = xe_bo_evict_all(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err) {
			xe_display_pm_resume(xe);
			goto err;
		}
	}

	xe_irq_suspend(xe);
	xe_display_pm_suspend_late(xe);

	drm_dbg(&xe->drm, "Device suspended\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
	return err;
}
/**
 * xe_pm_resume - Helper for System resume, i.e. S3/S4->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */
int xe_pm_resume(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	u8 id;
	int err;

	drm_dbg(&xe->drm, "Resuming device\n");
	trace_xe_pm_resume(xe, __builtin_return_address(0));

	for_each_tile(tile, xe, id)
		xe_wa_apply_tile_workarounds(tile);

	err = xe_pcode_ready(xe, true);
	if (err)
		return err;

	xe_display_pm_resume_early(xe);

	/* This only restores pinned memory, which the GT(s) need to resume */
	err = xe_bo_restore_kernel(xe);
	if (err)
		goto err;

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_resume(xe);

	err = xe_bo_restore_user(xe);
	if (err)
		goto err;

	drm_dbg(&xe->drm, "Device resumed\n");
	return 0;
err:
	drm_dbg(&xe->drm, "Device resume failed %d\n", err);
	return err;
}
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *root_pdev;

	root_pdev = pcie_find_root_port(pdev);
	if (!root_pdev)
		return false;

	/* D3Cold requires PME capability */
	if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
		drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
		return false;
	}

	/* D3Cold requires an ACPI _PR3 power resource */
	if (!pci_pr3_present(root_pdev)) {
		drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
		return false;
	}

	return true;
}
static void xe_pm_runtime_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	/* Disable the system suspend direct complete optimization on discrete */
	if (IS_DGFX(xe))
		dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);
}
int xe_pm_init_early(struct xe_device *xe)
{
	int err;

	INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);

	err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
	if (err)
		return err;

	err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
	if (err)
		return err;

	return 0;
}
/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_init(struct xe_device *xe)
{
	int err;

	/* For now suspend/resume is only allowed with GuC */
	if (!xe_device_uc_enabled(xe))
		return 0;

	xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);

	if (xe->d3cold.capable) {
		err = xe_device_sysfs_init(xe);
		if (err)
			return err;

		err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
		if (err)
			return err;
	}

	xe_pm_runtime_init(xe);

	return 0;
}
/**
 * xe_pm_runtime_fini - Finalize Runtime PM
 * @xe: xe device instance
 */
void xe_pm_runtime_fini(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;

	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);
}
static void xe_pm_write_callback_task(struct xe_device *xe,
				      struct task_struct *task)
{
	WRITE_ONCE(xe->pm_callback_task, task);

	/* Pairs with the smp_mb() in xe_pm_read_callback_task() */
	smp_mb();
}

struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
	smp_mb();

	return READ_ONCE(xe->pm_callback_task);
}
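/*
 * Note on the pattern above: the runtime suspend/resume callbacks record
 * "current" in pm_callback_task so that any get/put helper invoked
 * recursively from within the callback can detect the situation and skip
 * the synchronous resume that would otherwise deadlock.
 */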
/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is idle or that the
 * state won't change as soon as the function returns.
 *
 * Returns true if suspended, false otherwise.
 */
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
	return pm_runtime_suspended(xe->drm.dev);
}
/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_suspend(struct xe_device *xe)
{
	struct xe_bo *bo, *on;
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	/*
	 * Annotate this callback against the rpm lockdep map, so lockdep can
	 * flag lock inversions between locks held here and locks held by
	 * callers of xe_pm_runtime_get().
	 */
	xe_rpm_lockmap_acquire(xe);

	/* Release the mmap offsets of BOs on the VRAM userfault list */
	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, on,
				 &xe->mem_access.vram_userfault.list, vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	xe_display_pm_runtime_suspend(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_evict_all(xe);
		if (err)
			goto out;
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_suspend(gt);
		if (err)
			goto out;
	}

	xe_irq_suspend(xe);

	if (xe->d3cold.allowed)
		xe_display_pm_suspend_late(xe);
out:
	if (err)
		xe_display_pm_runtime_resume(xe);
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}
/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_runtime_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;
	int err = 0;

	trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
	/* Disable access_ongoing asserts and prevent recursive pm calls */
	xe_pm_write_callback_task(xe, current);

	xe_rpm_lockmap_acquire(xe);

	if (xe->d3cold.allowed) {
		err = xe_pcode_ready(xe, true);
		if (err)
			goto out;

		xe_display_pm_resume_early(xe);

		/* Only restores pinned memory, which the GT(s) need to resume */
		err = xe_bo_restore_kernel(xe);
		if (err)
			goto out;
	}

	xe_irq_resume(xe);

	for_each_gt(gt, xe, id)
		xe_gt_resume(gt);

	xe_display_pm_runtime_resume(xe);

	if (xe->d3cold.allowed) {
		err = xe_bo_restore_user(xe);
		if (err)
			goto out;
	}
out:
	xe_rpm_lockmap_release(xe);
	xe_pm_write_callback_task(xe, NULL);
	return err;
}
/*
 * For places where resume is synchronous it can be quite easy to deadlock
 * if we are not careful. Also, in practice it might be quite timing
 * sensitive to ever see the 0 -> 1 transition with the callers locks
 * held, so we manually annotate the lock map here to let lockdep catch
 * such scenarios.
 */
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
	xe_rpm_lockmap_acquire(xe);
	xe_rpm_lockmap_release(xe);
}
/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */
void xe_pm_runtime_get(struct xe_device *xe)
{
	trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
	pm_runtime_get_noresume(xe->drm.dev);

	if (xe_pm_read_callback_task(xe) == current)
		return;

	xe_rpm_might_enter_cb(xe);
	pm_runtime_resume(xe->drm.dev);
}
/**
 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
 * @xe: xe device instance
 */
void xe_pm_runtime_put(struct xe_device *xe)
{
	trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
	if (xe_pm_read_callback_task(xe) == current) {
		pm_runtime_put_noidle(xe->drm.dev);
	} else {
		pm_runtime_mark_last_busy(xe->drm.dev);
		pm_runtime_put(xe->drm.dev);
	}
}
/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
	trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
		return -ELOOP;

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_get_sync(xe->drm.dev);
}
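/*
 * Illustrative caller pattern (not part of xe_pm.c): an ioctl handler takes
 * the reference up front and must check the result before touching hardware.
 * example_ioctl_body() is a hypothetical stand-in for the real work.
 */
static int example_ioctl(struct xe_device *xe)
{
	int ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret < 0)
		return ret;

	ret = example_ioctl_body(xe);	/* hypothetical */

	xe_pm_runtime_put(xe);
	return ret;
}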
/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
	return pm_runtime_get_if_active(xe->drm.dev) > 0;
}
/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
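/*
 * Illustrative pattern (not part of xe_pm.c): opportunistic work, e.g. from
 * a worker, that should only touch hardware when the device is already in
 * use rather than forcing a resume. do_light_work() is hypothetical.
 */
static void example_opportunistic_work(struct xe_device *xe)
{
	if (!xe_pm_runtime_get_if_in_use(xe))
		return;		/* device idle or suspended: skip */

	do_light_work(xe);	/* hypothetical hardware access */
	xe_pm_runtime_put(xe);
}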
/*
 * Very unreliable! Should only be used to suppress the false positive case
 * in the missing outer rpm protection warning.
 */
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
	struct device *dev = xe->drm.dev;

	return dev->power.runtime_status == RPM_SUSPENDING ||
		dev->power.runtime_status == RPM_RESUMING;
#else
	return false;
#endif
}
/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should only be used in cases where the device is known to
 * be awake; it warns when that assumption does not hold.
 */
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
	bool ref;

	ref = xe_pm_runtime_get_if_in_use(xe);

	if (!ref) {
		pm_runtime_get_noresume(xe->drm.dev);
		drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
			 "Missing outer runtime PM protection\n");
	}
}
/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
	if (xe_pm_read_callback_task(xe) == current) {
		/* The device is awake, grab the ref and move on */
		pm_runtime_get_noresume(xe->drm.dev);
		return true;
	}

	xe_rpm_might_enter_cb(xe);
	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
/**
 * xe_pm_assert_unbounded_bridge - Disable PM on unbounded pcie parent bridge
 * @xe: xe device instance
 */
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return;

	if (!bridge->driver) {
		drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM support.\n");
		device_set_pm_not_required(&pdev->dev);
	}
}
/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MiB for the D3cold threshold
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
	struct ttm_resource_manager *man;
	u32 vram_total_mb = 0;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man)
			vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
	}

	drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);

	if (threshold > vram_total_mb)
		return -EINVAL;

	mutex_lock(&xe->d3cold.lock);
	xe->d3cold.vram_threshold = threshold;
	mutex_unlock(&xe->d3cold.lock);

	return 0;
}
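/*
 * Illustrative usage (not part of xe_pm.c): tightening the threshold so
 * d3cold is only allowed when less than 64 MiB of VRAM is in use. Values
 * above the total VRAM size are rejected by the function above.
 */
static int example_tune_threshold(struct xe_device *xe)
{
	return xe_pm_set_vram_threshold(xe, 64);
}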
/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
	struct ttm_resource_manager *man;
	u32 total_vram_used_mb = 0;
	u64 vram_used;
	int i;

	if (!xe->d3cold.capable) {
		xe->d3cold.allowed = false;
		return;
	}

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			vram_used = ttm_resource_manager_usage(man);
			total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
		}
	}

	mutex_lock(&xe->d3cold.lock);

	if (total_vram_used_mb < xe->d3cold.vram_threshold)
		xe->d3cold.allowed = true;
	else
		xe->d3cold.allowed = false;

	mutex_unlock(&xe->d3cold.lock);

	drm_dbg(&xe->drm,
		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */
int __init xe_pm_module_init(void)
{
	xe_pm_runtime_lockdep_prime();

	return 0;
}