Matched lines for the search "pre ... timeout" in the Linux kernel's Intel NPU power-management code (drivers/accel/ivpu/ivpu_pm.c). Only the matching lines were captured by the search; elided code is marked with "..." and each fragment is grouped under the function it came from.
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
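The tdr_timeout_ms module parameter lets the user override the hang-detection (TDR) timeout. A minimal sketch of how the override takes effect, mirroring the ternary that appears verbatim in ivpu_start_job_timeout_detection() below (the helper name is invented for illustration):

        static unsigned long ivpu_effective_tdr_ms(struct ivpu_device *vdev)
        {
                /* 0 (the default) means: use the per-generation timeout. */
                return ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
        }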
in ivpu_pm_prepare_cold_boot():
        struct ivpu_fw_info *fw = vdev->fw;
        ...
        fw->entry_point = fw->cold_boot_entry_point;
in ivpu_pm_prepare_warm_boot():
        struct ivpu_fw_info *fw = vdev->fw;
        struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
        ...
        if (!bp->save_restore_ret_address) {
        ...
        ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
        fw->entry_point = bp->save_restore_ret_address;
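The two prepare helpers select the firmware entry point for the next boot: a cold boot restarts at the firmware's original entry, while a warm boot resumes at the save/restore return address the firmware published in its boot parameters. The body of the if above is elided in the match; a plausible shape, assuming the driver falls back to a cold boot when the firmware never saved a return address:

        if (!bp->save_restore_ret_address) {
                /* Assumption: no saved context, so only a cold boot is safe. */
                ivpu_pm_prepare_cold_boot(vdev);
                return 0;
        }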
in ivpu_resume():
        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
        pci_restore_state(to_pci_dev(vdev->drm.dev));
        ...
        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
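Resume first returns the PCI function to full power (D0) and restores its saved config space; the later D3hot write suggests an error path that drops the device back to low power when bring-up fails. A hedged sketch of that shape (ivpu_hw_power_up() stands in for the elided bring-up step):

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        ret = ivpu_hw_power_up(vdev);           /* assumed name for the bring-up step */
        if (ret)
                pci_set_power_state(pdev, PCI_D3hot);   /* back to low power on failure */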
in ivpu_pm_reset_begin():
        pm_runtime_disable(vdev->drm.dev);
        ...
        atomic_inc(&vdev->pm->reset_counter);
        atomic_set(&vdev->pm->reset_pending, 1);
        down_write(&vdev->pm->reset_lock);
in ivpu_pm_reset_complete():
        pm_runtime_set_suspended(vdev->drm.dev);
        ...
        pm_runtime_set_active(vdev->drm.dev);
        ...
        up_write(&vdev->pm->reset_lock);
        atomic_set(&vdev->pm->reset_pending, 0);
        ...
        pm_runtime_mark_last_busy(vdev->drm.dev);
        pm_runtime_enable(vdev->drm.dev);
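These two helpers bracket every device reset: begin disables runtime PM so no suspend callback races the reset, raises reset_pending, and takes the write side of reset_lock to exclude job submission; complete marks the device suspended or active (presumably depending on whether it came back up), releases the lock, and re-enables runtime PM. A sketch of a caller, with the middle step assumed:

        ivpu_pm_reset_begin(vdev);
        /* ... hardware reset and firmware reboot (elided in the match) ... */
        ivpu_pm_reset_complete(vdev);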
in ivpu_pm_recovery_work():
        struct ivpu_device *vdev = pm->vdev;
        ...
        ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
        ...
        if (!pm_runtime_status_suspended(vdev->drm.dev)) {
        ...
        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
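The recovery worker also emits a KOBJ_CHANGE uevent so userspace can notice that the NPU was reset. The evt array itself is elided in the match; a conventional payload would look like this (the string is an assumption, only the variable name comes from the source):

        char *evt[2] = { "IVPU_PM_EVENT=IVPU_RECOVER", NULL };  /* assumed payload */

        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);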
in ivpu_pm_trigger_recovery():
        if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
        ...
        queue_work(system_long_wq, &vdev->pm->recovery_work);
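The cmpxchg acts as a single-shot latch: only the caller that flips reset_pending from 0 to 1 queues the recovery work, so concurrent hang reports (for example from the TDR worker and an IRQ handler) collapse into one recovery pass. Joining the two matched fragments gives the likely control flow:

        /* Sketch: a second trigger while recovery is already pending is a no-op. */
        if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0)
                queue_work(system_long_wq, &vdev->pm->recovery_work);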
in ivpu_job_timeout_work():
        struct ivpu_device *vdev = pm->vdev;
in ivpu_start_job_timeout_detection():
        unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
        ...
        /* No-op if already queued */
        queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));

in ivpu_stop_job_timeout_detection():
        cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
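Together these form the TDR arm/disarm pair: submitting work arms a delayed work item that fires only if the device stalls for the whole timeout, and completion disarms it. A hedged sketch of how the submit and retire paths might drive the pair (the placement is an assumption):

        /* On job submission: */
        ivpu_start_job_timeout_detection(vdev);         /* no-op if already armed */

        /* Once the last outstanding job retires: */
        ivpu_stop_job_timeout_detection(vdev);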
in ivpu_pm_suspend_cb():
        unsigned long timeout;
        ...
        timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
        ...
        if (time_after_eq(jiffies, timeout)) {
                ...
                return -EBUSY;
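The system-suspend callback evidently polls for idleness against a jiffies deadline; time_after_eq() is the wraparound-safe comparison. A minimal sketch of the pattern, with the wait condition assumed (ivpu_hw_is_idle() does appear later in this file):

        unsigned long timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);

        while (!ivpu_hw_is_idle(vdev)) {                /* assumed wait condition */
                cond_resched();
                if (time_after_eq(jiffies, timeout))
                        return -EBUSY;                  /* refuse to suspend a busy device */
        }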
in ivpu_pm_runtime_suspend_cb():
        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
        drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
        ...
        is_idle = ivpu_hw_is_idle(vdev) || vdev->pm->dct_active_percent;
        ...
        atomic_inc(&vdev->pm->reset_counter);
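Two details worth noting: an active DCT duty cycle is accepted in place of a true idle check, since under throttling the hardware periodically looks busy by design; and the reset_counter bump suggests that a device which fails to reach idle is force-suspended and counted as a reset. A sketch of that failure branch (the condition and cold-boot fallback are assumptions):

        if (!is_idle) {                                 /* assumed condition */
                atomic_inc(&vdev->pm->reset_counter);
                ivpu_pm_prepare_cold_boot(vdev);        /* assumed: next boot must be cold */
        }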
in ivpu_rpm_get():
        ret = pm_runtime_resume_and_get(vdev->drm.dev);
        ...
        pm_runtime_set_suspended(vdev->drm.dev);

in ivpu_rpm_put():
        pm_runtime_mark_last_busy(vdev->drm.dev);
        pm_runtime_put_autosuspend(vdev->drm.dev);
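These are the driver's runtime-PM reference helpers: get resumes the device and takes a usage reference, put marks the device busy and drops the reference so the autosuspend timer can run down. Typical bracketing of any hardware access:

        ret = ivpu_rpm_get(vdev);
        if (ret < 0)
                return ret;

        /* ... touch the NPU: submit a job, read registers, etc. ... */

        ivpu_rpm_put(vdev);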
in ivpu_pm_reset_prepare_cb():
        ivpu_dbg(vdev, PM, "Pre-reset..\n");
        ...
        if (!pm_runtime_status_suspended(vdev->drm.dev)) {
        ...
        ivpu_dbg(vdev, PM, "Pre-reset done.\n");

in ivpu_pm_reset_done_cb():
        ivpu_dbg(vdev, PM, "Post-reset..\n");
        ...
        ivpu_dbg(vdev, PM, "Post-reset done.\n");
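The _cb suffix and the pre/post pairing indicate these are the PCI reset hooks invoked around a function-level reset. A sketch of how such callbacks are conventionally wired up (the field names follow struct pci_error_handlers in the PCI core; the instance name is invented):

        static const struct pci_error_handlers ivpu_err_handlers = {
                .reset_prepare = ivpu_pm_reset_prepare_cb,
                .reset_done    = ivpu_pm_reset_done_cb,
        };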
in ivpu_pm_init():
        struct device *dev = vdev->drm.dev;
        struct ivpu_pm_info *pm = vdev->pm;
        ...
        pm->vdev = vdev;
        ...
        init_rwsem(&pm->reset_lock);
        atomic_set(&pm->reset_pending, 0);
        atomic_set(&pm->reset_counter, 0);
        ...
        INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
        INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
        ...
        delay = -1;
        ...
        delay = vdev->timeout.autosuspend;
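The two delay assignments are the arms of an if/else choosing the runtime-PM autosuspend delay: a negative delay disables autosuspend entirely, otherwise the per-generation default applies. A sketch of how the chosen value would then be handed to runtime PM (the guarding condition is elided in the match and assumed here):

        if (ivpu_disable_recovery)              /* assumed condition */
                delay = -1;                     /* negative delay disables autosuspend */
        else
                delay = vdev->timeout.autosuspend;

        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, delay);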
in ivpu_pm_cancel_recovery():
        drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
        cancel_work_sync(&vdev->pm->recovery_work);
in ivpu_pm_enable():
        struct device *dev = vdev->drm.dev;
in ivpu_pm_disable():
        pm_runtime_get_noresume(vdev->drm.dev);
        pm_runtime_forbid(vdev->drm.dev);
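The disable path takes a no-resume usage reference and forbids runtime PM, pinning the device's power state during teardown without waking it. ivpu_pm_enable() presumably does the mirror image; a hedged sketch of that counterpart:

        /* Assumed mirror of ivpu_pm_disable(): */
        pm_runtime_allow(dev);
        pm_runtime_put_autosuspend(dev);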
in ivpu_pm_dct_init():
        if (vdev->pm->dct_active_percent)
                return ivpu_pm_dct_enable(vdev, vdev->pm->dct_active_percent);
in ivpu_pm_dct_enable():
        return -EINVAL;
        ...
        inactive_us = DCT_PERIOD_US - active_us;
        ...
        vdev->pm->dct_active_percent = active_percent;
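DCT (duty-cycle throttling) splits a fixed period between active and inactive time; the -EINVAL above is presumably the guard rejecting an out-of-range percentage. The matched subtraction implies this arithmetic (the multiplication line is reconstructed, not verbatim):

        active_us = (DCT_PERIOD_US * active_percent) / 100;    /* reconstructed */
        inactive_us = DCT_PERIOD_US - active_us;

For example, if DCT_PERIOD_US were 10000, active_percent = 30 would give 3000 us active and 7000 us inactive per 10 ms period (the period value is assumed for illustration only).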
in ivpu_pm_dct_disable():
        vdev->pm->dct_active_percent = 0;
in ivpu_pm_dct_irq_thread_handler():
        if (vdev->pm->dct_active_percent)
        ...
        ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
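The threaded IRQ handler reacts to a hardware request to turn throttling on or off and then reports the resulting state back through the buttress (BTRS) interface. A hedged sketch of the flow around the matched call (the enable/disable dispatch and the default percentage are assumptions):

        if (enable)
                ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);     /* assumed default */
        else
                ret = ivpu_pm_dct_disable(vdev);

        if (!ret)
                ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);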