ivpu_pm.c: diff between commits 2a20b857dd654595d332f2521d80bd67b03536a0 (old) and 27d19268cf394f2c78db732be0cb31852eeadb0a (new)
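
Changed lines below are marked "-" (only in the old version, 2a20b857) or "+" (only in the new version, 27d19268); unmarked lines are common to both. In short, the new version renames ivpu_pm_schedule_recovery() to ivpu_pm_trigger_recovery() and has callers pass a human-readable reason, replaces the pm->in_reset atomic with a reset_pending flag paired with a new reset_lock read-write semaphore, and makes the recovery worker resume the device and dump the firmware log before resetting the PCI function.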
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

--- 98 unchanged lines hidden ---


static void ivpu_pm_recovery_work(struct work_struct *work)
{
	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
	struct ivpu_device *vdev = pm->vdev;
	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
	int ret;

115 ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
116
117 ret = pm_runtime_resume_and_get(vdev->drm.dev);
118 if (ret)
119 ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
120
121 ivpu_fw_log_dump(vdev);
122
retry:
	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
	if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
		cond_resched();
		goto retry;
	}

	if (ret && ret != -EAGAIN)
		ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);

	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+	pm_runtime_mark_last_busy(vdev->drm.dev);
+	pm_runtime_put_autosuspend(vdev->drm.dev);
}

-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{
-	struct ivpu_pm_info *pm = vdev->pm;
+	ivpu_err(vdev, "Recovery triggered by %s\n", reason);

	if (ivpu_disable_recovery) {
		ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
		return;
	}

	if (ivpu_is_fpga(vdev)) {
		ivpu_err(vdev, "Recovery not available on FPGA\n");
		return;
	}

-	/* Schedule recovery if it's not in progress */
-	if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
-		ivpu_hw_irq_disable(vdev);
-		queue_work(system_long_wq, &pm->recovery_work);
+	/* Trigger recovery if it's not in progress */
+	if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
+		ivpu_hw_diagnose_failure(vdev);
+		ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
+		queue_work(system_long_wq, &vdev->pm->recovery_work);
	}
}

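Both versions gate recovery with atomic_cmpxchg() used as a single-shot latch, so concurrent error paths queue at most one recovery; the new version only renames the flag (in_reset becomes reset_pending) and additionally snapshots hardware failure state via ivpu_hw_diagnose_failure() before masking interrupts. A minimal userspace sketch of the latch using C11 atomics (the names and the second caller are illustrative, not driver API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int reset_pending;

static void trigger_recovery(const char *reason)
{
	int expected = 0;

	/* Equivalent of atomic_cmpxchg(&pm->reset_pending, 0, 1) == 0:
	 * only the caller that flips 0 -> 1 queues the recovery work. */
	if (atomic_compare_exchange_strong(&reset_pending, &expected, 1))
		printf("recovery queued (%s)\n", reason);
	else
		printf("recovery already pending, ignoring (%s)\n", reason);
}

int main(void)
{
	trigger_recovery("TDR");		/* first caller: queues */
	trigger_recovery("second error");	/* latch already set: ignored */
	return 0;
}
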
static void ivpu_job_timeout_work(struct work_struct *work)
{
	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
	struct ivpu_device *vdev = pm->vdev;
-	unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;

-	ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms);
-	ivpu_hw_diagnose_failure(vdev);
-
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "TDR");
}

void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
{
	unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;

	/* No-op if already queued */
	queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));

--- 56 unchanged lines hidden ---


int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	bool hw_is_idle = true;
	int ret;

+	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+	drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+
	ivpu_dbg(vdev, PM, "Runtime suspend..\n");

	if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
		ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
			 vdev->pm->suspend_reschedule_counter);
		pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
		vdev->pm->suspend_reschedule_counter--;
		return -EAGAIN;

--- 66 unchanged lines hidden (view full) ---

	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}

void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

-	pm_runtime_get_sync(vdev->drm.dev);
-
	ivpu_dbg(vdev, PM, "Pre-reset..\n");
	atomic_inc(&vdev->pm->reset_counter);
-	atomic_set(&vdev->pm->in_reset, 1);
+	atomic_set(&vdev->pm->reset_pending, 1);
+
+	pm_runtime_get_sync(vdev->drm.dev);
+	down_write(&vdev->pm->reset_lock);
	ivpu_prepare_for_reset(vdev);
	ivpu_hw_reset(vdev);
	ivpu_pm_prepare_cold_boot(vdev);
	ivpu_jobs_abort_all(vdev);
	ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}

void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);
	int ret;

	ivpu_dbg(vdev, PM, "Post-reset..\n");
	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
-	atomic_set(&vdev->pm->in_reset, 0);
+	up_write(&vdev->pm->reset_lock);
+	atomic_set(&vdev->pm->reset_pending, 0);
	ivpu_dbg(vdev, PM, "Post-reset done.\n");

+	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}

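The new version brackets the reset with down_write(&pm->reset_lock) in the prepare callback and up_write() in the done callback, taking a runtime-PM reference before the lock instead of at the top of prepare. The read side is not visible in this diff; presumably other driver paths take the semaphore for read while touching the hardware, so a pending reset first drains them. A minimal userspace sketch of that pairing with a POSIX rwlock (the reader path is an assumption for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int reset_pending;

static void reset_prepare(void)
{
	atomic_store(&reset_pending, 1);
	pthread_rwlock_wrlock(&reset_lock);	/* down_write(): waits out all readers */
	puts("resetting with exclusive ownership of the device");
}

static void reset_done(void)
{
	pthread_rwlock_unlock(&reset_lock);	/* up_write() */
	atomic_store(&reset_pending, 0);	/* cleared only after the reset */
}

/* Hypothetical hardware-touching path taking the lock for read. */
static void hw_access(void)
{
	pthread_rwlock_rdlock(&reset_lock);
	puts("touching hardware outside of any reset");
	pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
	hw_access();
	reset_prepare();
	reset_done();
	return 0;
}
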
void ivpu_pm_init(struct ivpu_device *vdev)
{
	struct device *dev = vdev->drm.dev;
	struct ivpu_pm_info *pm = vdev->pm;
	int delay;

	pm->vdev = vdev;
	pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

-	atomic_set(&pm->in_reset, 0);
+	init_rwsem(&pm->reset_lock);
+	atomic_set(&pm->reset_pending, 0);
+	atomic_set(&pm->reset_counter, 0);
+
	INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
	INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);

	if (ivpu_disable_recovery)
		delay = -1;
	else
		delay = vdev->timeout.autosuspend;


--- 27 unchanged lines hidden ---