// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");

static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");

#define PM_RESCHEDULE_LIMIT     5

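/*
 * Prepare the next boot as a cold boot: drop all command queue contexts,
 * reset the IPC channel, reload the firmware image and point the boot
 * entry at the firmware's cold boot entry point.
 */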
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;

	ivpu_cmdq_reset_all_contexts(vdev);
	ivpu_ipc_reset(vdev);
	ivpu_fw_load(vdev);
	fw->entry_point = fw->cold_boot_entry_point;
}

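/*
 * Prepare the next boot as a warm boot: resume the firmware from the
 * save/restore return address it stored in the boot parameters. Falls
 * back to a cold boot if no address was recorded.
 */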
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);

	if (!bp->save_restore_ret_address) {
		ivpu_pm_prepare_cold_boot(vdev);
		return;
	}

	ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
	fw->entry_point = bp->save_restore_ret_address;
}

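/* Shut the NPU down and put the PCI device into D3hot. */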
static int ivpu_suspend(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down; it can get corrupted if the NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_shutdown(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to shut down NPU: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

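/*
 * Bring the NPU back up: restore PCI state, power up the HW, enable the
 * MMU and boot the firmware. If a warm boot fails, the device is powered
 * back down, re-prepared for a cold boot and the sequence is retried once.
 */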
static int ivpu_resume(struct ivpu_device *vdev)
{
	int ret;

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
	pci_restore_state(to_pci_dev(vdev->drm.dev));

retry:
	ret = ivpu_hw_power_up(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
		goto err_power_down;
	}

	ret = ivpu_mmu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
		goto err_power_down;
	}

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_mmu_disable;

	return 0;

err_mmu_disable:
	ivpu_mmu_disable(vdev);
err_power_down:
	ivpu_hw_power_down(vdev);

	if (!ivpu_fw_is_cold_boot(vdev)) {
		ivpu_pm_prepare_cold_boot(vdev);
		goto retry;
	} else {
		ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
	}

	return ret;
}

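/*
 * Recovery worker: resume the device, dump the firmware log, then perform
 * a full reset (suspend, cold boot preparation, abort of all submitted
 * jobs, resume) under the reset lock, and notify user space with an
 * IVPU_PM_EVENT=IVPU_RECOVER uevent.
 */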
static void ivpu_pm_recovery_work(struct work_struct *work)
{
	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
	struct ivpu_device *vdev = pm->vdev;
	char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
	int ret;

	ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));

	ret = pm_runtime_resume_and_get(vdev->drm.dev);
	if (ret)
		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);

	ivpu_fw_log_dump(vdev);

	atomic_inc(&vdev->pm->reset_counter);
	atomic_set(&vdev->pm->reset_pending, 1);
	down_write(&vdev->pm->reset_lock);

	ivpu_suspend(vdev);
	ivpu_pm_prepare_cold_boot(vdev);
	ivpu_jobs_abort_all(vdev);

	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);

	up_write(&vdev->pm->reset_lock);
	atomic_set(&vdev->pm->reset_pending, 0);

	kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}

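/*
 * Schedule recovery unless it is disabled, running on FPGA, or already in
 * progress. Interrupts are disabled up front to protect against an IRQ
 * storm from a hung device.
 */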
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{
	ivpu_err(vdev, "Recovery triggered by %s\n", reason);

	if (ivpu_disable_recovery) {
		ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
		return;
	}

	if (ivpu_is_fpga(vdev)) {
		ivpu_err(vdev, "Recovery not available on FPGA\n");
		return;
	}

	/* Trigger recovery if it's not in progress */
	if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
		ivpu_hw_diagnose_failure(vdev);
		ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
		queue_work(system_long_wq, &vdev->pm->recovery_work);
	}
}

static void ivpu_job_timeout_work(struct work_struct *work)
{
	struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
	struct ivpu_device *vdev = pm->vdev;

	ivpu_pm_trigger_recovery(vdev, "TDR");
}

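/*
 * Arm the TDR (timeout detection and recovery) timer. A non-zero
 * tdr_timeout_ms module parameter overrides the per-platform default.
 */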
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
{
	unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;

	/* No-op if already queued */
	queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
}

void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
	cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
}

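/*
 * System suspend callback. Waits up to the TDR timeout for the HW to go
 * idle, requests D0i3 entry from the firmware and prepares a warm boot
 * for the matching resume.
 */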
int ivpu_pm_suspend_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	unsigned long timeout;

	ivpu_dbg(vdev, PM, "Suspend..\n");

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
	while (!ivpu_hw_is_idle(vdev)) {
		cond_resched();
		if (time_after_eq(jiffies, timeout)) {
			ivpu_err(vdev, "Failed to enter idle on system suspend\n");
			return -EBUSY;
		}
	}

	ivpu_jsm_pwr_d0i3_enter(vdev);

	ivpu_suspend(vdev);
	ivpu_pm_prepare_warm_boot(vdev);

	ivpu_dbg(vdev, PM, "Suspend done.\n");

	return 0;
}

int ivpu_pm_resume_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	int ret;

	ivpu_dbg(vdev, PM, "Resume..\n");

	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to resume: %d\n", ret);

	ivpu_dbg(vdev, PM, "Resume done.\n");

	return ret;
}

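/*
 * Runtime suspend callback. If the HW is busy, the suspend is rescheduled
 * up to PM_RESCHEDULE_LIMIT times before it is forced. A forced suspend
 * (or a failed D0i3 entry) dumps the firmware log and requires a cold
 * boot on the next resume; otherwise a warm boot is prepared.
 */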
int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	bool hw_is_idle = true;
	int ret;

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));

	ivpu_dbg(vdev, PM, "Runtime suspend..\n");

	if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
		ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
			 vdev->pm->suspend_reschedule_counter);
		pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
		vdev->pm->suspend_reschedule_counter--;
		return -EAGAIN;
	}

	if (!vdev->pm->suspend_reschedule_counter)
		hw_is_idle = false;
	else if (ivpu_jsm_pwr_d0i3_enter(vdev))
		hw_is_idle = false;

	ret = ivpu_suspend(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to suspend NPU: %d\n", ret);

	if (!hw_is_idle) {
		ivpu_err(vdev, "NPU failed to enter idle, force suspended.\n");
		ivpu_fw_log_dump(vdev);
		ivpu_pm_prepare_cold_boot(vdev);
	} else {
		ivpu_pm_prepare_warm_boot(vdev);
	}

	vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

	ivpu_dbg(vdev, PM, "Runtime suspend done.\n");

	return 0;
}

int ivpu_pm_runtime_resume_cb(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ivpu_device *vdev = to_ivpu_device(drm);
	int ret;

	ivpu_dbg(vdev, PM, "Runtime resume..\n");

	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);

	ivpu_dbg(vdev, PM, "Runtime resume done.\n");

	return ret;
}

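/*
 * Take a runtime PM reference, resuming the device if needed. A successful
 * get also resets the suspend reschedule budget. Paired with ivpu_rpm_put().
 */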
int ivpu_rpm_get(struct ivpu_device *vdev)
{
	int ret;

	ret = pm_runtime_resume_and_get(vdev->drm.dev);
	if (!drm_WARN_ON(&vdev->drm, ret < 0))
		vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

	return ret;
}

int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
	int ret;

	ret = pm_runtime_get_if_in_use(vdev->drm.dev);
	drm_WARN_ON(&vdev->drm, ret < 0);

	return ret;
}

void ivpu_rpm_put(struct ivpu_device *vdev)
{
	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}

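/*
 * PCI reset_prepare hook (e.g. before an FLR): keep the device resumed,
 * take the reset lock, quiesce and reset the NPU, then prepare a cold
 * boot and abort all submitted jobs.
 */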
void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	ivpu_dbg(vdev, PM, "Pre-reset..\n");
	atomic_inc(&vdev->pm->reset_counter);
	atomic_set(&vdev->pm->reset_pending, 1);

	pm_runtime_get_sync(vdev->drm.dev);
	down_write(&vdev->pm->reset_lock);
	ivpu_prepare_for_reset(vdev);
	ivpu_hw_reset(vdev);
	ivpu_pm_prepare_cold_boot(vdev);
	ivpu_jobs_abort_all(vdev);
	ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}

void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);
	int ret;

	ivpu_dbg(vdev, PM, "Post-reset..\n");
	ret = ivpu_resume(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
	up_write(&vdev->pm->reset_lock);
	atomic_set(&vdev->pm->reset_pending, 0);
	ivpu_dbg(vdev, PM, "Post-reset done.\n");

	pm_runtime_mark_last_busy(vdev->drm.dev);
	pm_runtime_put_autosuspend(vdev->drm.dev);
}

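/*
 * Initialize PM state and workers. Autosuspend is disabled (delay of -1)
 * when recovery is disabled.
 */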
void ivpu_pm_init(struct ivpu_device *vdev)
{
	struct device *dev = vdev->drm.dev;
	struct ivpu_pm_info *pm = vdev->pm;
	int delay;

	pm->vdev = vdev;
	pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

	init_rwsem(&pm->reset_lock);
	atomic_set(&pm->reset_pending, 0);
	atomic_set(&pm->reset_counter, 0);

	INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
	INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);

	if (ivpu_disable_recovery)
		delay = -1;
	else
		delay = vdev->timeout.autosuspend;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, delay);

	ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}

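/*
 * Synchronously cancel any queued recovery work. The TDR delayed work is
 * expected to be stopped already (warned on otherwise).
 */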
void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
	drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
	cancel_work_sync(&vdev->pm->recovery_work);
}

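/*
 * Hand the device over to runtime PM: mark it active and drop the initial
 * reference so it can autosuspend once idle. ivpu_pm_disable() reverses
 * this on teardown.
 */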
void ivpu_pm_enable(struct ivpu_device *vdev)
{
	struct device *dev = vdev->drm.dev;

	pm_runtime_set_active(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

void ivpu_pm_disable(struct ivpu_device *vdev)
{
	pm_runtime_get_noresume(vdev->drm.dev);
	pm_runtime_forbid(vdev->drm.dev);
}

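/*
 * How these callbacks are wired up (sketch): the PCI driver registers them
 * through dev_pm_ops and pci_error_handlers, roughly as below. The struct
 * names are illustrative; see ivpu_drv.c for the exact definitions.
 *
 *	static const struct dev_pm_ops ivpu_drv_pci_pm = {
 *		SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
 *		SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb,
 *				   ivpu_pm_runtime_resume_cb, NULL)
 *	};
 *
 *	static const struct pci_error_handlers ivpu_drv_pci_err = {
 *		.reset_prepare = ivpu_pm_reset_prepare_cb,
 *		.reset_done = ivpu_pm_reset_done_cb,
 *	};
 */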