// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"

static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");

#define PM_RESCHEDULE_LIMIT 5

static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
        struct ivpu_fw_info *fw = vdev->fw;

        ivpu_cmdq_reset_all_contexts(vdev);
        ivpu_ipc_reset(vdev);
        ivpu_fw_load(vdev);
        fw->entry_point = fw->cold_boot_entry_point;
}

static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
        struct ivpu_fw_info *fw = vdev->fw;
        struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);

        if (!bp->save_restore_ret_address) {
                ivpu_pm_prepare_cold_boot(vdev);
                return;
        }

        ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
        fw->entry_point = bp->save_restore_ret_address;
}

static int ivpu_suspend(struct ivpu_device *vdev)
{
        int ret;

        ret = ivpu_shutdown(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
                return ret;
        }

        return ret;
}

static int ivpu_resume(struct ivpu_device *vdev)
{
        int ret;

retry:
        ret = ivpu_hw_power_up(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
                return ret;
        }

        ret = ivpu_mmu_enable(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
                ivpu_hw_power_down(vdev);
                return ret;
        }

        ret = ivpu_boot(vdev);
        if (ret) {
                ivpu_mmu_disable(vdev);
                ivpu_hw_power_down(vdev);
                if (!ivpu_fw_is_cold_boot(vdev)) {
                        ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret);
                        ivpu_pm_prepare_cold_boot(vdev);
                        goto retry;
                } else {
                        ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
                }
        }

        return ret;
}

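/*
 * Recovery worker: attempts a PCI function-level reset of the VPU and then
 * notifies user space with an IVPU_PM_EVENT=IVPU_RECOVER uevent, regardless
 * of whether the reset succeeded.
 */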
static void ivpu_pm_recovery_work(struct work_struct *work)
{
        struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
        struct ivpu_device *vdev = pm->vdev;
        char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
        int ret;

retry:
        ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
        if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
                cond_resched();
                goto retry;
        }

        if (ret && ret != -EAGAIN)
                ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);

        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}

void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
{
        struct ivpu_pm_info *pm = vdev->pm;

        if (ivpu_disable_recovery) {
                ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
                return;
        }

        if (ivpu_is_fpga(vdev)) {
                ivpu_err(vdev, "Recovery not available on FPGA\n");
                return;
        }

        /* Schedule recovery if it's not in progress */
        if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
                ivpu_hw_irq_disable(vdev);
                queue_work(system_long_wq, &pm->recovery_work);
        }
}

int ivpu_pm_suspend_cb(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct ivpu_device *vdev = to_ivpu_device(drm);
        unsigned long timeout;

        ivpu_dbg(vdev, PM, "Suspend..\n");

        timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
        while (!ivpu_hw_is_idle(vdev)) {
                cond_resched();
                if (time_after_eq(jiffies, timeout)) {
                        ivpu_err(vdev, "Failed to enter idle on system suspend\n");
                        return -EBUSY;
                }
        }

        ivpu_suspend(vdev);
        ivpu_pm_prepare_warm_boot(vdev);

        pci_save_state(to_pci_dev(dev));
        pci_set_power_state(to_pci_dev(dev), PCI_D3hot);

        ivpu_dbg(vdev, PM, "Suspend done.\n");

        return 0;
}

int ivpu_pm_resume_cb(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct ivpu_device *vdev = to_ivpu_device(drm);
        int ret;

        ivpu_dbg(vdev, PM, "Resume..\n");

        pci_set_power_state(to_pci_dev(dev), PCI_D0);
        pci_restore_state(to_pci_dev(dev));

        ret = ivpu_resume(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to resume: %d\n", ret);

        ivpu_dbg(vdev, PM, "Resume done.\n");

        return ret;
}

int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct ivpu_device *vdev = to_ivpu_device(drm);
        int ret;

        ivpu_dbg(vdev, PM, "Runtime suspend..\n");

        if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
                ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
                         vdev->pm->suspend_reschedule_counter);
                pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
                vdev->pm->suspend_reschedule_counter--;
                return -EAGAIN;
        }

        ret = ivpu_suspend(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to suspend VPU: %d\n", ret);

        if (!vdev->pm->suspend_reschedule_counter) {
                ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
                ivpu_pm_prepare_cold_boot(vdev);
        } else {
                ivpu_pm_prepare_warm_boot(vdev);
        }

        vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

        ivpu_dbg(vdev, PM, "Runtime suspend done.\n");

        return 0;
}

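/*
 * Runtime resume callback, invoked by the PM core when the device must
 * become active again (e.g. via ivpu_rpm_get()). Powers the HW back up and
 * reboots the firmware through ivpu_resume().
 */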
int ivpu_pm_runtime_resume_cb(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct ivpu_device *vdev = to_ivpu_device(drm);
        int ret;

        ivpu_dbg(vdev, PM, "Runtime resume..\n");

        ret = ivpu_resume(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);

        ivpu_dbg(vdev, PM, "Runtime resume done.\n");

        return ret;
}

int ivpu_rpm_get(struct ivpu_device *vdev)
{
        int ret;

        ret = pm_runtime_resume_and_get(vdev->drm.dev);
        if (!drm_WARN_ON(&vdev->drm, ret < 0))
                vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

        return ret;
}

int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
        int ret;

        ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n",
                 atomic_read(&vdev->drm.dev->power.usage_count));

        ret = pm_runtime_get_if_active(vdev->drm.dev, false);
        drm_WARN_ON(&vdev->drm, ret < 0);

        return ret;
}

void ivpu_rpm_put(struct ivpu_device *vdev)
{
        pm_runtime_mark_last_busy(vdev->drm.dev);
        pm_runtime_put_autosuspend(vdev->drm.dev);
}

void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
        struct ivpu_device *vdev = pci_get_drvdata(pdev);

        pm_runtime_get_sync(vdev->drm.dev);

        ivpu_dbg(vdev, PM, "Pre-reset..\n");
        atomic_inc(&vdev->pm->reset_counter);
        atomic_set(&vdev->pm->in_reset, 1);
        ivpu_prepare_for_reset(vdev);
        ivpu_hw_reset(vdev);
        ivpu_pm_prepare_cold_boot(vdev);
        ivpu_jobs_abort_all(vdev);
        ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}

void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
        struct ivpu_device *vdev = pci_get_drvdata(pdev);
        int ret;

        ivpu_dbg(vdev, PM, "Post-reset..\n");
        ret = ivpu_resume(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
        atomic_set(&vdev->pm->in_reset, 0);
        ivpu_dbg(vdev, PM, "Post-reset done.\n");

        pm_runtime_put_autosuspend(vdev->drm.dev);
}

void ivpu_pm_init(struct ivpu_device *vdev)
{
        struct device *dev = vdev->drm.dev;
        struct ivpu_pm_info *pm = vdev->pm;
        int delay;

        pm->vdev = vdev;
        pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;

        atomic_set(&pm->in_reset, 0);
        INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);

        if (ivpu_disable_recovery)
                delay = -1;
        else
                delay = vdev->timeout.autosuspend;

        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, delay);

        ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}

void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
        cancel_work_sync(&vdev->pm->recovery_work);
}

void ivpu_pm_enable(struct ivpu_device *vdev)
{
        struct device *dev = vdev->drm.dev;

        pm_runtime_set_active(dev);
        pm_runtime_allow(dev);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}

void ivpu_pm_disable(struct ivpu_device *vdev)
{
        pm_runtime_get_noresume(vdev->drm.dev);
        pm_runtime_forbid(vdev->drm.dev);
}