// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>
#include <linux/suspend.h>

#include "display/intel_display_power.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
#include "pxp/intel_pxp_pm.h"

#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)

static void user_forcewake(struct intel_gt *gt, bool suspend)
{
	int count = atomic_read(&gt->user_wakeref);
	intel_wakeref_t wakeref;

	/* Inside suspend/resume so single threaded, no races to worry about. */
	if (likely(!count))
		return;

	wakeref = intel_gt_pm_get(gt);
	if (suspend) {
		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
		atomic_sub(count, &gt->wakeref.count);
	} else {
		atomic_add(count, &gt->wakeref.count);
	}
	intel_gt_pm_put(gt, wakeref);
}

/* Open a GT busyness accounting window; paired with runtime_end(). */
static void runtime_begin(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.start = ktime_get();
	gt->stats.active = true;
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}

static void runtime_end(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.active = false;
	gt->stats.total =
		ktime_add(gt->stats.total,
			  ktime_sub(ktime_get(), gt->stats.start));
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}

static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_display *display = i915->display;

	GT_TRACE(gt, "\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has a negative impact on the performance of the chip,
	 * with huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(display, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	intel_rc6_unpark(&gt->rc6);
	intel_rps_unpark(&gt->rps);
	i915_pmu_gt_unparked(gt);
	intel_guc_busyness_unpark(gt);

	intel_gt_unpark_requests(gt);
	runtime_begin(gt);

	return 0;
}

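/*
 * __gt_park - wakeref 'put' callback, run when the last GT wakeref is
 * released. Mirrors __gt_unpark(): close the busyness accounting window,
 * park request handling, GuC busyness tracking, VMAs, the PMU, RPS and
 * RC6, flush any residual interrupt and finally release the GT IRQ power
 * domain taken on unpark.
 */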
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_display *display = i915->display;

	GT_TRACE(gt, "\n");

	runtime_end(gt);
	intel_gt_park_requests(gt);

	intel_guc_busyness_park(gt);
	i915_vma_parked(gt);
	i915_pmu_gt_parked(gt);
	intel_rps_park(&gt->rps);
	intel_rc6_park(&gt->rc6);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	/* Defer dropping the display power well for 100ms, it's slow! */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put_async(display, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}

static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
};

void intel_gt_pm_init_early(struct intel_gt *gt)
{
	/*
	 * We access the runtime_pm structure via gt->i915 here rather than
	 * gt->uncore as we do elsewhere in the file because gt->uncore is not
	 * yet initialized for all tiles at this point in the driver startup.
	 * runtime_pm is per-device rather than per-tile, so this is still the
	 * correct structure.
	 */
	intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
	seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}

void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
	intel_rps_init(&gt->rps);
}

/* An engine reset is only safe if it will not clobber the display. */
static bool reset_engines(struct intel_gt *gt)
{
	if (intel_gt_gpu_reset_clobbers_display(gt))
		return false;

	return intel_gt_reset_all_engines(gt) == 0;
}

static void gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GT_TRACE(gt, "force:%s\n", str_yes_no(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_check_clock_frequency(gt);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

		if (engine->sanitize)
			engine->sanitize(engine);
	}

	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
	}

	intel_uc_reset(&gt->uc, false);

	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_rps_sanitize(&gt->rps);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rc6_fini(&gt->rc6);
}

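/*
 * intel_gt_resume_early - early resume fixups, run before intel_gt_resume():
 * sanitize the MCR steering semaphore, restore early uncore state and check
 * for (and clear) any faults left behind across the suspend.
 */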
void intel_gt_resume_early(struct intel_gt *gt)
{
	/*
	 * Sanitize steer semaphores during driver resume. This is necessary
	 * to address observed cases of steer semaphores being held after a
	 * suspend operation. Confirmation from the hardware team assures the
	 * safety of this operation, as no lock acquisitions by other agents
	 * occur during the driver load/resume process.
	 */
	intel_gt_mcr_lock_sanitize(gt);

	intel_uncore_resume_early(gt->uncore);
	intel_gt_check_and_clear_faults(gt);
}

int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err;

	err = intel_gt_has_unrecoverable_error(gt);
	if (err)
		return err;

	GT_TRACE(gt, "\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fix up the user contexts on their first pin.
	 */
	gt_sanitize(gt, true);

	wakeref = intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;
	}

	/* Only when the HW is re-initialised can we replay the requests */
	err = intel_gt_init_hw(gt);
	if (err) {
		gt_probe_error(gt, "Failed to initialize GPU, declaring it wedged!\n");
		goto err_wedged;
	}

	intel_uc_reset_finish(&gt->uc);

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);

		engine->serial++; /* kernel context lost */
		err = intel_engine_resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			gt_err(gt, "Failed to restart %s (%d)\n",
			       engine->name, err);
			goto err_wedged;
		}
	}

	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	user_forcewake(gt, false);

out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt, wakeref);
	intel_gt_bind_context_set_ready(gt);
	return err;

err_wedged:
	intel_gt_set_wedged(gt);
	goto out_fw;
}

/* Wait for the GT to idle; if it does not settle in time, wedge it. */
static void wait_for_suspend(struct intel_gt *gt)
{
	if (!intel_gt_pm_is_awake(gt))
		return;

	if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		intel_gt_retire_requests(gt);
	}

	intel_gt_pm_wait_for_idle(gt);
}

void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	intel_gt_bind_context_set_unready(gt);
	user_forcewake(gt, true);
	wait_for_suspend(gt);
}

/* Without CONFIG_SUSPEND && CONFIG_PM_SLEEP there is no target state; assume s2idle. */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
	return pm_suspend_target_state;
#else
	return PM_SUSPEND_TO_IDLE;
#endif
}

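/*
 * intel_gt_suspend_late - final GT quiescing on the way into system suspend.
 * For targets that actually power the device down (anything other than
 * s2idle), disable RPS, RC6 and the LLC under a runtime-pm wakeref and
 * sanitize the GT so that no HW access to memory remains once ownership
 * is lost.
 */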
void intel_gt_suspend_late(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already; but also want to be independent */
	wait_for_suspend(gt);

	if (is_mock_gt(gt))
		return;

	GEM_BUG_ON(gt->awake);

	intel_uc_suspend(&gt->uc);

	/*
	 * On disabling the device, we want to turn off HW access to memory
	 * that we no longer own.
	 *
	 * However, not all suspend states disable the device. S0 (s2idle)
	 * is effectively runtime-suspend: the device is left powered on
	 * but needs to be put into a low power state. We need to keep
	 * power management enabled, but we also retain system state and so
	 * it remains safe to keep on using our allocated memory.
	 */
	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
		return;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	gt_sanitize(gt, false);

	GT_TRACE(gt, "\n");
}

void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_gt_bind_context_set_unready(gt);
	intel_uc_runtime_suspend(&gt->uc);

	GT_TRACE(gt, "\n");
}

int intel_gt_runtime_resume(struct intel_gt *gt)
{
	int ret;

	GT_TRACE(gt, "\n");
	intel_gt_init_swizzling(gt);
	intel_ggtt_restore_fences(gt->ggtt);

	ret = intel_uc_runtime_resume(&gt->uc);
	if (ret)
		return ret;

	intel_gt_bind_context_set_ready(gt);
	return 0;
}

static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
{
	ktime_t total = gt->stats.total;

	if (gt->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), gt->stats.start));

	return total;
}

/* Read the accumulated GT awake time; the seqcount guarantees a consistent snapshot. */
ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);
		total = __intel_gt_get_awake_time(gt);
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif