// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_frontbuffer.h"
#include "intel_panel.h"

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) via a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing the RR without any visual effect to the
 * user and can be used during normal system usage. This is done by
 * programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to the low RR based on usage scenarios.
 *
 * The implementation is based on the frontbuffer tracking infrastructure.
 * When there is a disturbance on the screen triggered by user activity or a
 * periodic system activity, DRRS is disabled (RR is changed to the high RR).
 * When there is no movement on screen, after a timeout of 1 second, a switch
 * to the low RR is made.
 *
 * For integration with the frontbuffer tracking code, intel_drrs_invalidate()
 * and intel_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein the RR is set based on the rate
 * requested by userspace.
 */

const char *intel_drrs_type_str(enum drrs_type drrs_type)
{
	static const char * const str[] = {
		[DRRS_TYPE_NONE] = "none",
		[DRRS_TYPE_STATIC] = "static",
		[DRRS_TYPE_SEAMLESS] = "seamless",
	};

	if (drrs_type >= ARRAY_SIZE(str))
		return "<invalid>";

	return str[drrs_type];
}

static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
				     enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
	u32 bit;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
	else
		bit = TRANSCONF_REFRESH_RATE_ALT_ILK;

	intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
		     bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}

static void
intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
				enum drrs_refresh_rate refresh_rate)
{
	intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
				       refresh_rate == DRRS_REFRESH_RATE_LOW ?
				       &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
}

bool intel_drrs_is_active(struct intel_crtc *crtc)
{
	return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
}

static void intel_drrs_set_state(struct intel_crtc *crtc,
				 enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (refresh_rate == crtc->drrs.refresh_rate)
		return;

	if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
		intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
	else
		intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);

	crtc->drrs.refresh_rate = refresh_rate;
}

static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}

static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int frontbuffer_bits;

	frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 crtc_state->bigjoiner_pipes)
		frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	return frontbuffer_bits;
}

/**
 * intel_drrs_activate - activate DRRS
 * @crtc_state: the crtc state
 *
 * Activates DRRS on the crtc.
 */
void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->has_drrs)
		return;

	if (!crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
	crtc->drrs.m_n = crtc_state->dp_m_n;
	crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
	crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
	crtc->drrs.busy_frontbuffer_bits = 0;

	intel_drrs_schedule_work(crtc);

	mutex_unlock(&crtc->drrs.mutex);
}

/**
 * intel_drrs_deactivate - deactivate DRRS
 * @old_crtc_state: the old crtc state
 *
 * Deactivates DRRS on the crtc.
 */
void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (!old_crtc_state->has_drrs)
		return;

	if (!old_crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc))
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
	crtc->drrs.frontbuffer_bits = 0;
	crtc->drrs.busy_frontbuffer_bits = 0;

	mutex_unlock(&crtc->drrs.mutex);

	cancel_delayed_work_sync(&crtc->drrs.work);
}

static void intel_drrs_downclock_work(struct work_struct *work)
{
	struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);

	mutex_unlock(&crtc->drrs.mutex);
}

static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
					  unsigned int all_frontbuffer_bits,
					  bool invalidate)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		unsigned int frontbuffer_bits;

		mutex_lock(&crtc->drrs.mutex);

		frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
		if (!frontbuffer_bits) {
			mutex_unlock(&crtc->drrs.mutex);
			continue;
		}

		if (invalidate)
			crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
		else
			crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

		/* flush/invalidate means busy screen hence upclock */
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

		/*
		 * flush also means no more activity hence schedule downclock, if all
		 * other fbs are quiescent too
		 */
		if (!crtc->drrs.busy_frontbuffer_bits)
			intel_drrs_schedule_work(crtc);
		else
			cancel_delayed_work(&crtc->drrs.work);

		mutex_unlock(&crtc->drrs.mutex);
	}
}

/**
 * intel_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
			   unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
}

/**
 * intel_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR), and idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_flush(struct drm_i915_private *dev_priv,
		      unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
}

/**
 * intel_drrs_crtc_init - Init DRRS for CRTC
 * @crtc: crtc
 *
 * This function is called only once at driver load to initialize the basic
 * DRRS state for the crtc.
 */
void intel_drrs_crtc_init(struct intel_crtc *crtc)
{
	INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
	mutex_init(&crtc->drrs.mutex);
	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}

/* i915_drrs_status (per crtc): report the current DRRS state */
static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_crtc *crtc = m->private;
	const struct intel_crtc_state *crtc_state;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	mutex_lock(&crtc->drrs.mutex);

	seq_printf(m, "DRRS enabled: %s\n",
		   str_yes_no(crtc_state->has_drrs));

	seq_printf(m, "DRRS active: %s\n",
		   str_yes_no(intel_drrs_is_active(crtc)));

	seq_printf(m, "DRRS refresh rate: %s\n",
		   crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
		   "low" : "high");

	seq_printf(m, "DRRS busy frontbuffer bits: 0x%x\n",
		   crtc->drrs.busy_frontbuffer_bits);

	mutex_unlock(&crtc->drrs.mutex);

	drm_modeset_unlock(&crtc->base.mutex);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);

/*
 * i915_drrs_ctl (per crtc): writing a non-zero value manually activates
 * DRRS, writing zero deactivates it, after any pending commit has completed.
 */
static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
{
	struct intel_crtc *crtc = data;
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_crtc_commit *commit;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (!crtc_state->hw.active ||
	    !crtc_state->has_drrs)
		goto out;

	commit = crtc_state->uapi.commit;
	if (commit) {
		ret = wait_for_completion_interruptible(&commit->hw_done);
		if (ret)
			goto out;
	}

	drm_dbg(&i915->drm,
		"Manually %sactivating DRRS\n", val ? "" : "de");

	if (val)
		intel_drrs_activate(crtc_state);
	else
		intel_drrs_deactivate(crtc_state);

out:
	drm_modeset_unlock(&crtc->base.mutex);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
			 NULL, intel_drrs_debugfs_ctl_set, "%llu\n");

void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
	debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
			    crtc, &intel_drrs_debugfs_status_fops);

	debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
				   crtc, &intel_drrs_debugfs_ctl_fops);
}

/* i915_drrs_type (per connector): report the DRRS type supported by the panel */
static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
{
	struct intel_connector *connector = m->private;

	seq_printf(m, "DRRS type: %s\n",
		   intel_drrs_type_str(intel_panel_drrs_type(connector)));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_type);

void intel_drrs_connector_debugfs_add(struct intel_connector *connector)
{
	if (intel_panel_drrs_type(connector) == DRRS_TYPE_NONE)
		return;

	debugfs_create_file("i915_drrs_type", 0444, connector->base.debugfs_entry,
			    connector, &intel_drrs_debugfs_type_fops);
}
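
/*
 * Usage sketch (illustration only, kept out of the build): how the
 * frontbuffer tracking code is expected to drive DRRS, per the DOC comment
 * at the top of this file. The function below is a hypothetical caller used
 * purely for illustration; in the real driver the equivalent calls are made
 * from the frontbuffer tracking code.
 */
#if 0
static void example_frontbuffer_render(struct drm_i915_private *i915,
					unsigned int frontbuffer_bits)
{
	/* Rendering starts on these planes: force the high refresh rate now */
	intel_drrs_invalidate(i915, frontbuffer_bits);

	/* ... rendering and/or a flip completes ... */

	/*
	 * Rendering done: upclock if needed and re-arm the 1 second
	 * idleness timer that eventually switches back to the low rate.
	 */
	intel_drrs_flush(i915, frontbuffer_bits);
}
#endif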