// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * Wake lock is the mechanism to cause the display engine to exit DC
 * states to allow programming to registers that are powered down in
 * those states. Previous projects exited DC states automatically when
 * detecting register programming. Now software controls the exit by
 * programming the wake lock. This improves system performance and
 * system interactions and better fits the flip queue style of
 * programming. Wake lock is only required when DC5, DC6, or DC6v have
 * been enabled in DC_STATE_EN and the wake lock mode of operation has
 * been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down. In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register. With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register. There are also 13 control registers
 * that can be used to hold and release different wakelocks. In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used. The other definitions are here for
 * potential future use.
 */

#define DMC_WAKELOCK_CTL_TIMEOUT 5	/* ms to wait for a wakelock ack */
#define DMC_WAKELOCK_HOLD_TIME 50	/* ms to keep the lock held after the last put */

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

/* LNL register ranges for which the DMC wakelock must be held */
static struct intel_dmc_wl_range lnl_wl_range[] = {
	{ .start = 0x60000, .end = 0x7ffff },
};

/*
 * Called with wl->lock held and the refcount at zero. The REQ bit is
 * not cleared right away: the delayed worker does that after
 * DMC_WAKELOCK_HOLD_TIME, so back-to-back get/put cycles don't bounce
 * the hardware in and out of DC states.
 */
static void __intel_dmc_wl_release(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

/* Delayed worker that performs the actual wakelock release in hardware */
static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct drm_i915_private *i915 =
		container_of(wl, struct drm_i915_private, display.wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if the refcount became non-zero while waiting for the
	 * spinlock, meaning the wakelock was re-taken and must stay held.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(i915, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_wait_for_register_nowl(i915, DMC_WAKELOCK1_CTL,
					   DMC_WAKELOCK_CTL_ACK, 0,
					   DMC_WAKELOCK_CTL_TIMEOUT)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

/* Check whether an MMIO offset falls within a range that needs the wakelock */
static bool intel_dmc_wl_check_range(u32 address)
{
	int i;
	bool wl_needed = false;

	for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
		if (address >= lnl_wl_range[i].start &&
		    address <= lnl_wl_range[i].end) {
			wl_needed = true;
			break;
		}
	}

	return wl_needed;
}

static bool __intel_dmc_wl_supported(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) < 20 ||
	    !intel_dmc_has_payload(i915) ||
	    !i915->display.params.enable_dmc_wl)
		return false;

	return true;
}
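
/*
 * Illustrative sketch, not part of the driver: how a display register
 * accessor is expected to wrap an MMIO read with the get/put pair so
 * that DMC holds the hardware out of DC states for the duration of the
 * access. The function name is hypothetical; the real accessors that
 * follow this pattern live in intel_de.h.
 */
static u32 __maybe_unused example_dmc_wl_read(struct drm_i915_private *i915,
					      i915_reg_t reg)
{
	u32 val;

	/* No-op unless the wakelock is supported, enabled and reg is in range */
	intel_dmc_wl_get(i915, reg);

	val = intel_uncore_read(&i915->uncore, reg);

	/* Drops the reference; the hardware release is deferred to the worker */
	intel_dmc_wl_put(i915, reg);

	return val;
}
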
void intel_dmc_wl_init(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;

	/* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
	if (DISPLAY_VER(i915) < 20 ||
	    !i915->display.params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount, 0);
}

void intel_dmc_wl_enable(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (wl->enabled)
		goto out_unlock;

	/*
	 * Enable wakelock in DMC. We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	__intel_de_rmw_nowl(i915, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_disable(struct drm_i915_private *i915)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	flush_delayed_work(&wl->work);

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(i915, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	refcount_set(&wl->refcount, 0);
	wl->enabled = false;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_get(struct drm_i915_private *i915, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet. It may be already taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (!wl->taken) {
		__intel_de_rmw_nowl(i915, DMC_WAKELOCK1_CTL, 0,
				    DMC_WAKELOCK_CTL_REQ);

		if (__intel_wait_for_register_nowl(i915, DMC_WAKELOCK1_CTL,
						   DMC_WAKELOCK_CTL_ACK,
						   DMC_WAKELOCK_CTL_ACK,
						   DMC_WAKELOCK_CTL_TIMEOUT)) {
			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
			goto out_unlock;
		}

		wl->taken = true;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct drm_i915_private *i915, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &i915->display.wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(i915))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		__intel_dmc_wl_release(i915);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
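
/*
 * Illustrative lifecycle sketch, not called anywhere in the driver: a
 * rough ordering of how the API above is expected to be used. The
 * function name and the register offset are hypothetical; in the real
 * driver the enable/disable calls are tied to DC state programming in
 * the display power code, and get/put are issued by the MMIO accessors.
 */
static void __maybe_unused example_dmc_wl_lifecycle(struct drm_i915_private *i915)
{
	/* Hypothetical offset inside lnl_wl_range, for illustration only */
	i915_reg_t reg = _MMIO(0x60400);

	/* Once at driver init; the DMC firmware is not loaded yet here */
	intel_dmc_wl_init(i915);

	/* Once DC5/DC6/DC6v have been enabled in DC_STATE_EN */
	intel_dmc_wl_enable(i915);

	/* Each access to a register in a protected range is bracketed */
	intel_dmc_wl_get(i915, reg);
	/* ... programming of the register would happen here ... */
	intel_dmc_wl_put(i915, reg);

	/* Before DC states are disabled again */
	intel_dmc_wl_disable(i915);
}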