// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * Wake lock is the mechanism to cause the display engine to exit DC
 * states to allow programming of registers that are powered down in
 * those states. Previous projects exited DC states automatically when
 * register programming was detected. Now software controls the exit
 * by programming the wake lock. This improves system performance and
 * system interactions and better fits the flip queue style of
 * programming. Wake lock is only required when DC5, DC6, or DC6v have
 * been enabled in DC_STATE_EN and the wake lock mode of operation has
 * been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down.  In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register.  With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register.  There are also 13 control registers
 * that can be used to hold and release different wakelocks.  In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used.  The other definitions are here for
 * potential future use.
 */

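/*
 * DMC_WAKELOCK_CTL_TIMEOUT bounds the wait for the ACK handshake in
 * DMC_WAKELOCK1_CTL; DMC_WAKELOCK_HOLD_TIME is how long (in
 * milliseconds) the wakelock is kept asserted after the last
 * reference is dropped, so that back-to-back register accesses do
 * not toggle it.
 */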
#define DMC_WAKELOCK_CTL_TIMEOUT 5
#define DMC_WAKELOCK_HOLD_TIME 50

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

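/*
 * Register offset ranges for which the wakelock needs to be held
 * before MMIO access, i.e. registers that are powered down in DC
 * states on LNL.
 */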
static struct intel_dmc_wl_range lnl_wl_range[] = {
	{ .start = 0x60000, .end = 0x7ffff },
};

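/*
 * Called with wl->lock held and the refcount already at zero: defer
 * the actual hardware release to the delayed work, so the wakelock
 * stays asserted for DMC_WAKELOCK_HOLD_TIME in case another register
 * access comes in right away.
 */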
static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

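/*
 * Delayed work that performs the actual release: clear the REQ bit in
 * DMC_WAKELOCK1_CTL and wait for the hardware to deassert ACK.  Bails
 * out if a new reference was taken while the work was pending.
 */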
static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if a new reference was taken while waiting for the
	 * spinlock: the wakelock must then stay asserted.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
					      DMC_WAKELOCK_CTL_ACK, 0,
					      DMC_WAKELOCK_CTL_TIMEOUT)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

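/*
 * Return true if the register at @address lies in a range that needs
 * the wakelock to be held while it is accessed.
 */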
static bool intel_dmc_wl_check_range(u32 address)
{
	int i;
	bool wl_needed = false;

	for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
		if (address >= lnl_wl_range[i].start &&
		    address <= lnl_wl_range[i].end) {
			wl_needed = true;
			break;
		}
	}

	return wl_needed;
}

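/*
 * The wakelock is only used on display version 20+ hardware, when a
 * DMC firmware payload has been loaded and the enable_dmc_wl display
 * parameter is set.
 */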
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (DISPLAY_VER(display) < 20 ||
	    !intel_dmc_has_payload(i915) ||
	    !display->params.enable_dmc_wl)
		return false;

	return true;
}

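/**
 * intel_dmc_wl_init - Initialize the DMC wakelock state
 * @display: display device
 *
 * Set up the spinlock, refcount and delayed work used to track the
 * wakelock.  This runs before the DMC firmware is loaded, so only the
 * static conditions (display version and the enable_dmc_wl parameter)
 * are checked here.
 */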
void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
	if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount, 0);
}

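/**
 * intel_dmc_wl_enable - Enable the DMC wakelock mechanism
 * @display: display device
 *
 * Set DMC_WAKELOCK_CFG_ENABLE so that DMC honors wakelock requests.
 * This only enables the mechanism; the wakelock itself is asserted on
 * demand by intel_dmc_wl_get().
 */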
void intel_dmc_wl_enable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (wl->enabled)
		goto out_unlock;

	/*
	 * Enable wakelock in DMC.  We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

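/**
 * intel_dmc_wl_disable - Disable the DMC wakelock mechanism
 * @display: display device
 *
 * Flush any pending release work, clear DMC_WAKELOCK_CFG_ENABLE and
 * reset the software state.  Any reference still held on the
 * wakelock is dropped.
 */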
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	refcount_set(&wl->refcount, 0);
	wl->enabled = false;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

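/**
 * intel_dmc_wl_get - Take a reference on the DMC wakelock
 * @display: display device
 * @reg: register about to be accessed
 *
 * If @reg lies in a range that is powered down in DC states, take a
 * reference on the wakelock and, for the first reference, assert the
 * wakelock request in DMC_WAKELOCK1_CTL and wait for the hardware to
 * ack it.  Callers pair this with intel_dmc_wl_put() around the
 * register access.
 */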
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet.  It may be already taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (!wl->taken) {
		__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
				    DMC_WAKELOCK_CTL_REQ);

		if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
						      DMC_WAKELOCK_CTL_ACK,
						      DMC_WAKELOCK_CTL_ACK,
						      DMC_WAKELOCK_CTL_TIMEOUT)) {
			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
			goto out_unlock;
		}

		wl->taken = true;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

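/**
 * intel_dmc_wl_put - Release a reference on the DMC wakelock
 * @display: display device
 * @reg: register that was accessed
 *
 * Drop the reference taken by intel_dmc_wl_get().  When the last
 * reference goes away, the hardware release is deferred by
 * DMC_WAKELOCK_HOLD_TIME so that back-to-back register accesses do
 * not toggle the wakelock.
 */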
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount))
		__intel_dmc_wl_release(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
265