// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * The wake lock is the mechanism by which the display engine is made to
 * exit DC states, allowing programming of registers that are powered
 * down in those states. Previous platforms exited DC states
 * automatically upon detecting register programming. Now software
 * controls the exit by programming the wake lock. This improves system
 * performance and system interactions and better fits the flip queue
 * style of programming. The wake lock is only required when DC5, DC6,
 * or DC6v have been enabled in DC_STATE_EN and the wake lock mode of
 * operation has been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down.  In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register.  With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register.  There are also 13 control registers
 * that can be used to hold and release different wakelocks.  In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used.  The other definitions are here for
 * potential future use.
 */
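
/*
 * Illustrative usage sketch, not code from this file: the intel_de MMIO
 * helpers are expected to bracket register access with a get/put pair,
 * roughly (helper names assumed from intel_de.h):
 *
 *	intel_dmc_wl_get(display, reg);
 *	val = intel_de_read(display, reg);
 *	intel_dmc_wl_put(display, reg);
 *
 * Passing the register lets intel_dmc_wl_check_range() skip the
 * wakelock for registers that remain powered in the current DC state.
 */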

/*
 * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use
 * the atomic variant of MMIO waiting.
 */
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
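/*
 * Hold time, in milliseconds, before the delayed work actually releases
 * an unreferenced wakelock (see __intel_dmc_wl_release()).
 */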
#define DMC_WAKELOCK_HOLD_TIME 50

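/*
 * Range of MMIO offsets. A zero .end denotes a single register at
 * .start, and an all-zero entry terminates a range array (see
 * intel_dmc_wl_reg_in_range()).
 */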
struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

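/* Register offset ranges that are powered off while in DC states. */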
static const struct intel_dmc_wl_range powered_off_ranges[] = {
	{ .start = 0x60000, .end = 0x7ffff },
	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
	{ .start = 0x45500 }, /* DC_STATE_SEL */
	{ .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
	{ .start = 0x45504 }, /* DC_STATE_EN */
	{ .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
	{ .start = 0x454f0 }, /* RETENTION_CTRL */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */

	/* TRANS_CMTG_CTL_* */
	{ .start = 0x6fa88 },
	{ .start = 0x6fb88 },

	{ .start = 0x46430 }, /* CHICKEN_DCPR_1 */
	{ .start = 0x46434 }, /* CHICKEN_DCPR_2 */
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
	{ .start = 0x42084 }, /* CHICKEN_MISC_2 */
	{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
	{ .start = 0x46160 }, /* CMTG_CLK_SEL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */

	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */

	{ .start = 0x45504 }, /* DC_STATE_EN */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */

	/* Scanline registers */
	{ .start = 0x70000 },
	{ .start = 0x70004 },
	{ .start = 0x70014 },
	{ .start = 0x70018 },
	{ .start = 0x71000 },
	{ .start = 0x71004 },
	{ .start = 0x71014 },
	{ .start = 0x71018 },
	{ .start = 0x72000 },
	{ .start = 0x72004 },
	{ .start = 0x72014 },
	{ .start = 0x72018 },
	{ .start = 0x73000 },
	{ .start = 0x73004 },
	{ .start = 0x73014 },
	{ .start = 0x73018 },
	{ .start = 0x7b000 },
	{ .start = 0x7b004 },
	{ .start = 0x7b014 },
	{ .start = 0x7b018 },
	{ .start = 0x7c000 },
	{ .start = 0x7c004 },
	{ .start = 0x7c014 },
	{ .start = 0x7c018 },

	{},
};

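/*
 * Schedule the delayed work that will deassert the wakelock in hardware;
 * only called once the refcount has dropped to zero.
 */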
static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if the refcount became non-zero while we were waiting
	 * for the spinlock, meaning that the wakelock is held again.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK, 0,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet.  It may already be taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (wl->taken)
		return;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
			    DMC_WAKELOCK_CTL_REQ);

	/*
	 * We need to use the atomic variant of the waiting routine
	 * because the DMC wakelock is also taken in atomic context.
	 */
	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
		return;
	}

	wl->taken = true;
}

static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
				      const struct intel_dmc_wl_range ranges[])
{
	u32 offset = i915_mmio_reg_offset(reg);

	for (int i = 0; ranges[i].start; i++) {
		u32 end = ranges[i].end ?: ranges[i].start;

		if (ranges[i].start <= offset && offset <= end)
			return true;
	}

	return false;
}

static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
{
	const struct intel_dmc_wl_range *ranges;

	/*
	 * Check whether the offset is in one of the ranges for which
	 * registers are powered off during DC states.
	 */
	if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
		return true;

	/*
	 * Check whether the offset is for a register that is touched by
	 * the DMC and requires a DC exit for proper access.
	 */
	switch (dc_state) {
	case DC_STATE_EN_DC3CO:
		ranges = xe3lpd_dc3co_dmc_ranges;
		break;
	case DC_STATE_EN_UPTO_DC5:
	case DC_STATE_EN_UPTO_DC6:
		ranges = xe3lpd_dc5_dc6_dmc_ranges;
		break;
	default:
		ranges = NULL;
	}

	if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
		return true;

	return false;
}

static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	return display->params.enable_dmc_wl && intel_dmc_has_payload(display);
}

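/*
 * The enable_dmc_wl parameter is tristate: a negative value selects the
 * platform default (enabled for display version 30 and above), while
 * zero and positive values are normalized to an explicit off/on.
 */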
static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	if (!HAS_DMC_WAKELOCK(display))
		display->params.enable_dmc_wl = 0;
	else if (display->params.enable_dmc_wl >= 0)
		display->params.enable_dmc_wl = !!display->params.enable_dmc_wl;
	else
		display->params.enable_dmc_wl = DISPLAY_VER(display) >= 30;

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d\n",
		    display->params.enable_dmc_wl);
}

void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	intel_dmc_wl_sanitize_param(display);

	if (!display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount, 0);
}

/* Must only be called as part of enabling dynamic DC states. */
void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	/*
	 * Enable wakelock in DMC.  We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/*
	 * This would be racy in the following scenario:
	 *
	 *   1. Function A calls intel_dmc_wl_get();
	 *   2. Some function calls intel_dmc_wl_disable();
	 *   3. Some function calls intel_dmc_wl_enable();
	 *   4. Concurrently with (3), function A performs the MMIO in between
	 *      setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
	 *      __intel_dmc_wl_take().
	 *
	 * TODO: Check with the hardware team whether it is safe to assert the
	 * hardware lock before enabling to avoid such a scenario. Otherwise, we
	 * would need to deal with it via software synchronization.
	 */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * The spec is not explicit about the expectation of existing
	 * lock users at the moment of disabling, but it does say that we must
	 * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
	 * disable with existing lock users.
	 *
	 * TODO: Get the correct expectation from the hardware team.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);
}

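/*
 * Take a reference on the wakelock for accessing @reg. References are
 * counted even while the mechanism is disabled, so that
 * intel_dmc_wl_enable() can assert the hardware lock on behalf of
 * existing users. An invalid register (see intel_dmc_wl_get_noreg())
 * takes the wakelock unconditionally.
 */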
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
		goto out_unlock;

	if (!wl->enabled) {
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

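/*
 * Drop a reference taken with intel_dmc_wl_get(). When the last
 * reference is dropped, the hardware lock is released from delayed
 * work after DMC_WAKELOCK_HOLD_TIME.
 */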
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		if (!wl->enabled)
			goto out_unlock;

		__intel_dmc_wl_release(display);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

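/*
 * Variants of get/put for callers that need a DC exit without a
 * specific register, implemented by passing INVALID_MMIO_REG.
 */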
void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}

void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}