xref: /linux/drivers/gpu/drm/i915/display/intel_flipq.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <linux/pci.h>
7 
8 #include <drm/drm_print.h>
9 
10 #include "intel_crtc.h"
11 #include "intel_de.h"
12 #include "intel_display_core.h"
13 #include "intel_display_types.h"
14 #include "intel_display_utils.h"
15 #include "intel_display_wa.h"
16 #include "intel_dmc.h"
17 #include "intel_dmc_regs.h"
18 #include "intel_dsb.h"
19 #include "intel_flipq.h"
20 #include "intel_step.h"
21 #include "intel_vblank.h"
22 #include "intel_vrr.h"
23 
24 /**
25  * DOC: DMC Flip Queue
26  *
27  * A flip queue is a ring buffer implemented by the pipe DMC firmware.
28  * The driver inserts entries into the queues to be executed by the
29  * pipe DMC at a specified presentation timestamp (PTS).
30  *
31  * Each pipe DMC provides several queues:
32  *
33  * - 1 general queue (two DSB buffers executed per entry)
34  * - 3 plane queues (one DSB buffer executed per entry)
35  * - 1 fast queue (deprecated)
36  */
37 
38 #define for_each_flipq(flipq_id) \
39 	for ((flipq_id) = INTEL_FLIPQ_PLANE_1; (flipq_id) < MAX_INTEL_FLIPQ; (flipq_id)++)
40 
41 static int intel_flipq_offset(enum intel_flipq_id flipq_id)
42 {
43 	switch (flipq_id) {
44 	case INTEL_FLIPQ_PLANE_1:
45 		return 0x008;
46 	case INTEL_FLIPQ_PLANE_2:
47 		return 0x108;
48 	case INTEL_FLIPQ_PLANE_3:
49 		return 0x208;
50 	case INTEL_FLIPQ_GENERAL:
51 		return 0x308;
52 	case INTEL_FLIPQ_FAST:
53 		return 0x3c8;
54 	default:
55 		MISSING_CASE(flipq_id);
56 		return 0;
57 	}
58 }
59 
60 static int intel_flipq_size_dw(enum intel_flipq_id flipq_id)
61 {
62 	switch (flipq_id) {
63 	case INTEL_FLIPQ_PLANE_1:
64 	case INTEL_FLIPQ_PLANE_2:
65 	case INTEL_FLIPQ_PLANE_3:
66 		return 64;
67 	case INTEL_FLIPQ_GENERAL:
68 	case INTEL_FLIPQ_FAST:
69 		return 48;
70 	default:
71 		MISSING_CASE(flipq_id);
72 		return 1;
73 	}
74 }
75 
76 static int intel_flipq_elem_size_dw(enum intel_flipq_id flipq_id)
77 {
78 	switch (flipq_id) {
79 	case INTEL_FLIPQ_PLANE_1:
80 	case INTEL_FLIPQ_PLANE_2:
81 	case INTEL_FLIPQ_PLANE_3:
82 		return 4;
83 	case INTEL_FLIPQ_GENERAL:
84 	case INTEL_FLIPQ_FAST:
85 		return 6;
86 	default:
87 		MISSING_CASE(flipq_id);
88 		return 1;
89 	}
90 }
91 
92 static int intel_flipq_size_entries(enum intel_flipq_id flipq_id)
93 {
94 	return intel_flipq_size_dw(flipq_id) / intel_flipq_elem_size_dw(flipq_id);
95 }
96 
/* Cache the MMIO start address and id of each flip queue for @crtc. */
static void intel_flipq_crtc_init(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum intel_flipq_id flipq_id;

	for_each_flipq(flipq_id) {
		struct intel_flipq *flipq = &crtc->flipq[flipq_id];

		/* each queue's RAM lives at a fixed offset from the pipe DMC base */
		flipq->start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc) + intel_flipq_offset(flipq_id);
		flipq->flipq_id = flipq_id;

		drm_dbg_kms(display->drm, "[CRTC:%d:%s] FQ %d: start 0x%x\n",
			    crtc->base.base.id, crtc->base.name,
			    flipq_id, flipq->start_mmioaddr);
	}
}
113 
114 bool intel_flipq_supported(struct intel_display *display)
115 {
116 	if (!display->params.enable_flipq)
117 		return false;
118 
119 	if (!display->dmc.dmc)
120 		return false;
121 
122 	if (DISPLAY_VER(display) == 20)
123 		return true;
124 
125 	/* DMC firmware expects VRR timing generator to be used */
126 	return DISPLAY_VER(display) >= 30 && intel_vrr_always_use_vrr_tg(display);
127 }
128 
/* One-time flip queue init: wait for DMC firmware, then init every CRTC's queues. */
void intel_flipq_init(struct intel_display *display)
{
	struct intel_crtc *crtc;

	/* queue addresses depend on the pipe DMC, so the firmware must be loaded first */
	intel_dmc_wait_fw_load(display);

	for_each_intel_crtc(display->drm, crtc)
		intel_flipq_crtc_init(crtc);
}
138 
/* cdclk dependent factor used in the flip queue execution time estimate */
static int cdclk_factor(struct intel_display *display)
{
	return DISPLAY_VER(display) >= 30 ? 120 : 280;
}
146 
/*
 * Estimated flip queue entry execution time, in microseconds:
 * DSB execution time, plus a cdclk dependent term, plus the SAGV
 * block time.
 *
 * NOTE(review): the 540000 divisor and the cdclk_factor() values look
 * like empirically/spec derived constants - confirm against bspec.
 */
int intel_flipq_exec_time_us(struct intel_display *display)
{
	return intel_dsb_exec_time_us() +
		DIV_ROUND_UP(display->cdclk.hw.cdclk * cdclk_factor(display), 540000) +
		display->sagv.block_time_us;
}
153 
/* Preempt timeout: the execution time estimate, rounded up to whole ms. */
static int intel_flipq_preempt_timeout_ms(struct intel_display *display)
{
	int exec_us = intel_flipq_exec_time_us(display);

	return DIV_ROUND_UP(exec_us, 1000);
}
158 
/*
 * Set/clear flip queue preemption for @crtc. When preempting, also
 * wait for any in-progress flip queue execution to finish, so the
 * queue contents can be safely modified afterwards.
 */
static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt)
{
	struct intel_display *display = to_intel_display(crtc);

	intel_de_rmw(display, PIPEDMC_FQ_CTRL(crtc->pipe),
		     PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0);

	/* wait for the DMC to go idle before touching the queues */
	if (preempt &&
	    intel_de_wait_for_clear_ms(display,
				       PIPEDMC_FQ_STATUS(crtc->pipe),
				       PIPEDMC_FQ_STATUS_BUSY,
				       intel_flipq_preempt_timeout_ms(display)))
		drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n",
			crtc->base.base.id, crtc->base.name);
}
174 
/* Read the hardware's current head pointer for the given flip queue. */
static int intel_flipq_current_head(struct intel_crtc *crtc, enum intel_flipq_id flipq_id)
{
	struct intel_display *display = to_intel_display(crtc);

	return intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id));
}
181 
/*
 * Publish the software tail pointers of all five flip queues to the
 * hardware. All tails live in a single register, so every queue's
 * cached tail is written in one atomic update.
 */
static void intel_flipq_write_tail(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe),
		       PIPEDMC_FPQ_PLANEQ_3_TP(crtc->flipq[INTEL_FLIPQ_PLANE_3].tail) |
		       PIPEDMC_FPQ_PLANEQ_2_TP(crtc->flipq[INTEL_FLIPQ_PLANE_2].tail) |
		       PIPEDMC_FPQ_PLANEQ_1_TP(crtc->flipq[INTEL_FLIPQ_PLANE_1].tail) |
		       PIPEDMC_FPQ_FASTQ_TP(crtc->flipq[INTEL_FLIPQ_FAST].tail) |
		       PIPEDMC_FPQ_GENERALQ_TP(crtc->flipq[INTEL_FLIPQ_GENERAL].tail));
}
193 
/* Software-initiated pipe DMC wakeup, to get it to look at the queues. */
static void intel_flipq_sw_dmc_wake(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	intel_de_write(display, PIPEDMC_FPQ_CTL1(crtc->pipe), PIPEDMC_SW_DMC_WAKE);
}
200 
/* Flip queue execution time estimate, converted to scanlines of @crtc_state's mode. */
static int intel_flipq_exec_time_lines(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
					intel_flipq_exec_time_us(display));
}
208 
/*
 * Dump the contents and hardware state of the given flip queue to the
 * kernel log, for debugging: raw queue RAM, head pointers, the flip
 * queue timestamp, and the atomic tail pointer register.
 */
void intel_flipq_dump(struct intel_crtc *crtc,
		      enum intel_flipq_id flipq_id)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_flipq *flipq = &crtc->flipq[flipq_id];
	u32 tmp;

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] FQ %d @ 0x%x: ",
		    crtc->base.base.id, crtc->base.name, flipq_id,
		    flipq->start_mmioaddr);
	/* dump the raw queue RAM, one line per queue entry */
	for (int i = 0 ; i < intel_flipq_size_dw(flipq_id); i++) {
		printk(KERN_CONT " 0x%08x",
		       intel_de_read(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, i)));
		if (i % intel_flipq_elem_size_dw(flipq_id) == intel_flipq_elem_size_dw(flipq_id) - 1)
			printk(KERN_CONT "\n");
	}

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] FQ %d: chp=0x%x, hp=0x%x\n",
		    crtc->base.base.id, crtc->base.name, flipq_id,
		    intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id)),
		    intel_de_read(display, PIPEDMC_FPQ_HP(crtc->pipe, flipq_id)));

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] FQ %d: current head %d\n",
		    crtc->base.base.id, crtc->base.name, flipq_id,
		    intel_flipq_current_head(crtc, flipq_id));

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] flip queue timestamp: 0x%x\n",
		    crtc->base.base.id, crtc->base.name,
		    intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe)));

	tmp = intel_de_read(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe));

	drm_dbg_kms(display->drm,
		    "[CRTC:%d:%s] flip queue atomic tails: P3 %d, P2 %d, P1 %d, G %d, F %d\n",
		    crtc->base.base.id, crtc->base.name,
		    REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, tmp),
		    REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, tmp),
		    REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, tmp),
		    REG_FIELD_GET(PIPEDMC_FPQ_GENERALQ_TP_MASK, tmp),
		    REG_FIELD_GET(PIPEDMC_FPQ_FASTQ_TP_MASK, tmp));
}
254 
/*
 * Reset all flip queue state for @pipe: disable the queues, clear the
 * scanline compare window, zero every queue's head pointers (both in
 * hardware and the cached software tail), and clear the tail register.
 */
void intel_flipq_reset(struct intel_display *display, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	enum intel_flipq_id flipq_id;

	intel_de_write(display, PIPEDMC_FQ_CTRL(pipe), 0);

	intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(pipe), 0);
	intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(pipe), 0);

	for_each_flipq(flipq_id) {
		struct intel_flipq *flipq = &crtc->flipq[flipq_id];

		intel_de_write(display, PIPEDMC_FPQ_HP(pipe, flipq_id), 0);
		intel_de_write(display, PIPEDMC_FPQ_CHP(pipe, flipq_id), 0);

		/* keep the software tail in sync with the zeroed hardware pointers */
		flipq->tail = 0;
	}

	intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe), 0);
}
276 
277 static enum pipedmc_event_id flipq_event_id(struct intel_display *display)
278 {
279 	if (DISPLAY_VER(display) >= 30)
280 		return PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER;
281 	else
282 		return PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER;
283 }
284 
/*
 * Enable flip queue processing for the pipe: program the scanline
 * compare window (a two line range ending early enough before vblank
 * start for the entry to finish executing), enable the flip queue
 * trigger event, and finally enable the queues themselves.
 */
void intel_flipq_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	/* FIXME what to do with VRR? */
	int scanline = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
		intel_flipq_exec_time_lines(crtc_state);

	if (DISPLAY_VER(display) >= 30) {
		u32 start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc);

		/* undocumented magic DMC variables */
		intel_de_write(display, PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr),
			       intel_flipq_exec_time_lines(crtc_state));
		intel_de_write(display, PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr),
			       100);
	}

	intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe),
		       PIPEDMC_SCANLINE_UPPER(scanline));
	intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe),
		       PIPEDMC_SCANLINEINRANGECMP_EN |
		       PIPEDMC_SCANLINE_LOWER(scanline - 2));

	intel_pipedmc_enable_event(crtc, flipq_event_id(display));

	intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), PIPEDMC_FQ_CTRL_ENABLE);
}
313 
/*
 * Disable flip queue processing for the pipe, undoing
 * intel_flipq_enable(). Preempt first so any in-flight execution has
 * finished before the queues are turned off.
 */
void intel_flipq_disable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	intel_flipq_preempt(crtc, true);

	intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), 0);

	intel_pipedmc_disable_event(crtc, flipq_event_id(display));

	intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe), 0);
	intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe), 0);
}
328 
/*
 * Check that the queue has room for one more entry; WARN and return
 * false on overflow. The occupancy is (tail - head) mod size, and one
 * slot is always kept free to distinguish a full queue from an empty
 * one.
 */
static bool assert_flipq_has_room(struct intel_crtc *crtc,
				  enum intel_flipq_id flipq_id)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_flipq *flipq = &crtc->flipq[flipq_id];
	int head, size = intel_flipq_size_entries(flipq_id);

	head = intel_flipq_current_head(crtc, flipq_id);

	return !drm_WARN(display->drm,
			 (flipq->tail + size - head) % size >= size - 1,
			 "[CRTC:%d:%s] FQ %d overflow (head %d, tail %d, size %d)\n",
			 crtc->base.base.id, crtc->base.name, flipq_id,
			 head, flipq->tail, size);
}
344 
/* Write dword @i of the queue entry at the current tail position. */
static void intel_flipq_write(struct intel_display *display,
			      struct intel_flipq *flipq, u32 data, int i)
{
	intel_de_write(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, flipq->tail *
					       intel_flipq_elem_size_dw(flipq->flipq_id) + i), data);
}
351 
/*
 * Write one flip queue entry in the LNL (display ver < 30) layout:
 * PTS first, then the DSB head address, then the control dword
 * (interrupt enable + DSB engine id + DSB size in 64 byte units).
 * General queue entries have two extra dwords for a second DSB,
 * unused here.
 */
static void lnl_flipq_add(struct intel_display *display,
			  struct intel_flipq *flipq,
			  unsigned int pts,
			  enum intel_dsb_id dsb_id,
			  struct intel_dsb *dsb)
{
	int i = 0;

	switch (flipq->flipq_id) {
	case INTEL_FLIPQ_GENERAL:
		intel_flipq_write(display, flipq, pts, i++);
		intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
		intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
				  LNL_FQ_DSB_ID(dsb_id) |
				  LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
		intel_flipq_write(display, flipq, 0, i++);
		intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
		intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
		break;
	case INTEL_FLIPQ_PLANE_1:
	case INTEL_FLIPQ_PLANE_2:
	case INTEL_FLIPQ_PLANE_3:
		intel_flipq_write(display, flipq, pts, i++);
		intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
		intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT |
				  LNL_FQ_DSB_ID(dsb_id) |
				  LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
		intel_flipq_write(display, flipq, 0, i++);
		break;
	default:
		MISSING_CASE(flipq->flipq_id);
		return;
	}
}
386 
/*
 * Write one flip queue entry in the PTL (display ver >= 30) layout.
 * Unlike LNL, the control dword (interrupt enable + DSB engine id +
 * DSB size in 64 byte units) precedes the DSB head address, and the
 * second-DSB dwords in general queue entries are likewise swapped.
 */
static void ptl_flipq_add(struct intel_display *display,
			  struct intel_flipq *flipq,
			  unsigned int pts,
			  enum intel_dsb_id dsb_id,
			  struct intel_dsb *dsb)
{
	int i = 0;

	switch (flipq->flipq_id) {
	case INTEL_FLIPQ_GENERAL:
		intel_flipq_write(display, flipq, pts, i++);
		intel_flipq_write(display, flipq, 0, i++);
		intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
				  PTL_FQ_DSB_ID(dsb_id) |
				  PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
		intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
		intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */
		intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */
		break;
	case INTEL_FLIPQ_PLANE_1:
	case INTEL_FLIPQ_PLANE_2:
	case INTEL_FLIPQ_PLANE_3:
		intel_flipq_write(display, flipq, pts, i++);
		intel_flipq_write(display, flipq, 0, i++);
		intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT |
				  PTL_FQ_DSB_ID(dsb_id) |
				  PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++);
		intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++);
		break;
	default:
		MISSING_CASE(flipq->flipq_id);
		return;
	}
}
421 
/*
 * Queue DSB @dsb (running on engine @dsb_id) into the given flip
 * queue, to execute @pts microseconds from now (@pts is relative; it
 * is made absolute here by adding the current hardware timestamp).
 *
 * The queues are preempted while the entry and the tail pointer are
 * written, so the DMC doesn't observe a half-written entry, and the
 * DMC is explicitly woken afterwards to notice the new entry.
 */
void intel_flipq_add(struct intel_crtc *crtc,
		     enum intel_flipq_id flipq_id,
		     unsigned int pts,
		     enum intel_dsb_id dsb_id,
		     struct intel_dsb *dsb)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_flipq *flipq = &crtc->flipq[flipq_id];

	if (!assert_flipq_has_room(crtc, flipq_id))
		return;

	/* convert the relative PTS to an absolute one */
	pts += intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe));

	intel_flipq_preempt(crtc, true);

	if (DISPLAY_VER(display) >= 30)
		ptl_flipq_add(display, flipq,  pts, dsb_id, dsb);
	else
		lnl_flipq_add(display, flipq,  pts, dsb_id, dsb);

	flipq->tail = (flipq->tail + 1) % intel_flipq_size_entries(flipq->flipq_id);
	intel_flipq_write_tail(crtc);

	intel_flipq_preempt(crtc, false);

	intel_flipq_sw_dmc_wake(crtc);
}
450 
/*
 * Emit a DSB wait to give the pipe DMC time to halt
 * (Wa_18034343758). Paired with intel_flipq_unhalt_dmc().
 *
 * NOTE(review): nothing here explicitly halts the DMC - presumably
 * that is triggered elsewhere and this only waits; confirm against
 * the workaround description.
 */
void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	if (intel_display_wa(display, INTEL_DISPLAY_WA_18034343758))
		intel_dsb_wait_usec(dsb, 2);
}
458 
/*
 * Emit a DSB write clearing PIPEDMC_CTL to let the pipe DMC resume
 * (Wa_18034343758). Paired with intel_flipq_wait_dmc_halt().
 */
void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	if (intel_display_wa(display, INTEL_DISPLAY_WA_18034343758))
		intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0);
}
466