// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 *
 */

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"

#define CACHELINE_BYTES 64

struct intel_dsb {
	enum intel_dsb_id id;

	struct intel_dsb_buffer dsb_buf;
	struct intel_crtc *crtc;

	/*
	 * Maximum number of dwords the buffer can hold.
	 */
	unsigned int size;

	/*
	 * free_pos points to the first free dword and
	 * is used to calculate the tail of the command buffer.
	 */
	unsigned int free_pos;

	/*
	 * Previously emitted DSB instruction. Used to
	 * identify/adjust the instruction for indexed
	 * register writes.
	 */
	u32 ins[2];

	/*
	 * Start of the previously emitted DSB instruction.
	 * Used to adjust the instruction for indexed
	 * register writes.
	 */
	unsigned int ins_start_offset;

	u32 chicken;
	int hw_dewake_scanline;
};

/**
 * DOC: DSB
 *
 * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
 * which can be offloaded to the DSB HW in the display controller. The DSB
 * HW is a DMA engine that can be programmed to download the DSB from
 * memory. It allows the driver to batch-submit display HW programming,
 * which reduces loading time and CPU activity, thereby making context
 * switches faster. DSB support has been present since Gen12 Intel
 * graphics platforms.
 *
 * DSBs can access only the pipe, plane, and transcoder Data Island Packet
 * registers.
 *
 * The DSB HW supports only register writes (both indexed and direct MMIO
 * writes). Register reads are not possible with the DSB HW engine.
 */
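
/*
 * Illustrative lifecycle sketch (not taken from a real caller; error
 * handling, locking and the atomic state plumbing are elided, and the
 * register and value are placeholders):
 *
 *	dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16);
 *	if (!dsb)
 *		return;				// fall back to MMIO
 *	intel_dsb_reg_write(dsb, SOME_REG, val);// queue register writes
 *	intel_dsb_finish(dsb);			// pad + flush the buffer
 *	intel_dsb_commit(dsb, false);		// kick the DSB HW
 *	intel_dsb_wait(dsb);			// wait for it to go idle
 *	intel_dsb_cleanup(dsb);
 */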

/* DSB opcodes. */
#define DSB_OPCODE_SHIFT		24
#define DSB_OPCODE_NOOP			0x0
#define DSB_OPCODE_MMIO_WRITE		0x1
#define   DSB_BYTE_EN			0xf
#define   DSB_BYTE_EN_SHIFT		20
#define   DSB_REG_VALUE_MASK		0xfffff
#define DSB_OPCODE_WAIT_USEC		0x2
#define DSB_OPCODE_WAIT_SCANLINE	0x3
#define DSB_OPCODE_WAIT_VBLANKS		0x4
#define DSB_OPCODE_WAIT_DSL_IN		0x5
#define DSB_OPCODE_WAIT_DSL_OUT		0x6
#define   DSB_SCANLINE_UPPER_SHIFT	20
#define   DSB_SCANLINE_LOWER_SHIFT	0
#define DSB_OPCODE_INTERRUPT		0x7
#define DSB_OPCODE_INDEXED_WRITE	0x9
/* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL			0xA
/* see DSB_REG_VALUE_MASK */
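
/*
 * Worked encoding example (register offset and value chosen arbitrarily):
 * a direct MMIO write of 0x12345678 to a register at offset 0x70180 is
 * emitted as one qword:
 *
 *	udw = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
 *	      (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
 *	      0x70180 = 0x01f70180
 *	ldw = 0x12345678
 *
 * The register offset must fit in DSB_REG_VALUE_MASK (20 bits).
 */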

static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* VRR will be enabled afterwards, if necessary */
	if (intel_crtc_needs_modeset(new_crtc_state))
		return false;

	/* VRR will have been disabled during intel_pre_plane_update() */
	return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}

static int dsb_vblank_delay(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);

	if (pre_commit_is_vrr_active(state, crtc))
		/*
		 * When the push is sent during vblank it will trigger
		 * on the next scanline, hence we have up to one extra
		 * scanline until the delayed vblank occurs after
		 * TRANS_PUSH has been written.
		 */
		return intel_vrr_vblank_delay(crtc_state) + 1;
	else
		return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
}

static int dsb_vtotal(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);

	if (pre_commit_is_vrr_active(state, crtc))
		return intel_vrr_vmax_vtotal(crtc_state);
	else
		return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
}

static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);
	unsigned int latency = skl_watermark_max_latency(display, 0);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
}

static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}

static int dsb_scanline_to_hw(struct intel_atomic_state *state,
			      struct intel_crtc *crtc, int scanline)
{
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);
	int vtotal = dsb_vtotal(state, crtc);

	return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
}
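
/*
 * Worked example with assumed numbers: for vtotal == 1125 and a scanline
 * offset of 1 (intel_crtc_scanline_offset() commonly returns 1), software
 * scanline 0 maps to hardware scanline (0 + 1125 - 1) % 1125 == 1124, and
 * scanline 100 maps to 99.
 */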

/*
 * Bspec suggests that we should always set DSB_SKIP_WAITS_EN. However, our
 * approach to deciding when a flip is complete differs from what Bspec
 * describes: we wait for the vblank in the DSB and generate an interrupt
 * when it happens, and that interrupt is what signals completion, so we
 * definitely do not want to skip the vblank wait. There is also a concern
 * with skipping the vblank evasion, i.e. the arming registers could get
 * latched before we have managed to write them. For these reasons we do
 * not set DSB_SKIP_WAITS_EN.
 */
static u32 dsb_chicken(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	if (pre_commit_is_vrr_active(state, crtc))
		return DSB_CTRL_WAIT_SAFE_WINDOW |
			DSB_CTRL_NO_WAIT_VBLANK |
			DSB_INST_WAIT_SAFE_WINDOW |
			DSB_INST_NO_WAIT_VBLANK;
	else
		return 0;
}

static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);

	/* each instruction is 2 dwords */
	return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
			 "[CRTC:%d:%s] DSB %d buffer overflow\n",
			 crtc->base.base.id, crtc->base.name, dsb->id);
}

static void intel_dsb_dump(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	int i;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
		    crtc->base.base.id, crtc->base.name, dsb->id);
	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
		drm_dbg_kms(display->drm,
			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
	drm_dbg_kms(display->drm, "}\n");
}

static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
			enum intel_dsb_id dsb_id)
{
	return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}

static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
	if (!assert_dsb_has_room(dsb))
		return;

	/* Every instruction should be 8 byte aligned. */
	dsb->free_pos = ALIGN(dsb->free_pos, 2);

	dsb->ins_start_offset = dsb->free_pos;
	dsb->ins[0] = ldw;
	dsb->ins[1] = udw;

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}

static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
					u32 opcode, i915_reg_t reg)
{
	u32 prev_opcode, prev_reg;

	/*
	 * Nothing emitted yet? Must check before looking
	 * at the actual data since i915_gem_object_create_internal()
	 * does *not* give you zeroed memory!
	 */
	if (dsb->free_pos == 0)
		return false;

	prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK;
	prev_reg = dsb->ins[1] & DSB_REG_VALUE_MASK;

	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}

static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT,
					   reg);
}
/**
 * intel_dsb_reg_write_indexed() - Emit an indexed register write to the DSB context
 * @dsb: DSB context
 * @reg: register address.
 * @val: value.
 *
 * This function emits a register-value pair into the DSB command
 * buffer, using the auto-incrementing indexed write instruction.
 *
 * Note that indexed writes are slower than normal MMIO writes
 * for a small number (less than 5 or so) of writes to the same
 * register.
 */
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
				 i915_reg_t reg, u32 val)
{
	/*
	 * For example the buffer will look like below for 3 dwords of an
	 * auto increment register:
	 * +--------------------------------------------------------+
	 * | size = 3 | offset &| value1 | value2 | value3 | zero   |
	 * |          | opcode  |        |        |        |        |
	 * +--------------------------------------------------------+
	 * +          +         +        +        +        +        +
	 * 0          4         8        12       16       20       24
	 * Byte
	 *
	 * As every instruction is 8 byte aligned, the index of a DSB
	 * instruction in the u32 array always starts at an even number.
	 * If an odd number of dwords is written, a zero is appended at
	 * the end for padding.
	 */
	if (!intel_dsb_prev_ins_is_indexed_write(dsb, reg))
		intel_dsb_emit(dsb, 0, /* count */
			       (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
			       i915_mmio_reg_offset(reg));

	if (!assert_dsb_has_room(dsb))
		return;

	/* Update the count */
	dsb->ins[0]++;
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
			       dsb->ins[0]);

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
	/* If the number of data words is odd, the last dword should be 0. */
	if (dsb->free_pos & 0x1)
		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
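
/*
 * Illustrative example (REG is a hypothetical auto-increment register):
 * three consecutive calls
 *
 *	intel_dsb_reg_write_indexed(dsb, REG, v1);
 *	intel_dsb_reg_write_indexed(dsb, REG, v2);
 *	intel_dsb_reg_write_indexed(dsb, REG, v3);
 *
 * produce the dwords
 *
 *	[ 3, (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | REG, v1, v2, v3, 0 ]
 *
 * The count in the first dword is updated in place on each call, and the
 * trailing zero is padding for the odd number of data dwords.
 */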

void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
	return (!!(mask & 0xff000000) << 3 |
		!!(mask & 0x00ff0000) << 2 |
		!!(mask & 0x0000ff00) << 1 |
		!!(mask & 0x000000ff) << 0);
}
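
/*
 * Worked example: a mask of 0xff0000ff has bytes 3 and 0 active, so
 * intel_dsb_mask_to_byte_en() returns 0b1001 == 0x9, while a full
 * 0xffffffff mask yields 0xf, i.e. DSB_BYTE_EN as used by unmasked
 * writes.
 */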

/* Note: mask implemented via byte enables! */
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (intel_dsb_mask_to_byte_en(mask) << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

void intel_dsb_noop(struct intel_dsb *dsb, int count)
{
	int i;

	for (i = 0; i < count; i++)
		intel_dsb_emit(dsb, 0,
			       DSB_OPCODE_NOOP << DSB_OPCODE_SHIFT);
}

void intel_dsb_nonpost_start(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, DSB_NON_POSTED);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_nonpost_end(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, 0);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_interrupt(struct intel_dsb *dsb)
{
	intel_dsb_emit(dsb, 0,
		       DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT);
}

void intel_dsb_wait_usec(struct intel_dsb *dsb, int count)
{
	/* +1 to make sure we never wait less time than asked for */
	intel_dsb_emit(dsb, count + 1,
		       DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT);
}

void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT);
}

static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
				    u32 opcode, int lower, int upper)
{
	u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
		((u64)lower << DSB_SCANLINE_LOWER_SHIFT);

	intel_dsb_emit(dsb, lower_32_bits(window),
		       (opcode << DSB_OPCODE_SHIFT) |
		       upper_32_bits(window));
}

static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
			       struct intel_dsb *dsb,
			       int lower_in, int upper_in,
			       int lower_out, int upper_out)
{
	struct intel_crtc *crtc = dsb->crtc;

	lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
	upper_in = dsb_scanline_to_hw(state, crtc, upper_in);

	lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
	upper_out = dsb_scanline_to_hw(state, crtc, upper_out);

	if (upper_in >= lower_in)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
					lower_in, upper_in);
	else if (upper_out >= lower_out)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
					lower_out, upper_out);
	else
		drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
}

static void assert_dsl_ok(struct intel_atomic_state *state,
			  struct intel_dsb *dsb,
			  int start, int end)
{
	struct intel_crtc *crtc = dsb->crtc;
	int vtotal = dsb_vtotal(state, crtc);

	/*
	 * Waiting for the entire frame doesn't make sense
	 * (IN == don't wait, OUT == wait forever).
	 */
	drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
		 "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
		 crtc->base.base.id, crtc->base.name, dsb->id,
		 start, end, vtotal);
}

void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
				struct intel_dsb *dsb,
				int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   start, end,
			   end + 1, start - 1);
}

void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
				 struct intel_dsb *dsb,
				 int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   end + 1, start - 1,
			   start, end);
}
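
/*
 * Worked example: intel_dsb_wait_scanline_out(state, dsb, 100, 200)
 * waits until the scanline is outside [100, 200], which is the same
 * condition as being inside [201, 99]. intel_dsb_wait_dsl() is handed
 * both formulations and emits whichever one does not wrap around vtotal
 * after the software -> hardware scanline translation.
 */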

void intel_dsb_poll(struct intel_dsb *dsb,
		    i915_reg_t reg, u32 mask, u32 val,
		    int wait_us, int count)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write(dsb, DSB_POLLMASK(pipe, dsb->id), mask);
	intel_dsb_reg_write(dsb, DSB_POLLFUNC(pipe, dsb->id),
			    DSB_POLL_ENABLE |
			    DSB_POLL_WAIT(wait_us) | DSB_POLL_COUNT(count));

	intel_dsb_noop(dsb, 5);

	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_POLL << DSB_OPCODE_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
	u32 aligned_tail, tail;

	tail = dsb->free_pos * 4;
	aligned_tail = ALIGN(tail, CACHELINE_BYTES);

	if (aligned_tail > tail)
		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
					aligned_tail - tail);

	dsb->free_pos = aligned_tail / 4;
}
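
/*
 * Worked example: with free_pos == 10 the tail sits at byte 40 and gets
 * aligned up to the next cacheline boundary at byte 64; the 24 padding
 * bytes are zeroed (an all-zero qword is a DSB noop) and free_pos
 * becomes 16.
 */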

void intel_dsb_finish(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;

	/*
	 * DSB_FORCE_DEWAKE remains active even after DSB is
	 * disabled, so make sure to clear it (if set during
	 * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
	 * well for good measure.
	 */
	intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
	intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
				   DSB_FORCE_DEWAKE, 0);

	intel_dsb_align_tail(dsb);

	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}

static u32 dsb_error_int_status(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_STATUS |
		DSB_RSPTIMEOUT_INT_STATUS |
		DSB_POLL_ERR_INT_STATUS;

	/*
	 * All the non-existing status bits operate as
	 * normal r/w bits, so any attempt to clear them
	 * will just end up setting them. Never do that so
	 * we won't mistake them for actual error interrupts.
	 */
	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_STATUS;

	return errors;
}

static u32 dsb_error_int_en(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_EN |
		DSB_RSPTIMEOUT_INT_EN |
		DSB_POLL_ERR_INT_EN;

	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_EN;

	return errors;
}

void intel_dsb_vblank_evade(struct intel_atomic_state *state,
			    struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);
	/* FIXME calibrate sensibly */
	int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
	int start, end;

	/*
	 * PIPEDSL reads as 0 when in SRDENT (PSR1) or DEEP_SLEEP (PSR2). On
	 * wake-up, scanline counting starts from vblank_start - 1, and we
	 * don't know whether a wake-up is already in progress when the
	 * evasion starts. In the worst case PIPEDSL could start returning a
	 * valid value right after we check the scanline, in which case we
	 * wouldn't have enough time to write all the registers. To tackle
	 * this, evade scanline 0 as well. As a drawback, flips incur a one
	 * frame delay when waking up.
	 */
	if (crtc_state->has_psr)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, 0, 0);

	if (pre_commit_is_vrr_active(state, crtc)) {
		int vblank_delay = intel_vrr_vblank_delay(crtc_state);

		end = intel_vrr_vmin_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);

		end = intel_vrr_vmax_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	} else {
		int vblank_delay = intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);

		end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	}
}
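
/*
 * Worked example with assumed timings: in the non-VRR case, with a
 * vblank start of 1084, a vblank delay of 4 scanlines, and the 20 usec
 * latency translating to, say, 2 scanlines, the DSB is told to wait
 * until the scanline is outside [1078, 1084], so the subsequent
 * register writes cannot race the register latching at vblank.
 */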

static void _intel_dsb_chain(struct intel_atomic_state *state,
			     struct intel_dsb *dsb,
			     struct intel_dsb *chained_dsb,
			     u32 ctrl)
{
	struct intel_display *display = to_intel_display(state->base.dev);
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;
	u32 tail;

	if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
		return;

	tail = chained_dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
			    ctrl | DSB_ENABLE);

	intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
			    dsb_chicken(state, crtc));

	intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
			    dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			    dsb_error_int_en(display) | DSB_PROG_INT_EN);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
		int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);

		intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
				    DSB_ENABLE_DEWAKE |
				    DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
	}

	intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));

	intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		/*
		 * Keep DEwake alive via the first DSB, in
		 * case we're already past dewake_scanline,
		 * and thus DSB_ENABLE_DEWAKE on the second
		 * DSB won't do its job.
		 */
		intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
					   DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);

		intel_dsb_wait_scanline_out(state, dsb,
					    dsb_dewake_scanline_start(state, crtc),
					    dsb_dewake_scanline_end(state, crtc));
	}
}

void intel_dsb_chain(struct intel_atomic_state *state,
		     struct intel_dsb *dsb,
		     struct intel_dsb *chained_dsb,
		     bool wait_for_vblank)
{
	_intel_dsb_chain(state, dsb, chained_dsb,
			 wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}

void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
				 struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state =
		intel_pre_commit_crtc_state(state, crtc);
	int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
					     dsb_vblank_delay(state, crtc));

	intel_dsb_wait_usec(dsb, usecs);
}

static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
			      int hw_dewake_scanline)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tail;

	tail = dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	if (is_dsb_busy(display, pipe, dsb->id)) {
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
			crtc->base.base.id, crtc->base.name, dsb->id);
		return;
	}

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
			  ctrl | DSB_ENABLE);

	intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
			  dsb->chicken);

	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			  dsb_error_int_en(display) | DSB_PROG_INT_EN);

	intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));

	if (hw_dewake_scanline >= 0) {
		int diff, position;

		intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
				  DSB_ENABLE_DEWAKE |
				  DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));

		/*
		 * Force DEwake immediately if we're already past
		 * or close to racing past the target scanline.
		 */
		position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;

		diff = hw_dewake_scanline - position;
		intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
				  (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
				  DSB_BLOCK_DEWAKE_EXTENSION);
	}

	intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}

/**
 * intel_dsb_commit() - Trigger workload execution of DSB.
 * @dsb: DSB context
 * @wait_for_vblank: wait for vblank before executing
 *
 * This function triggers the DSB HW to execute the queued instructions,
 * i.e. to perform the actual register writes to the hardware.
 */
void intel_dsb_commit(struct intel_dsb *dsb,
		      bool wait_for_vblank)
{
	_intel_dsb_commit(dsb,
			  wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
			  wait_for_vblank ? dsb->hw_dewake_scanline : -1);
}

void intel_dsb_wait(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);

		intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
				  DSB_ENABLE | DSB_HALT);

		drm_err(display->drm,
			"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
			crtc->base.base.id, crtc->base.name, dsb->id,
			intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);

		intel_dsb_dump(dsb);
	}

	/* Attempt to reset it */
	dsb->free_pos = 0;
	dsb->ins_start_offset = 0;
	dsb->ins[0] = 0;
	dsb->ins[1] = 0;

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);

	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
}

/**
 * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
 * @state: the atomic state
 * @crtc: the CRTC
 * @dsb_id: the DSB engine to use
 * @max_cmds: number of commands we need to fit into the command buffer
 *
 * This function prepares the command buffer which is used to store the
 * DSB instructions and their data.
 *
 * Returns:
 * DSB context, NULL on failure
 */
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    enum intel_dsb_id dsb_id,
				    unsigned int max_cmds)
{
	struct intel_display *display = to_intel_display(state);
	struct ref_tracker *wakeref;
	struct intel_dsb *dsb;
	unsigned int size;

	if (!HAS_DSB(display))
		return NULL;

	if (!display->params.enable_dsb)
		return NULL;

	dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
	if (!dsb)
		goto out;

	wakeref = intel_display_rpm_get(display);

	/* ~1 qword per instruction, full cachelines */
	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);

	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
		goto out_put_rpm;

	intel_display_rpm_put(display, wakeref);

	dsb->id = dsb_id;
	dsb->crtc = crtc;
	dsb->size = size / 4; /* in dwords */

	dsb->chicken = dsb_chicken(state, crtc);
	dsb->hw_dewake_scanline =
		dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));

	return dsb;

out_put_rpm:
	intel_display_rpm_put(display, wakeref);
	kfree(dsb);
out:
	drm_info_once(display->drm,
		      "[CRTC:%d:%s] DSB %d queue setup failed, will fall back to MMIO for display HW programming\n",
		      crtc->base.base.id, crtc->base.name, dsb_id);

	return NULL;
}
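
/*
 * Sizing example: a request for max_cmds == 112 reserves
 * ALIGN(112 * 8, 64) == 896 bytes, i.e. dsb->size == 224 dwords. Each
 * emitted instruction occupies one qword, though consecutive
 * intel_dsb_reg_write_indexed() values to the same register pack two
 * data dwords per qword, so 8 bytes per command is only an estimate.
 */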

/**
 * intel_dsb_cleanup() - Clean up the DSB context.
 * @dsb: DSB context
 *
 * This function cleans up the DSB context by unpinning and releasing
 * the buffer associated with it.
 */
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
	kfree(dsb);
}

void intel_dsb_irq_handler(struct intel_display *display,
			   enum pipe pipe, enum intel_dsb_id dsb_id)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	u32 tmp, errors;

	tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);

	if (tmp & DSB_PROG_INT_STATUS) {
		spin_lock(&display->drm->event_lock);

		if (crtc->dsb_event) {
			/*
			 * Update vblank counter/timestamp in case it
			 * hasn't been done yet for this frame.
			 */
			drm_crtc_accurate_vblank_count(&crtc->base);

			drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event);
			crtc->dsb_event = NULL;
		}

		spin_unlock(&display->drm->event_lock);
	}

	errors = tmp & dsb_error_int_status(display);
	if (errors & DSB_ATS_FAULT_INT_STATUS)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d ATS fault\n",
			crtc->base.base.id, crtc->base.name, dsb_id);
	if (errors & DSB_GTT_FAULT_INT_STATUS)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d GTT fault\n",
			crtc->base.base.id, crtc->base.name, dsb_id);
	if (errors & DSB_RSPTIMEOUT_INT_STATUS)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d response timeout\n",
			crtc->base.base.id, crtc->base.name, dsb_id);
	if (errors & DSB_POLL_ERR_INT_STATUS)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d poll error\n",
			crtc->base.base.id, crtc->base.name, dsb_id);
}
899