xref: /linux/drivers/gpu/drm/i915/display/intel_dsb.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  *
5  */
6 
7 #include "i915_drv.h"
8 #include "i915_irq.h"
9 #include "i915_reg.h"
10 #include "intel_crtc.h"
11 #include "intel_de.h"
12 #include "intel_display_types.h"
13 #include "intel_dsb.h"
14 #include "intel_dsb_buffer.h"
15 #include "intel_dsb_regs.h"
16 #include "intel_vblank.h"
17 #include "intel_vrr.h"
18 #include "skl_watermark.h"
19 
20 #define CACHELINE_BYTES 64
21 
22 struct intel_dsb {
23 	enum intel_dsb_id id;
24 
25 	struct intel_dsb_buffer dsb_buf;
26 	struct intel_crtc *crtc;
27 
28 	/*
29 	 * maximum number of dwords the buffer will hold.
30 	 */
31 	unsigned int size;
32 
33 	/*
34 	 * free_pos will point to the first free dword and
35 	 * help in calculating the tail of the command buffer.
36 	 */
37 	unsigned int free_pos;
38 
39 	/*
40 	 * ins_start_offset stores the start dword of the current dsb
41 	 * instruction and helps in identifying a batch of auto-increment
42 	 * register writes.
43 	 */
44 	unsigned int ins_start_offset;
45 
46 	u32 chicken;
47 	int hw_dewake_scanline;
48 };
49 
50 /**
51  * DOC: DSB
52  *
53  * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
54  * which can be offloaded to the DSB HW in the Display Controller. The DSB HW
55  * is a DMA engine that can be programmed to download the DSB from memory.
56  * It allows the driver to batch-submit display HW programming. This helps to
57  * reduce loading time and CPU activity, thereby making the context switch
58  * faster. DSB support was added starting from Gen12 Intel graphics based platforms.
59  *
60  * DSBs can access only the pipe, plane, and transcoder Data Island Packet
61  * registers.
62  *
63  * The DSB HW can support only register writes (both indexed and direct MMIO
64  * writes). No register reads are possible with the DSB HW engine.
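 *
 * A rough sketch of the intended usage of the helpers in this file
 * (simplified, error handling omitted; the DSB id and command count below
 * are only examples, the real call sites live in the atomic commit path):
 *
 *	dsb = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 128);
 *	if (dsb) {
 *		intel_dsb_reg_write(dsb, reg, val);
 *		...
 *		intel_dsb_finish(dsb);
 *		intel_dsb_commit(dsb, false);
 *		intel_dsb_wait(dsb);
 *		intel_dsb_cleanup(dsb);
 *	}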
65  */
66 
67 /* DSB opcodes. */
68 #define DSB_OPCODE_SHIFT		24
69 #define DSB_OPCODE_NOOP			0x0
70 #define DSB_OPCODE_MMIO_WRITE		0x1
71 #define   DSB_BYTE_EN			0xf
72 #define   DSB_BYTE_EN_SHIFT		20
73 #define   DSB_REG_VALUE_MASK		0xfffff
74 #define DSB_OPCODE_WAIT_USEC		0x2
75 #define DSB_OPCODE_WAIT_SCANLINE	0x3
76 #define DSB_OPCODE_WAIT_VBLANKS		0x4
77 #define DSB_OPCODE_WAIT_DSL_IN		0x5
78 #define DSB_OPCODE_WAIT_DSL_OUT		0x6
79 #define   DSB_SCANLINE_UPPER_SHIFT	20
80 #define   DSB_SCANLINE_LOWER_SHIFT	0
81 #define DSB_OPCODE_INTERRUPT		0x7
82 #define DSB_OPCODE_INDEXED_WRITE	0x9
83 /* see DSB_REG_VALUE_MASK */
84 #define DSB_OPCODE_POLL			0xA
85 /* see DSB_REG_VALUE_MASK */
86 
87 static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
88 				     struct intel_crtc *crtc)
89 {
90 	const struct intel_crtc_state *old_crtc_state =
91 		intel_atomic_get_old_crtc_state(state, crtc);
92 	const struct intel_crtc_state *new_crtc_state =
93 		intel_atomic_get_new_crtc_state(state, crtc);
94 
95 	/* VRR will be enabled afterwards, if necessary */
96 	if (intel_crtc_needs_modeset(new_crtc_state))
97 		return false;
98 
99 	/* VRR will have been disabled during intel_pre_plane_update() */
100 	return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
101 }
102 
103 static const struct intel_crtc_state *
104 pre_commit_crtc_state(struct intel_atomic_state *state,
105 		      struct intel_crtc *crtc)
106 {
107 	const struct intel_crtc_state *old_crtc_state =
108 		intel_atomic_get_old_crtc_state(state, crtc);
109 	const struct intel_crtc_state *new_crtc_state =
110 		intel_atomic_get_new_crtc_state(state, crtc);
111 
112 	/*
113 	 * During fastsets/etc. the transcoder is still
114 	 * running with the old timings at this point.
115 	 */
116 	if (intel_crtc_needs_modeset(new_crtc_state))
117 		return new_crtc_state;
118 	else
119 		return old_crtc_state;
120 }
121 
122 static int dsb_vtotal(struct intel_atomic_state *state,
123 		      struct intel_crtc *crtc)
124 {
125 	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
126 
127 	if (pre_commit_is_vrr_active(state, crtc))
128 		return crtc_state->vrr.vmax;
129 	else
130 		return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
131 }
132 
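/*
 * Start of the DEwake window: the maximum watermark latency worth of
 * scanlines before the end of the active portion of the frame.
 */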
133 static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
134 				     struct intel_crtc *crtc)
135 {
136 	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
137 	struct drm_i915_private *i915 = to_i915(state->base.dev);
138 	unsigned int latency = skl_watermark_max_latency(i915, 0);
139 
140 	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
141 		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
142 }
143 
144 static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
145 				   struct intel_crtc *crtc)
146 {
147 	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
148 
149 	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
150 }
151 
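/*
 * Convert a logical scanline number to the value the hardware DSL counter
 * will report for it, i.e. undo the scanline offset (see
 * intel_crtc_scanline_offset()) and wrap around at vtotal.
 */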
152 static int dsb_scanline_to_hw(struct intel_atomic_state *state,
153 			      struct intel_crtc *crtc, int scanline)
154 {
155 	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
156 	int vtotal = dsb_vtotal(state, crtc);
157 
158 	return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
159 }
160 
161 static u32 dsb_chicken(struct intel_atomic_state *state,
162 		       struct intel_crtc *crtc)
163 {
164 	if (pre_commit_is_vrr_active(state, crtc))
165 		return DSB_SKIP_WAITS_EN |
166 			DSB_CTRL_WAIT_SAFE_WINDOW |
167 			DSB_CTRL_NO_WAIT_VBLANK |
168 			DSB_INST_WAIT_SAFE_WINDOW |
169 			DSB_INST_NO_WAIT_VBLANK;
170 	else
171 		return DSB_SKIP_WAITS_EN;
172 }
173 
174 static bool assert_dsb_has_room(struct intel_dsb *dsb)
175 {
176 	struct intel_crtc *crtc = dsb->crtc;
177 	struct intel_display *display = to_intel_display(crtc->base.dev);
178 
179 	/* each instruction is 2 dwords */
180 	return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
181 			 "[CRTC:%d:%s] DSB %d buffer overflow\n",
182 			 crtc->base.base.id, crtc->base.name, dsb->id);
183 }
184 
185 static void intel_dsb_dump(struct intel_dsb *dsb)
186 {
187 	struct intel_crtc *crtc = dsb->crtc;
188 	struct intel_display *display = to_intel_display(crtc->base.dev);
189 	int i;
190 
191 	drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
192 		    crtc->base.base.id, crtc->base.name, dsb->id);
193 	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
194 		drm_dbg_kms(display->drm,
195 			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
196 			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
197 			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
198 			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
199 			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
200 	drm_dbg_kms(display->drm, "}\n");
201 }
202 
203 static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
204 			enum intel_dsb_id dsb_id)
205 {
206 	return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
207 }
208 
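/*
 * Emit one raw DSB instruction (a naturally aligned qword): @ldw is the
 * low/data dword, @udw holds the opcode and any opcode specific high bits
 * (byte enables, register offset, etc.).
 */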
209 static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
210 {
211 	if (!assert_dsb_has_room(dsb))
212 		return;
213 
214 	/* Every instruction should be 8 byte aligned. */
215 	dsb->free_pos = ALIGN(dsb->free_pos, 2);
216 
217 	dsb->ins_start_offset = dsb->free_pos;
218 
219 	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
220 	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
221 }
222 
223 static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
224 					u32 opcode, i915_reg_t reg)
225 {
226 	u32 prev_opcode, prev_reg;
227 
228 	/*
229 	 * Nothing emitted yet? Must check before looking
230 	 * at the actual data since i915_gem_object_create_internal()
231 	 * does *not* give you zeroed memory!
232 	 */
233 	if (dsb->free_pos == 0)
234 		return false;
235 
236 	prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
237 					    dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
238 	prev_reg =  intel_dsb_buffer_read(&dsb->dsb_buf,
239 					  dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
240 
241 	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
242 }
243 
244 static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
245 {
246 	/* only full byte-enables can be converted to indexed writes */
247 	return intel_dsb_prev_ins_is_write(dsb,
248 					   DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
249 					   DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
250 					   reg);
251 }
252 
253 static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
254 {
255 	return intel_dsb_prev_ins_is_write(dsb,
256 					   DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT,
257 					   reg);
258 }
259 
260 /**
261  * intel_dsb_reg_write() - Emit a register write to the DSB context
262  * @dsb: DSB context
263  * @reg: register address.
264  * @val: value.
265  *
266  * This function is used for writing a register-value pair into the
267  * command buffer of the DSB.
268  */
269 void intel_dsb_reg_write(struct intel_dsb *dsb,
270 			 i915_reg_t reg, u32 val)
271 {
272 	u32 old_val;
273 
274 	/*
275 	 * For example the buffer will look like below for 3 dwords for auto
276 	 * increment register:
277 	 * +--------------------------------------------------------+
278 	 * | size = 3 | offset &| value1 | value2 | value3 | zero   |
279 	 * |          | opcode  |        |        |        |        |
280 	 * +--------------------------------------------------------+
281 	 * +          +         +        +        +        +        +
282 	 * 0          4         8        12       16       20       24
283 	 * Byte
284 	 *
285 	 * As every instruction is 8-byte aligned, the index of a dsb instruction
286 	 * will always start at an even number when dealing with the u32 array. If
287 	 * we are writing an odd number of dwords, zeros will be added at the end
288 	 * for padding.
289 	 */
290 	if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
291 	    !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
292 		intel_dsb_emit(dsb, val,
293 			       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
294 			       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
295 			       i915_mmio_reg_offset(reg));
296 	} else {
297 		if (!assert_dsb_has_room(dsb))
298 			return;
299 
300 		/* convert to indexed write? */
301 		if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
302 			u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
303 							     dsb->ins_start_offset + 0);
304 
305 			intel_dsb_buffer_write(&dsb->dsb_buf,
306 					       dsb->ins_start_offset + 0, 1); /* count */
307 			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
308 					       (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
309 					       i915_mmio_reg_offset(reg));
310 			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
311 
312 			dsb->free_pos++;
313 		}
314 
315 		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
316 		/* Update the count */
317 		old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
318 		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
319 
320 		/* if the number of data words is odd, then the last dword should be 0. */
321 		if (dsb->free_pos & 0x1)
322 			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
323 	}
324 }
325 
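/*
 * Convert a 32-bit write mask into the 4-bit byte enable field of an
 * MMIO write instruction: one enable bit per byte of the register value.
 */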
326 static u32 intel_dsb_mask_to_byte_en(u32 mask)
327 {
328 	return (!!(mask & 0xff000000) << 3 |
329 		!!(mask & 0x00ff0000) << 2 |
330 		!!(mask & 0x0000ff00) << 1 |
331 		!!(mask & 0x000000ff) << 0);
332 }
333 
334 /* Note: the mask is implemented via the instruction's byte enables, so only whole bytes can be masked! */
335 void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
336 				i915_reg_t reg, u32 mask, u32 val)
337 {
338 	intel_dsb_emit(dsb, val,
339 		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
340 		       (intel_dsb_mask_to_byte_en(mask) << DSB_BYTE_EN_SHIFT) |
341 		       i915_mmio_reg_offset(reg));
342 }
343 
344 void intel_dsb_noop(struct intel_dsb *dsb, int count)
345 {
346 	int i;
347 
348 	for (i = 0; i < count; i++)
349 		intel_dsb_emit(dsb, 0,
350 			       DSB_OPCODE_NOOP << DSB_OPCODE_SHIFT);
351 }
352 
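/*
 * Switch the DSB to non-posted MMIO writes: everything emitted between
 * intel_dsb_nonpost_start() and intel_dsb_nonpost_end() is performed
 * as non-posted register writes.
 */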
353 void intel_dsb_nonpost_start(struct intel_dsb *dsb)
354 {
355 	struct intel_crtc *crtc = dsb->crtc;
356 	enum pipe pipe = crtc->pipe;
357 
358 	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
359 				   DSB_NON_POSTED, DSB_NON_POSTED);
360 	intel_dsb_noop(dsb, 4);
361 }
362 
363 void intel_dsb_nonpost_end(struct intel_dsb *dsb)
364 {
365 	struct intel_crtc *crtc = dsb->crtc;
366 	enum pipe pipe = crtc->pipe;
367 
368 	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
369 				   DSB_NON_POSTED, 0);
370 	intel_dsb_noop(dsb, 4);
371 }
372 
373 static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
374 				    u32 opcode, int lower, int upper)
375 {
376 	u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
377 		((u64)lower << DSB_SCANLINE_LOWER_SHIFT);
378 
379 	intel_dsb_emit(dsb, lower_32_bits(window),
380 		       (opcode << DSB_OPCODE_SHIFT) |
381 		       upper_32_bits(window));
382 }
383 
384 static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
385 			       struct intel_dsb *dsb,
386 			       int lower_in, int upper_in,
387 			       int lower_out, int upper_out)
388 {
389 	struct intel_crtc *crtc = dsb->crtc;
390 
391 	lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
392 	upper_in = dsb_scanline_to_hw(state, crtc, upper_in);
393 
394 	lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
395 	upper_out = dsb_scanline_to_hw(state, crtc, upper_out);
396 
397 	if (upper_in >= lower_in)
398 		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
399 					lower_in, upper_in);
400 	else if (upper_out >= lower_out)
401 		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
402 					lower_out, upper_out);
403 	else
404 		drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
405 }
406 
407 static void assert_dsl_ok(struct intel_atomic_state *state,
408 			  struct intel_dsb *dsb,
409 			  int start, int end)
410 {
411 	struct intel_crtc *crtc = dsb->crtc;
412 	int vtotal = dsb_vtotal(state, crtc);
413 
414 	/*
415 	 * Waiting for the entire frame doesn't make sense
416 	 * (IN == don't wait, OUT == wait forever).
417 	 */
418 	drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
419 		 "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
420 		 crtc->base.base.id, crtc->base.name, dsb->id,
421 		 start, end, vtotal);
422 }
423 
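/*
 * Emit a wait for the scanline to be inside the [start,end] window.
 * intel_dsb_wait_dsl() picks whichever of the equivalent IN/OUT window
 * representations the hardware can actually express.
 */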
424 void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
425 				struct intel_dsb *dsb,
426 				int start, int end)
427 {
428 	assert_dsl_ok(state, dsb, start, end);
429 
430 	intel_dsb_wait_dsl(state, dsb,
431 			   start, end,
432 			   end + 1, start - 1);
433 }
434 
435 void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
436 				 struct intel_dsb *dsb,
437 				 int start, int end)
438 {
439 	assert_dsl_ok(state, dsb, start, end);
440 
441 	intel_dsb_wait_dsl(state, dsb,
442 			   end + 1, start - 1,
443 			   start, end);
444 }
445 
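/*
 * The tail pointer has to be cacheline aligned, so zero out (i.e. pad
 * with DSB noops) the remainder of the last cacheline of the buffer.
 */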
446 static void intel_dsb_align_tail(struct intel_dsb *dsb)
447 {
448 	u32 aligned_tail, tail;
449 
450 	tail = dsb->free_pos * 4;
451 	aligned_tail = ALIGN(tail, CACHELINE_BYTES);
452 
453 	if (aligned_tail > tail)
454 		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
455 					aligned_tail - tail);
456 
457 	dsb->free_pos = aligned_tail / 4;
458 }
459 
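/**
 * intel_dsb_finish() - Finalize the DSB command buffer.
 * @dsb: DSB context
 *
 * Emit the trailing DEwake cleanup writes, pad the command buffer to the
 * required cacheline alignment, and flush the buffer mapping so the DSB
 * engine can consume it via intel_dsb_commit()/intel_dsb_chain().
 */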
460 void intel_dsb_finish(struct intel_dsb *dsb)
461 {
462 	struct intel_crtc *crtc = dsb->crtc;
463 
464 	/*
465 	 * DSB_FORCE_DEWAKE remains active even after DSB is
466 	 * disabled, so make sure to clear it (if set during
467 	 * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
468 	 * well for good measure.
469 	 */
470 	intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
471 	intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
472 				   DSB_FORCE_DEWAKE, 0);
473 
474 	intel_dsb_align_tail(dsb);
475 
476 	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
477 }
478 
479 static u32 dsb_error_int_status(struct intel_display *display)
480 {
481 	u32 errors;
482 
483 	errors = DSB_GTT_FAULT_INT_STATUS |
484 		DSB_RSPTIMEOUT_INT_STATUS |
485 		DSB_POLL_ERR_INT_STATUS;
486 
487 	/*
488 	 * All the non-existing status bits operate as
489 	 * normal r/w bits, so any attempt to clear them
490 	 * will just end up setting them. Never do that so
491 	 * we won't mistake them for actual error interrupts.
492 	 */
493 	if (DISPLAY_VER(display) >= 14)
494 		errors |= DSB_ATS_FAULT_INT_STATUS;
495 
496 	return errors;
497 }
498 
499 static u32 dsb_error_int_en(struct intel_display *display)
500 {
501 	u32 errors;
502 
503 	errors = DSB_GTT_FAULT_INT_EN |
504 		DSB_RSPTIMEOUT_INT_EN |
505 		DSB_POLL_ERR_INT_EN;
506 
507 	if (DISPLAY_VER(display) >= 14)
508 		errors |= DSB_ATS_FAULT_INT_EN;
509 
510 	return errors;
511 }
512 
513 static void _intel_dsb_chain(struct intel_atomic_state *state,
514 			     struct intel_dsb *dsb,
515 			     struct intel_dsb *chained_dsb,
516 			     u32 ctrl)
517 {
518 	struct intel_display *display = to_intel_display(state->base.dev);
519 	struct intel_crtc *crtc = dsb->crtc;
520 	enum pipe pipe = crtc->pipe;
521 	u32 tail;
522 
523 	if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
524 		return;
525 
526 	tail = chained_dsb->free_pos * 4;
527 	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
528 		return;
529 
530 	intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
531 			    ctrl | DSB_ENABLE);
532 
533 	intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
534 			    dsb_chicken(state, crtc));
535 
536 	intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
537 			    dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
538 			    dsb_error_int_en(display));
539 
540 	if (ctrl & DSB_WAIT_FOR_VBLANK) {
541 		int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
542 		int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);
543 
544 		intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
545 				    DSB_ENABLE_DEWAKE |
546 				    DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
547 	}
548 
549 	intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
550 			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));
551 
552 	intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
553 			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);
554 
555 	if (ctrl & DSB_WAIT_FOR_VBLANK) {
556 		/*
557 		 * Keep DEwake alive via the first DSB, in
558 		 * case we're already past dewake_scanline,
559 		 * and thus DSB_ENABLE_DEWAKE on the second
560 		 * DSB won't do its job.
561 		 */
562 		intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
563 					   DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);
564 
565 		intel_dsb_wait_scanline_out(state, dsb,
566 					    dsb_dewake_scanline_start(state, crtc),
567 					    dsb_dewake_scanline_end(state, crtc));
568 	}
569 }
570 
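/**
 * intel_dsb_chain() - Chain execution of another DSB.
 * @state: the atomic state
 * @dsb: DSB context that starts the chained DSB
 * @chained_dsb: DSB context to be started from @dsb
 * @wait_for_vblank: have @chained_dsb wait for vblank before executing
 *
 * Emit instructions into @dsb that program and start @chained_dsb, so that
 * the chained DSB is kicked off by the DSB engine itself rather than by
 * the CPU.
 */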
571 void intel_dsb_chain(struct intel_atomic_state *state,
572 		     struct intel_dsb *dsb,
573 		     struct intel_dsb *chained_dsb,
574 		     bool wait_for_vblank)
575 {
576 	_intel_dsb_chain(state, dsb, chained_dsb,
577 			 wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
578 }
579 
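/*
 * Program the DSB engine and kick off execution: the engine executes the
 * instructions between DSB_HEAD and DSB_TAIL, so the tail pointer is
 * written last.
 */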
580 static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
581 			      int hw_dewake_scanline)
582 {
583 	struct intel_crtc *crtc = dsb->crtc;
584 	struct intel_display *display = to_intel_display(crtc->base.dev);
585 	enum pipe pipe = crtc->pipe;
586 	u32 tail;
587 
588 	tail = dsb->free_pos * 4;
589 	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
590 		return;
591 
592 	if (is_dsb_busy(display, pipe, dsb->id)) {
593 		drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
594 			crtc->base.base.id, crtc->base.name, dsb->id);
595 		return;
596 	}
597 
598 	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
599 			  ctrl | DSB_ENABLE);
600 
601 	intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
602 			  dsb->chicken);
603 
604 	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
605 			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
606 			  dsb_error_int_en(display));
607 
608 	intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
609 			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
610 
611 	if (hw_dewake_scanline >= 0) {
612 		int diff, position;
613 
614 		intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
615 				  DSB_ENABLE_DEWAKE |
616 				  DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
617 
618 		/*
619 		 * Force DEwake immediately if we're already past
620 		 * or close to racing past the target scanline.
621 		 */
622 		position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;
623 
624 		diff = hw_dewake_scanline - position;
625 		intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
626 				  (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
627 				  DSB_BLOCK_DEWAKE_EXTENSION);
628 	}
629 
630 	intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
631 			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
632 }
633 
634 /**
635  * intel_dsb_commit() - Trigger workload execution of DSB.
636  * @dsb: DSB context
637  * @wait_for_vblank: wait for vblank before executing
638  *
639  * This function is used to do the actual writes to the hardware using the DSB.
640  */
641 void intel_dsb_commit(struct intel_dsb *dsb,
642 		      bool wait_for_vblank)
643 {
644 	_intel_dsb_commit(dsb,
645 			  wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
646 			  wait_for_vblank ? dsb->hw_dewake_scanline : -1);
647 }
648 
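/**
 * intel_dsb_wait() - Wait for DSB workload completion.
 * @dsb: DSB context
 *
 * Wait for the DSB to become idle. On timeout the DSB is halted, its state
 * and contents are dumped for debugging, and the context is reset so that
 * it can be reused.
 */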
649 void intel_dsb_wait(struct intel_dsb *dsb)
650 {
651 	struct intel_crtc *crtc = dsb->crtc;
652 	struct intel_display *display = to_intel_display(crtc->base.dev);
653 	enum pipe pipe = crtc->pipe;
654 
655 	if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
656 		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
657 
658 		intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
659 				  DSB_ENABLE | DSB_HALT);
660 
661 		drm_err(display->drm,
662 			"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
663 			crtc->base.base.id, crtc->base.name, dsb->id,
664 			intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
665 			intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
666 			intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);
667 
668 		intel_dsb_dump(dsb);
669 	}
670 
671 	/* Attempt to reset it */
672 	dsb->free_pos = 0;
673 	dsb->ins_start_offset = 0;
674 	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);
675 
676 	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
677 			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
678 }
679 
680 /**
681  * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
682  * @state: the atomic state
683  * @crtc: the CRTC
684  * @dsb_id: the DSB engine to use
685  * @max_cmds: number of commands we need to fit into the command buffer
686  *
687  * This function prepares the command buffer, which is used to store the
688  * dsb instructions and their data.
689  *
690  * Returns:
691  * DSB context, NULL on failure
692  */
693 struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
694 				    struct intel_crtc *crtc,
695 				    enum intel_dsb_id dsb_id,
696 				    unsigned int max_cmds)
697 {
698 	struct drm_i915_private *i915 = to_i915(state->base.dev);
699 	intel_wakeref_t wakeref;
700 	struct intel_dsb *dsb;
701 	unsigned int size;
702 
703 	if (!HAS_DSB(i915))
704 		return NULL;
705 
706 	if (!i915->display.params.enable_dsb)
707 		return NULL;
708 
709 	/* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
710 	if (!IS_ENABLED(I915))
711 		return NULL;
712 
713 	dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
714 	if (!dsb)
715 		goto out;
716 
717 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
718 
719 	/* ~1 qword per instruction, full cachelines */
720 	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
721 
722 	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
723 		goto out_put_rpm;
724 
725 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
726 
727 	dsb->id = dsb_id;
728 	dsb->crtc = crtc;
729 	dsb->size = size / 4; /* in dwords */
730 	dsb->free_pos = 0;
731 	dsb->ins_start_offset = 0;
732 
733 	dsb->chicken = dsb_chicken(state, crtc);
734 	dsb->hw_dewake_scanline =
735 		dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));
736 
737 	return dsb;
738 
739 out_put_rpm:
740 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
741 	kfree(dsb);
742 out:
743 	drm_info_once(&i915->drm,
744 		      "[CRTC:%d:%s] DSB %d queue setup failed, will fall back to MMIO for display HW programming\n",
745 		      crtc->base.base.id, crtc->base.name, dsb_id);
746 
747 	return NULL;
748 }
749 
750 /**
751  * intel_dsb_cleanup() - To cleanup DSB context.
752  * @dsb: DSB context
753  *
754  * This function cleans up the DSB context by unpinning and releasing
755  * the VMA object associated with it.
756  */
757 void intel_dsb_cleanup(struct intel_dsb *dsb)
758 {
759 	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
760 	kfree(dsb);
761 }
762 
763 void intel_dsb_irq_handler(struct intel_display *display,
764 			   enum pipe pipe, enum intel_dsb_id dsb_id)
765 {
766 	struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe);
767 	u32 tmp, errors;
768 
769 	tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
770 	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);
771 
772 	errors = tmp & dsb_error_int_status(display);
773 	if (errors)
774 		drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
775 			crtc->base.base.id, crtc->base.name, dsb_id, errors);
776 }
777