xref: /linux/drivers/gpu/drm/i915/display/intel_dsb.c (revision 06103dccbbd29408255a409f6f98f7f02387dc93)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  *
5  */
6 
7 #include <drm/drm_vblank.h>
8 
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "i915_reg.h"
12 #include "intel_crtc.h"
13 #include "intel_de.h"
14 #include "intel_display_types.h"
15 #include "intel_dsb.h"
16 #include "intel_dsb_buffer.h"
17 #include "intel_dsb_regs.h"
18 #include "intel_vblank.h"
19 #include "intel_vrr.h"
20 #include "skl_watermark.h"
21 
22 #define CACHELINE_BYTES 64
23 
struct intel_dsb {
	/* which of the pipe's DSB engines this context drives */
	enum intel_dsb_id id;

	/* backing storage for the command buffer */
	struct intel_dsb_buffer dsb_buf;
	/* the CRTC whose registers this DSB programs */
	struct intel_crtc *crtc;

	/*
	 * maximum number of dwords the buffer will hold.
	 */
	unsigned int size;

	/*
	 * free_pos will point the first free dword and
	 * help in calculating tail of command buffer.
	 */
	unsigned int free_pos;

	/*
	 * Previously emitted DSB instruction. Used to
	 * identify/adjust the instruction for indexed
	 * register writes.
	 */
	u32 ins[2];

	/*
	 * Start of the previously emitted DSB instruction.
	 * Used to adjust the instruction for indexed
	 * register writes.
	 */
	unsigned int ins_start_offset;

	/* DSB_CHICKEN value, precomputed in intel_dsb_prepare() */
	u32 chicken;
	/* hw DSL value for DEwake, precomputed in intel_dsb_prepare() */
	int hw_dewake_scanline;
};
58 
59 /**
60  * DOC: DSB
61  *
62  * A DSB (Display State Buffer) is a queue of MMIO instructions in the memory
63  * which can be offloaded to DSB HW in Display Controller. DSB HW is a DMA
64  * engine that can be programmed to download the DSB from memory.
65  * It allows driver to batch submit display HW programming. This helps to
66  * reduce loading time and CPU activity, thereby making the context switch
67  * faster. DSB Support added from Gen12 Intel graphics based platform.
68  *
69  * DSB's can access only the pipe, plane, and transcoder Data Island Packet
70  * registers.
71  *
72  * DSB HW can support only register writes (both indexed and direct MMIO
73  * writes). There are no registers reads possible with DSB HW engine.
74  */
75 
76 /* DSB opcodes. */
77 #define DSB_OPCODE_SHIFT		24
78 #define DSB_OPCODE_NOOP			0x0
79 #define DSB_OPCODE_MMIO_WRITE		0x1
80 #define   DSB_BYTE_EN			0xf
81 #define   DSB_BYTE_EN_SHIFT		20
82 #define   DSB_REG_VALUE_MASK		0xfffff
83 #define DSB_OPCODE_WAIT_USEC		0x2
84 #define DSB_OPCODE_WAIT_SCANLINE	0x3
85 #define DSB_OPCODE_WAIT_VBLANKS		0x4
86 #define DSB_OPCODE_WAIT_DSL_IN		0x5
87 #define DSB_OPCODE_WAIT_DSL_OUT		0x6
88 #define   DSB_SCANLINE_UPPER_SHIFT	20
89 #define   DSB_SCANLINE_LOWER_SHIFT	0
90 #define DSB_OPCODE_INTERRUPT		0x7
91 #define DSB_OPCODE_INDEXED_WRITE	0x9
92 /* see DSB_REG_VALUE_MASK */
93 #define DSB_OPCODE_POLL			0xA
94 /* see DSB_REG_VALUE_MASK */
95 
pre_commit_is_vrr_active(struct intel_atomic_state * state,struct intel_crtc * crtc)96 static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
97 				     struct intel_crtc *crtc)
98 {
99 	const struct intel_crtc_state *old_crtc_state =
100 		intel_atomic_get_old_crtc_state(state, crtc);
101 	const struct intel_crtc_state *new_crtc_state =
102 		intel_atomic_get_new_crtc_state(state, crtc);
103 
104 	/* VRR will be enabled afterwards, if necessary */
105 	if (intel_crtc_needs_modeset(new_crtc_state))
106 		return false;
107 
108 	/* VRR will have been disabled during intel_pre_plane_update() */
109 	return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
110 }
111 
static const struct intel_crtc_state *
pre_commit_crtc_state(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/*
	 * Pick the crtc state whose timings the transcoder is actually
	 * running with when the DSB executes: during fastsets/etc. the
	 * old timings are still active at this point, only a full
	 * modeset switches over to the new ones.
	 */
	return intel_crtc_needs_modeset(new_crtc_state) ?
		new_crtc_state : old_crtc_state;
}
130 
dsb_vblank_delay(const struct intel_crtc_state * crtc_state)131 static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state)
132 {
133 	return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
134 		intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
135 }
136 
dsb_vtotal(struct intel_atomic_state * state,struct intel_crtc * crtc)137 static int dsb_vtotal(struct intel_atomic_state *state,
138 		      struct intel_crtc *crtc)
139 {
140 	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
141 
142 	if (pre_commit_is_vrr_active(state, crtc))
143 		return crtc_state->vrr.vmax;
144 	else
145 		return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
146 }
147 
/*
 * First scanline of the DEwake window: back off from the end of the
 * active area by the max watermark latency (converted to scanlines),
 * so the hardware is presumably awake in time for vblank — TODO
 * confirm the exact rationale against bspec.
 */
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	unsigned int latency = skl_watermark_max_latency(i915, 0);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
}
158 
/* Last scanline of the DEwake window: the end of the active area. */
static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}
166 
/*
 * Convert a software scanline number to the corresponding hardware
 * DSL counter value by subtracting the crtc's scanline offset,
 * wrapping modulo vtotal.
 */
static int dsb_scanline_to_hw(struct intel_atomic_state *state,
			      struct intel_crtc *crtc, int scanline)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	int vtotal = dsb_vtotal(state, crtc);

	/* '+ vtotal' keeps the dividend non-negative before the modulo */
	return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
}
175 
/*
 * DSB_CHICKEN value to use while the DSB executes: with VRR active
 * the safe window/vblank waits are relaxed as well, otherwise only
 * DSB_SKIP_WAITS_EN is set.
 */
static u32 dsb_chicken(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	if (pre_commit_is_vrr_active(state, crtc))
		return DSB_SKIP_WAITS_EN |
			DSB_CTRL_WAIT_SAFE_WINDOW |
			DSB_CTRL_NO_WAIT_VBLANK |
			DSB_INST_WAIT_SAFE_WINDOW |
			DSB_INST_NO_WAIT_VBLANK;
	else
		return DSB_SKIP_WAITS_EN;
}
188 
/*
 * Check that at least one more instruction (2 dwords) still fits in
 * the command buffer. WARNs and returns false on overflow.
 */
static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);

	/* each instruction is 2 dwords */
	return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
			 "[CRTC:%d:%s] DSB %d buffer overflow\n",
			 crtc->base.base.id, crtc->base.name, dsb->id);
}
199 
/* Dump the command buffer (rounded up to full cachelines) to the kernel log. */
static void intel_dsb_dump(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	int i;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
		    crtc->base.base.id, crtc->base.name, dsb->id);
	/* i is a dword index; print four dwords (16 bytes) per line */
	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
		drm_dbg_kms(display->drm,
			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
	drm_dbg_kms(display->drm, "}\n");
}
217 
is_dsb_busy(struct intel_display * display,enum pipe pipe,enum intel_dsb_id dsb_id)218 static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
219 			enum intel_dsb_id dsb_id)
220 {
221 	return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
222 }
223 
/*
 * Append one 8-byte DSB instruction (low dword, high dword) to the
 * command buffer, remembering the instruction and where it starts
 * so that a later indexed write can patch it in place (see
 * intel_dsb_reg_write_indexed()).
 */
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
	if (!assert_dsb_has_room(dsb))
		return;

	/* Every instruction should be 8 byte aligned. */
	dsb->free_pos = ALIGN(dsb->free_pos, 2);

	dsb->ins_start_offset = dsb->free_pos;
	dsb->ins[0] = ldw;
	dsb->ins[1] = udw;

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}
239 
intel_dsb_prev_ins_is_write(struct intel_dsb * dsb,u32 opcode,i915_reg_t reg)240 static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
241 					u32 opcode, i915_reg_t reg)
242 {
243 	u32 prev_opcode, prev_reg;
244 
245 	/*
246 	 * Nothing emitted yet? Must check before looking
247 	 * at the actual data since i915_gem_object_create_internal()
248 	 * does *not* give you zeroed memory!
249 	 */
250 	if (dsb->free_pos == 0)
251 		return false;
252 
253 	prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK;
254 	prev_reg =  dsb->ins[1] & DSB_REG_VALUE_MASK;
255 
256 	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
257 }
258 
intel_dsb_prev_ins_is_indexed_write(struct intel_dsb * dsb,i915_reg_t reg)259 static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
260 {
261 	return intel_dsb_prev_ins_is_write(dsb,
262 					   DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT,
263 					   reg);
264 }
265 
/**
 * intel_dsb_reg_write_indexed() - Emit indexed register write to the DSB context
 * @dsb: DSB context
 * @reg: register address.
 * @val: value.
 *
 * This function is used for writing register-value pair in command
 * buffer of DSB.
 *
 * Note that indexed writes are slower than normal MMIO writes
 * for a small number (less than 5 or so) of writes to the same
 * register.
 */
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
				 i915_reg_t reg, u32 val)
{
	/*
	 * For example the buffer will look like below for 3 dwords for auto
	 * increment register:
	 * +--------------------------------------------------------+
	 * | size = 3 | offset &| value1 | value2 | value3 | zero   |
	 * |          | opcode  |        |        |        |        |
	 * +--------------------------------------------------------+
	 * +          +         +        +        +        +        +
	 * 0          4         8        12       16       20       24
	 * Byte
	 *
	 * As every instruction is 8 byte aligned the index of dsb instruction
	 * will start always from even number while dealing with u32 array. If
	 * we are writing odd no of dwords, Zeros will be added in the end for
	 * padding.
	 */

	/* Start a new indexed write unless we can append to the previous one. */
	if (!intel_dsb_prev_ins_is_indexed_write(dsb, reg))
		intel_dsb_emit(dsb, 0, /* count */
			       (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
			       i915_mmio_reg_offset(reg));

	if (!assert_dsb_has_room(dsb))
		return;

	/* Update the count */
	dsb->ins[0]++;
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
			       dsb->ins[0]);

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
	/*
	 * If number of data words is odd, then the last dword should be 0.
	 * free_pos is deliberately not advanced here: a further value
	 * appended to this instruction simply overwrites the padding.
	 */
	if (dsb->free_pos & 0x1)
		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
316 
/**
 * intel_dsb_reg_write() - Emit a direct MMIO register write to the DSB context
 * @dsb: DSB context
 * @reg: register address.
 * @val: value.
 *
 * Emit a single MMIO write instruction (all four byte enables set)
 * into the DSB command buffer.
 */
void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}
325 
static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
	u32 byte_en = 0;
	int byte;

	/* set one byte-enable bit for each byte the mask touches */
	for (byte = 0; byte < 4; byte++) {
		if (mask & (0xffu << 8 * byte))
			byte_en |= 1u << byte;
	}

	return byte_en;
}
333 
/*
 * Note: mask implemented via byte enables!
 * Only byte-granular masking is possible: each byte of @val is
 * written iff the corresponding byte of @mask has any bit set
 * (see intel_dsb_mask_to_byte_en()).
 */
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (intel_dsb_mask_to_byte_en(mask) << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}
343 
intel_dsb_noop(struct intel_dsb * dsb,int count)344 void intel_dsb_noop(struct intel_dsb *dsb, int count)
345 {
346 	int i;
347 
348 	for (i = 0; i < count; i++)
349 		intel_dsb_emit(dsb, 0,
350 			       DSB_OPCODE_NOOP << DSB_OPCODE_SHIFT);
351 }
352 
/*
 * Switch the DSB into non-posted MMIO write mode. The trailing noops
 * presumably give the mode change time to take effect before further
 * instructions execute — NOTE(review): confirm against bspec.
 */
void intel_dsb_nonpost_start(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, DSB_NON_POSTED);
	intel_dsb_noop(dsb, 4);
}
362 
/*
 * Switch the DSB back to posted MMIO write mode; pairs with
 * intel_dsb_nonpost_start().
 */
void intel_dsb_nonpost_end(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, 0);
	intel_dsb_noop(dsb, 4);
}
372 
/**
 * intel_dsb_interrupt() - Emit an interrupt instruction to the DSB context
 * @dsb: DSB context
 *
 * Emit an instruction that raises the DSB programmable interrupt
 * (serviced by intel_dsb_irq_handler()) when executed.
 */
void intel_dsb_interrupt(struct intel_dsb *dsb)
{
	intel_dsb_emit(dsb, 0,
		       DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT);
}
378 
/**
 * intel_dsb_wait_usec() - Emit a timed wait instruction to the DSB context
 * @dsb: DSB context
 * @count: number of microseconds to wait
 */
void intel_dsb_wait_usec(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT);
}
384 
/**
 * intel_dsb_wait_vblanks() - Emit a wait-for-vblanks instruction to the DSB context
 * @dsb: DSB context
 * @count: number of vblanks to wait for
 */
void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT);
}
390 
/*
 * Emit a scanline window wait instruction. The lower/upper scanlines
 * are packed into a 64-bit value whose upper bits end up sharing the
 * instruction's high dword with the opcode (see
 * DSB_SCANLINE_UPPER_SHIFT/DSB_SCANLINE_LOWER_SHIFT).
 */
static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
				    u32 opcode, int lower, int upper)
{
	u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
		((u64)lower << DSB_SCANLINE_LOWER_SHIFT);

	intel_dsb_emit(dsb, lower_32_bits(window),
		       (opcode << DSB_OPCODE_SHIFT) |
		       upper_32_bits(window));
}
401 
/*
 * Emit a wait for the given scanline window. The window is described
 * both as an IN window and as its complementary OUT window; after
 * conversion to hw DSL values one of the two should not wrap around
 * vtotal, and that one gets emitted.
 */
static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
			       struct intel_dsb *dsb,
			       int lower_in, int upper_in,
			       int lower_out, int upper_out)
{
	struct intel_crtc *crtc = dsb->crtc;

	lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
	upper_in = dsb_scanline_to_hw(state, crtc, upper_in);

	lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
	upper_out = dsb_scanline_to_hw(state, crtc, upper_out);

	if (upper_in >= lower_in)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
					lower_in, upper_in);
	else if (upper_out >= lower_out)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
					lower_out, upper_out);
	else
		drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
}
424 
/* Sanity check the scanline window passed to the DSL wait helpers. */
static void assert_dsl_ok(struct intel_atomic_state *state,
			  struct intel_dsb *dsb,
			  int start, int end)
{
	struct intel_crtc *crtc = dsb->crtc;
	int vtotal = dsb_vtotal(state, crtc);

	/*
	 * Waiting for the entire frame doesn't make sense,
	 * (IN==don't wait, OUT=wait forever).
	 */
	drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
		 "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
		 crtc->base.base.id, crtc->base.name, dsb->id,
		 start, end, vtotal);
}
441 
/*
 * Emit a wait until the scanline counter is inside [start, end].
 * The complementary OUT window is passed along as well in case the
 * IN window wraps around vtotal after conversion to hw counters
 * (see intel_dsb_wait_dsl()).
 */
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
				struct intel_dsb *dsb,
				int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   start, end,
			   end + 1, start - 1);
}
452 
/*
 * Emit a wait until the scanline counter is outside [start, end].
 * The complementary IN window is passed along as well in case the
 * OUT window wraps around vtotal after conversion to hw counters
 * (see intel_dsb_wait_dsl()).
 */
void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
				 struct intel_dsb *dsb,
				 int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   end + 1, start - 1,
			   start, end);
}
463 
/*
 * Zero-pad the command buffer tail out to a full cacheline, as
 * required by the tail pointer programming (see the IS_ALIGNED()
 * checks in _intel_dsb_commit()/_intel_dsb_chain()).
 */
static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
	u32 aligned_tail, tail;

	/* tail in bytes; free_pos counts dwords */
	tail = dsb->free_pos * 4;
	aligned_tail = ALIGN(tail, CACHELINE_BYTES);

	if (aligned_tail > tail)
		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
					aligned_tail - tail);

	dsb->free_pos = aligned_tail / 4;
}
477 
/**
 * intel_dsb_finish() - Finalize the DSB command buffer
 * @dsb: DSB context
 *
 * Emit the closing instructions, pad the tail to a full cacheline
 * and flush the CPU mapping so the hardware sees the whole buffer.
 * Call this before intel_dsb_commit()/intel_dsb_chain().
 */
void intel_dsb_finish(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;

	/*
	 * DSB_FORCE_DEWAKE remains active even after DSB is
	 * disabled, so make sure to clear it (if set during
	 * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
	 * well for good measure.
	 */
	intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
	intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
				   DSB_FORCE_DEWAKE, 0);

	intel_dsb_align_tail(dsb);

	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}
496 
/* Error interrupt status bits that actually exist on this platform. */
static u32 dsb_error_int_status(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_STATUS |
		DSB_RSPTIMEOUT_INT_STATUS |
		DSB_POLL_ERR_INT_STATUS;

	/*
	 * All the non-existing status bits operate as
	 * normal r/w bits, so any attempt to clear them
	 * will just end up setting them. Never do that so
	 * we won't mistake them for actual error interrupts.
	 */
	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_STATUS;

	return errors;
}
516 
dsb_error_int_en(struct intel_display * display)517 static u32 dsb_error_int_en(struct intel_display *display)
518 {
519 	u32 errors;
520 
521 	errors = DSB_GTT_FAULT_INT_EN |
522 		DSB_RSPTIMEOUT_INT_EN |
523 		DSB_POLL_ERR_INT_EN;
524 
525 	if (DISPLAY_VER(display) >= 14)
526 		errors |= DSB_ATS_FAULT_INT_EN;
527 
528 	return errors;
529 }
530 
/**
 * intel_dsb_vblank_evade() - Emit waits to keep the DSB out of the vblank-critical window
 * @state: the atomic state
 * @dsb: DSB context
 *
 * Emit scanline waits so that subsequent instructions do not execute
 * inside the window just before vblank start. With VRR active both
 * the vmin and vmax vblank start positions are evaded.
 */
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
			    struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	/* FIXME calibrate sensibly */
	int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
	int vblank_delay = dsb_vblank_delay(crtc_state);
	int start, end;

	if (pre_commit_is_vrr_active(state, crtc)) {
		end = intel_vrr_vmin_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);

		end = intel_vrr_vmax_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	} else {
		end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	}
}
555 
/*
 * Emit (into @dsb) all the register writes needed to start
 * @chained_dsb, so that the second DSB begins executing as a side
 * effect of the first one running.
 */
static void _intel_dsb_chain(struct intel_atomic_state *state,
			     struct intel_dsb *dsb,
			     struct intel_dsb *chained_dsb,
			     u32 ctrl)
{
	struct intel_display *display = to_intel_display(state->base.dev);
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;
	u32 tail;

	/* a DSB cannot chain to itself */
	if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
		return;

	/* tail must be cacheline aligned (see intel_dsb_finish()) */
	tail = chained_dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
			    ctrl | DSB_ENABLE);

	intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
			    dsb_chicken(state, crtc));

	/* unmask + ack both the error and "done" (programmable) interrupts */
	intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
			    dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			    dsb_error_int_en(display) | DSB_PROG_INT_EN);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
		int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);

		intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
				    DSB_ENABLE_DEWAKE |
				    DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
	}

	intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));

	intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		/*
		 * Keep DEwake alive via the first DSB, in
		 * case we're already past dewake_scanline,
		 * and thus DSB_ENABLE_DEWAKE on the second
		 * DSB won't do its job.
		 */
		intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
					   DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);

		intel_dsb_wait_scanline_out(state, dsb,
					    dsb_dewake_scanline_start(state, crtc),
					    dsb_dewake_scanline_end(state, crtc));
	}
}
613 
/* Make @dsb start @chained_dsb when it runs, optionally gated on vblank. */
void intel_dsb_chain(struct intel_atomic_state *state,
		     struct intel_dsb *dsb,
		     struct intel_dsb *chained_dsb,
		     bool wait_for_vblank)
{
	u32 ctrl = 0;

	if (wait_for_vblank)
		ctrl |= DSB_WAIT_FOR_VBLANK;

	_intel_dsb_chain(state, dsb, chained_dsb, ctrl);
}
622 
/**
 * intel_dsb_wait_vblank_delay() - Emit a wait covering the vblank delay
 * @state: the atomic state
 * @dsb: DSB context
 *
 * Emit a microsecond wait long enough to cover the scanlines between
 * vdisplay and the actual vblank start (+1 us to round up).
 */
void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
				 struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
					     dsb_vblank_delay(crtc_state)) + 1;

	intel_dsb_wait_usec(dsb, usecs);
}
633 
/*
 * Program the DSB engine registers and kick off execution.
 * A negative @hw_dewake_scanline leaves the DEwake mechanism
 * unprogrammed.
 */
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
			      int hw_dewake_scanline)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tail;

	/* tail must be cacheline aligned (see intel_dsb_finish()) */
	tail = dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	if (is_dsb_busy(display, pipe, dsb->id)) {
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
			crtc->base.base.id, crtc->base.name, dsb->id);
		return;
	}

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
			  ctrl | DSB_ENABLE);

	intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
			  dsb->chicken);

	/* unmask + ack both the error and "done" (programmable) interrupts */
	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			  dsb_error_int_en(display) | DSB_PROG_INT_EN);

	intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));

	if (hw_dewake_scanline >= 0) {
		int diff, position;

		intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
				  DSB_ENABLE_DEWAKE |
				  DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));

		/*
		 * Force DEwake immediately if we're already past
		 * or close to racing past the target scanline.
		 */
		position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;

		diff = hw_dewake_scanline - position;
		intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
				  (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
				  DSB_BLOCK_DEWAKE_EXTENSION);
	}

	/* tail is written last; NOTE(review): presumably this arms execution */
	intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}
687 
688 /**
689  * intel_dsb_commit() - Trigger workload execution of DSB.
690  * @dsb: DSB context
691  * @wait_for_vblank: wait for vblank before executing
692  *
693  * This function is used to do actual write to hardware using DSB.
694  */
intel_dsb_commit(struct intel_dsb * dsb,bool wait_for_vblank)695 void intel_dsb_commit(struct intel_dsb *dsb,
696 		      bool wait_for_vblank)
697 {
698 	_intel_dsb_commit(dsb,
699 			  wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
700 			  wait_for_vblank ? dsb->hw_dewake_scanline : -1);
701 }
702 
/**
 * intel_dsb_wait() - Wait for DSB execution to finish
 * @dsb: DSB context
 *
 * Poll for the DSB to go idle. On timeout the engine is halted, the
 * failure is logged together with the buffer contents. The context
 * is reset for reuse either way.
 */
void intel_dsb_wait(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);

		/* halt the engine before poking at its pointers */
		intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
				  DSB_ENABLE | DSB_HALT);

		drm_err(display->drm,
			"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
			crtc->base.base.id, crtc->base.name, dsb->id,
			intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);

		intel_dsb_dump(dsb);
	}

	/* Attempt to reset it */
	dsb->free_pos = 0;
	dsb->ins_start_offset = 0;
	dsb->ins[0] = 0;
	dsb->ins[1] = 0;

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);

	/* ack any leftover error/"done" interrupt status */
	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
}
736 
/**
 * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
 * @state: the atomic state
 * @crtc: the CRTC
 * @dsb_id: the DSB engine to use
 * @max_cmds: number of commands we need to fit into command buffer
 *
 * This function prepares the command buffer which is used to store dsb
 * instructions with data.
 *
 * Returns:
 * DSB context, NULL on failure
 */
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    enum intel_dsb_id dsb_id,
				    unsigned int max_cmds)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	intel_wakeref_t wakeref;
	struct intel_dsb *dsb;
	unsigned int size;

	if (!HAS_DSB(i915))
		return NULL;

	/* DSB usage can be disabled via modparam */
	if (!i915->display.params.enable_dsb)
		return NULL;

	dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
	if (!dsb)
		goto out;

	/* hold a runtime pm reference across the buffer creation */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ~1 qword per instruction, full cachelines */
	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);

	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
		goto out_put_rpm;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	dsb->id = dsb_id;
	dsb->crtc = crtc;
	dsb->size = size / 4; /* in dwords */

	/* precompute values that depend on the pre-commit crtc state */
	dsb->chicken = dsb_chicken(state, crtc);
	dsb->hw_dewake_scanline =
		dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));

	return dsb;

out_put_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	kfree(dsb);
out:
	drm_info_once(&i915->drm,
		      "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
		      crtc->base.base.id, crtc->base.name, dsb_id);

	return NULL;
}
800 
/**
 * intel_dsb_cleanup() - To cleanup DSB context.
 * @dsb: DSB context
 *
 * This function cleans up the DSB context by unpinning and releasing
 * the VMA object associated with it.
 */
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
	kfree(dsb);
}
813 
/**
 * intel_dsb_irq_handler() - Handle a DSB interrupt
 * @display: display device
 * @pipe: the pipe the interrupt fired on
 * @dsb_id: the DSB engine that raised the interrupt
 *
 * Ack the pending interrupt status, deliver any queued vblank event
 * on the programmable ("done") interrupt, and log error interrupts.
 */
void intel_dsb_irq_handler(struct intel_display *display,
			   enum pipe pipe, enum intel_dsb_id dsb_id)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	u32 tmp, errors;

	/* read and ack all pending status bits */
	tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);

	if (tmp & DSB_PROG_INT_STATUS) {
		spin_lock(&display->drm->event_lock);

		if (crtc->dsb_event) {
			/*
			 * Update vblank counter/timestamp in case it
			 * hasn't been done yet for this frame.
			 */
			drm_crtc_accurate_vblank_count(&crtc->base);

			drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event);
			crtc->dsb_event = NULL;
		}

		spin_unlock(&display->drm->event_lock);
	}

	errors = tmp & dsb_error_int_status(display);
	if (errors)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
			crtc->base.base.id, crtc->base.name, dsb_id, errors);
}
845