// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 *
 */

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"

#define CACHELINE_BYTES 64

struct intel_dsb {
	enum intel_dsb_id id;

	struct intel_dsb_buffer dsb_buf;
	struct intel_crtc *crtc;

	/*
	 * maximum number of dwords the buffer can hold.
	 */
	unsigned int size;

	/*
	 * free_pos points to the first free dword and is used to
	 * calculate the tail of the command buffer.
	 */
	unsigned int free_pos;

	/*
	 * ins_start_offset stores the start dword of the current dsb
	 * instruction and is used to identify a batch of auto-increment
	 * (indexed) register writes.
	 */
	unsigned int ins_start_offset;

	int dewake_scanline;
};

/**
 * DOC: DSB
 *
 * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
 * which can be offloaded to the DSB HW in the display controller. The DSB HW
 * is a DMA engine that can be programmed to download the DSB from memory.
 * It allows the driver to batch-submit display HW programming, which reduces
 * loading time and CPU activity, thereby making context switches faster.
 * DSB support is available from the Gen12 Intel graphics platforms onwards.
 *
 * A DSB can access only the pipe, plane, and transcoder Data Island Packet
 * registers.
 *
 * The DSB HW supports only register writes (both indexed and direct MMIO
 * writes); register reads are not possible with the DSB HW engine.
 */
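
/*
 * Illustrative sketch only (not part of the driver flow): a minimal example
 * of the DSB lifecycle using the helpers in this file. The dsb_id, register
 * and value arguments are placeholders supplied by the caller; real users
 * size the buffer for the commands they actually emit and typically commit
 * from the atomic commit path.
 */
static void __maybe_unused intel_dsb_usage_sketch(struct intel_atomic_state *state,
						  struct intel_crtc *crtc,
						  enum intel_dsb_id dsb_id,
						  i915_reg_t reg, u32 val)
{
	struct intel_dsb *dsb;

	/* room for ~16 instructions; NULL means fall back to MMIO writes */
	dsb = intel_dsb_prepare(state, crtc, dsb_id, 16);
	if (!dsb)
		return;

	/* queue register writes into the command buffer */
	intel_dsb_reg_write(dsb, reg, val);

	/* pad to a full cacheline and flush the buffer mapping */
	intel_dsb_finish(dsb);

	/* start the DSB HW immediately (no vblank wait) and wait for idle */
	intel_dsb_commit(dsb, false);
	intel_dsb_wait(dsb);

	intel_dsb_cleanup(dsb);
}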

/* DSB opcodes. */
#define DSB_OPCODE_SHIFT		24
#define DSB_OPCODE_NOOP			0x0
#define DSB_OPCODE_MMIO_WRITE		0x1
#define   DSB_BYTE_EN			0xf
#define   DSB_BYTE_EN_SHIFT		20
#define   DSB_REG_VALUE_MASK		0xfffff
#define DSB_OPCODE_WAIT_USEC		0x2
#define DSB_OPCODE_WAIT_SCANLINE	0x3
#define DSB_OPCODE_WAIT_VBLANKS		0x4
#define DSB_OPCODE_WAIT_DSL_IN		0x5
#define DSB_OPCODE_WAIT_DSL_OUT		0x6
#define   DSB_SCANLINE_UPPER_SHIFT	20
#define   DSB_SCANLINE_LOWER_SHIFT	0
#define DSB_OPCODE_INTERRUPT		0x7
#define DSB_OPCODE_INDEXED_WRITE	0x9
/* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL			0xA
/* see DSB_REG_VALUE_MASK */

static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);

	/* each instruction is 2 dwords */
	return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
			 "[CRTC:%d:%s] DSB %d buffer overflow\n",
			 crtc->base.base.id, crtc->base.name, dsb->id);
}

static void intel_dsb_dump(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	int i;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
		    crtc->base.base.id, crtc->base.name, dsb->id);
	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
		drm_dbg_kms(display->drm,
			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
	drm_dbg_kms(display->drm, "}\n");
}

static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
			enum intel_dsb_id dsb_id)
{
	return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}

static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
	if (!assert_dsb_has_room(dsb))
		return;

	/* Every instruction should be 8 byte aligned. */
	dsb->free_pos = ALIGN(dsb->free_pos, 2);

	dsb->ins_start_offset = dsb->free_pos;

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
}

static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
					u32 opcode, i915_reg_t reg)
{
	u32 prev_opcode, prev_reg;

	/*
	 * Nothing emitted yet? Must check before looking
	 * at the actual data since i915_gem_object_create_internal()
	 * does *not* give you zeroed memory!
	 */
	if (dsb->free_pos == 0)
		return false;

	prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
					    dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
	prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
					 dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;

	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}

static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	/* only full byte-enables can be converted to indexed writes */
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
					   DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
					   reg);
}

static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT,
					   reg);
}

170 
171 /**
172  * intel_dsb_reg_write() - Emit register wriite to the DSB context
173  * @dsb: DSB context
174  * @reg: register address.
175  * @val: value.
176  *
177  * This function is used for writing register-value pair in command
178  * buffer of DSB.
179  */
180 void intel_dsb_reg_write(struct intel_dsb *dsb,
181 			 i915_reg_t reg, u32 val)
182 {
183 	u32 old_val;
184 
185 	/*
186 	 * For example the buffer will look like below for 3 dwords for auto
187 	 * increment register:
188 	 * +--------------------------------------------------------+
189 	 * | size = 3 | offset &| value1 | value2 | value3 | zero   |
190 	 * |          | opcode  |        |        |        |        |
191 	 * +--------------------------------------------------------+
192 	 * +          +         +        +        +        +        +
193 	 * 0          4         8        12       16       20       24
194 	 * Byte
195 	 *
196 	 * As every instruction is 8 byte aligned the index of dsb instruction
197 	 * will start always from even number while dealing with u32 array. If
198 	 * we are writing odd no of dwords, Zeros will be added in the end for
199 	 * padding.
200 	 */
201 	if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
202 	    !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
203 		intel_dsb_emit(dsb, val,
204 			       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
205 			       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
206 			       i915_mmio_reg_offset(reg));
207 	} else {
208 		if (!assert_dsb_has_room(dsb))
209 			return;
210 
211 		/* convert to indexed write? */
212 		if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
213 			u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
214 							     dsb->ins_start_offset + 0);
215 
216 			intel_dsb_buffer_write(&dsb->dsb_buf,
217 					       dsb->ins_start_offset + 0, 1); /* count */
218 			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
219 					       (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
220 					       i915_mmio_reg_offset(reg));
221 			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
222 
223 			dsb->free_pos++;
224 		}
225 
226 		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
227 		/* Update the count */
228 		old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
229 		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
230 
231 		/* if number of data words is odd, then the last dword should be 0.*/
232 		if (dsb->free_pos & 0x1)
233 			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
234 	}
235 }
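
/*
 * Worked example (illustrative, values arbitrary): three consecutive
 * intel_dsb_reg_write() calls to the same register, with the first write
 * starting at dword index p, leave the buffer as:
 *
 *   [p + 0] = 3                          count of data dwords
 *   [p + 1] = indexed-write opcode | reg offset
 *   [p + 2] = value1
 *   [p + 3] = value2
 *   [p + 4] = value3
 *   [p + 5] = 0                          padding (odd number of values)
 *
 * The second call converts the initial direct MMIO write into an indexed
 * write; later calls only append a value and bump the count.
 */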

static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
	return (!!(mask & 0xff000000) << 3 |
		!!(mask & 0x00ff0000) << 2 |
		!!(mask & 0x0000ff00) << 1 |
		!!(mask & 0x000000ff) << 0);
}
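
/* e.g. mask 0x00ff00ff -> byte enables 0b0101 (bytes 2 and 0 selected) */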

/* Note: mask implemented via byte enables! */
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (intel_dsb_mask_to_byte_en(mask) << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}
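
/*
 * Usage note (illustrative): because the mask is implemented with byte
 * enables, intel_dsb_reg_write_masked(dsb, reg, 0xffff0000, val) updates
 * only the two upper bytes of the register; byte-granular masks work,
 * arbitrary bit masks do not.
 */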

void intel_dsb_noop(struct intel_dsb *dsb, int count)
{
	int i;

	for (i = 0; i < count; i++)
		intel_dsb_emit(dsb, 0,
			       DSB_OPCODE_NOOP << DSB_OPCODE_SHIFT);
}

void intel_dsb_nonpost_start(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, DSB_NON_POSTED);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_nonpost_end(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, 0);
	intel_dsb_noop(dsb, 4);
}

static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
	u32 aligned_tail, tail;

	tail = dsb->free_pos * 4;
	aligned_tail = ALIGN(tail, CACHELINE_BYTES);

	if (aligned_tail > tail)
		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
					aligned_tail - tail);

	dsb->free_pos = aligned_tail / 4;
}
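
/*
 * e.g. (illustrative) free_pos = 10 dwords -> tail = 40 bytes, which gets
 * padded with zeroes up to the next cacheline boundary at 64 bytes, so
 * free_pos becomes 16 dwords.
 */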

void intel_dsb_finish(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;

	/*
	 * DSB_FORCE_DEWAKE remains active even after DSB is
	 * disabled, so make sure to clear it (if set during
	 * intel_dsb_commit()).
	 */
	intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
				   DSB_FORCE_DEWAKE, 0);

	intel_dsb_align_tail(dsb);

	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}

static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	unsigned int latency = skl_watermark_max_latency(i915, 0);
	int vblank_start;

	if (crtc_state->vrr.enable)
		vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
	else
		vblank_start = intel_mode_vblank_start(adjusted_mode);

	return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
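
/*
 * Worked example (illustrative numbers): with vblank_start = 1083 and a
 * 20 usec maximum watermark latency on a mode with a ~14.8 usec line time
 * (1920x1080@60, htotal 2200, 148.5 MHz pixel clock),
 * intel_usecs_to_scanlines() rounds 20 usec up to 2 scanlines, so the
 * DEwake scanline is 1083 - 2 = 1081.
 */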

static u32 dsb_chicken(struct intel_crtc *crtc)
{
	if (crtc->mode_flags & I915_MODE_FLAG_VRR)
		return DSB_SKIP_WAITS_EN |
			DSB_CTRL_WAIT_SAFE_WINDOW |
			DSB_CTRL_NO_WAIT_VBLANK |
			DSB_INST_WAIT_SAFE_WINDOW |
			DSB_INST_NO_WAIT_VBLANK;
	else
		return DSB_SKIP_WAITS_EN;
}

static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
			      int dewake_scanline)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tail;

	tail = dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	if (is_dsb_busy(display, pipe, dsb->id)) {
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
			crtc->base.base.id, crtc->base.name, dsb->id);
		return;
	}

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
			  ctrl | DSB_ENABLE);

	intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
			  dsb_chicken(crtc));

	intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));

	if (dewake_scanline >= 0) {
		int diff, hw_dewake_scanline;

		hw_dewake_scanline = intel_crtc_scanline_to_hw(crtc, dewake_scanline);

		intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
				  DSB_ENABLE_DEWAKE |
				  DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));

		/*
		 * Force DEwake immediately if we're already past
		 * or close to racing past the target scanline.
		 */
		diff = dewake_scanline - intel_get_crtc_scanline(crtc);
		intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
				  (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
				  DSB_BLOCK_DEWAKE_EXTENSION);
	}

	intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}
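
/*
 * e.g. (illustrative) with dewake_scanline = 1081: if the CRTC is currently
 * at scanline 1079 the diff is 2 (< 5), so DSB_FORCE_DEWAKE is set right
 * away; at scanline 900 the diff is 181 and only the scanline-triggered
 * DEwake is armed.
 */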

/**
 * intel_dsb_commit() - Trigger workload execution of DSB.
 * @dsb: DSB context
 * @wait_for_vblank: wait for vblank before executing
 *
 * This function triggers the actual register programming in hardware
 * by starting the DSB engine.
 */
void intel_dsb_commit(struct intel_dsb *dsb,
		      bool wait_for_vblank)
{
	_intel_dsb_commit(dsb,
			  wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
			  wait_for_vblank ? dsb->dewake_scanline : -1);
}

void intel_dsb_wait(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);

		intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
				  DSB_ENABLE | DSB_HALT);

		drm_err(display->drm,
			"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
			crtc->base.base.id, crtc->base.name, dsb->id,
			intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);

		intel_dsb_dump(dsb);
	}

	/* Attempt to reset it */
	dsb->free_pos = 0;
	dsb->ins_start_offset = 0;
	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);
}

/**
 * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
 * @state: the atomic state
 * @crtc: the CRTC
 * @dsb_id: the DSB engine to use
 * @max_cmds: number of commands we need to fit into the command buffer
 *
 * This function prepares the command buffer which is used to store DSB
 * instructions with data.
 *
 * Returns:
 * DSB context, NULL on failure
 */
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    enum intel_dsb_id dsb_id,
				    unsigned int max_cmds)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	intel_wakeref_t wakeref;
	struct intel_dsb *dsb;
	unsigned int size;

	if (!HAS_DSB(i915))
		return NULL;

	if (!i915->display.params.enable_dsb)
		return NULL;

	/* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
	if (!IS_ENABLED(I915))
		return NULL;

	dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
	if (!dsb)
		goto out;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ~1 qword per instruction, full cachelines */
	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
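	/*
	 * e.g. (illustrative) max_cmds = 100 -> 100 * 8 = 800 bytes, rounded
	 * up to 832 bytes (13 cachelines), i.e. 208 dwords.
	 */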

	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
		goto out_put_rpm;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	dsb->id = dsb_id;
	dsb->crtc = crtc;
	dsb->size = size / 4; /* in dwords */
	dsb->free_pos = 0;
	dsb->ins_start_offset = 0;
	dsb->dewake_scanline = intel_dsb_dewake_scanline(crtc_state);

	return dsb;

out_put_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	kfree(dsb);
out:
	drm_info_once(&i915->drm,
		      "[CRTC:%d:%s] DSB %d queue setup failed, will fall back to MMIO for display HW programming\n",
		      crtc->base.base.id, crtc->base.name, dsb_id);

	return NULL;
}

/**
 * intel_dsb_cleanup() - Clean up the DSB context.
 * @dsb: DSB context
 *
 * This function cleans up the DSB context by unpinning and releasing
 * the VMA object associated with it.
 */
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
	kfree(dsb);
}
516