// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 *
 */

#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"

#define CACHELINE_BYTES 64

struct intel_dsb {
	enum intel_dsb_id id;

	struct intel_dsb_buffer dsb_buf;
	struct intel_crtc *crtc;

	/*
	 * Maximum number of dwords the buffer will hold.
	 */
	unsigned int size;

	/*
	 * free_pos points to the first free dword and
	 * is used to calculate the tail of the command buffer.
	 */
	unsigned int free_pos;

	/*
	 * Previously emitted DSB instruction. Used to
	 * identify/adjust the instruction for indexed
	 * register writes.
	 */
	u32 ins[2];

	/*
	 * Start of the previously emitted DSB instruction.
	 * Used to adjust the instruction for indexed
	 * register writes.
	 */
	unsigned int ins_start_offset;

	u32 chicken;
	int hw_dewake_scanline;
};

/**
 * DOC: DSB
 *
 * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
 * which can be offloaded to the DSB HW in the display controller. The DSB
 * HW is a DMA engine that can be programmed to download the DSB from
 * memory. It allows the driver to batch submit display HW programming.
 * This helps to reduce loading time and CPU activity, thereby making the
 * context switch faster. DSB support has been added from the Gen12 Intel
 * graphics platforms onwards.
 *
 * DSBs can access only the pipe, plane, and transcoder Data Island Packet
 * registers.
 *
 * The DSB HW can support only register writes (both indexed and direct
 * MMIO writes). Register reads are not possible with the DSB HW engine.
 */

/* DSB opcodes. */
#define DSB_OPCODE_SHIFT		24
#define DSB_OPCODE_NOOP			0x0
#define DSB_OPCODE_MMIO_WRITE		0x1
#define   DSB_BYTE_EN			0xf
#define   DSB_BYTE_EN_SHIFT		20
#define   DSB_REG_VALUE_MASK		0xfffff
#define DSB_OPCODE_WAIT_USEC		0x2
#define DSB_OPCODE_WAIT_SCANLINE	0x3
#define DSB_OPCODE_WAIT_VBLANKS		0x4
#define DSB_OPCODE_WAIT_DSL_IN		0x5
#define DSB_OPCODE_WAIT_DSL_OUT		0x6
#define   DSB_SCANLINE_UPPER_SHIFT	20
#define   DSB_SCANLINE_LOWER_SHIFT	0
#define DSB_OPCODE_INTERRUPT		0x7
#define DSB_OPCODE_INDEXED_WRITE	0x9
/* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL			0xA
/* see DSB_REG_VALUE_MASK */

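/*
 * Each DSB instruction occupies two dwords: the upper dword carries the
 * opcode in bits 31:24 (plus e.g. the byte enables and register offset
 * for MMIO writes), the lower dword carries the data/value.
 */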
static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* VRR will be enabled afterwards, if necessary */
	if (intel_crtc_needs_modeset(new_crtc_state))
		return false;

	/* VRR will have been disabled during intel_pre_plane_update() */
	return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}

static const struct intel_crtc_state *
pre_commit_crtc_state(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/*
	 * During fastsets/etc. the transcoder is still
	 * running with the old timings at this point.
	 */
	if (intel_crtc_needs_modeset(new_crtc_state))
		return new_crtc_state;
	else
		return old_crtc_state;
}

static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state)
{
	return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
		intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}

static int dsb_vtotal(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);

	if (pre_commit_is_vrr_active(state, crtc))
		return crtc_state->vrr.vmax;
	else
		return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
}

static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	unsigned int latency = skl_watermark_max_latency(i915, 0);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
}

static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}

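/*
 * Translate a software scanline into the value the DSB HW will compare
 * against its scanline counter, i.e. subtract the scanline offset of
 * the pipe, wrapping around at vtotal.
 */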
static int dsb_scanline_to_hw(struct intel_atomic_state *state,
			      struct intel_crtc *crtc, int scanline)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	int vtotal = dsb_vtotal(state, crtc);

	return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
}

static u32 dsb_chicken(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	if (pre_commit_is_vrr_active(state, crtc))
		return DSB_SKIP_WAITS_EN |
			DSB_CTRL_WAIT_SAFE_WINDOW |
			DSB_CTRL_NO_WAIT_VBLANK |
			DSB_INST_WAIT_SAFE_WINDOW |
			DSB_INST_NO_WAIT_VBLANK;
	else
		return DSB_SKIP_WAITS_EN;
}

static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);

	/* each instruction is 2 dwords */
	return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
			 "[CRTC:%d:%s] DSB %d buffer overflow\n",
			 crtc->base.base.id, crtc->base.name, dsb->id);
}

static void intel_dsb_dump(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	int i;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
		    crtc->base.base.id, crtc->base.name, dsb->id);
	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
		drm_dbg_kms(display->drm,
			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
	drm_dbg_kms(display->drm, "}\n");
}

static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
			enum intel_dsb_id dsb_id)
{
	return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}

static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
	if (!assert_dsb_has_room(dsb))
		return;

	/* Every instruction should be 8 byte aligned. */
	dsb->free_pos = ALIGN(dsb->free_pos, 2);

	dsb->ins_start_offset = dsb->free_pos;
	dsb->ins[0] = ldw;
	dsb->ins[1] = udw;

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}

static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
					u32 opcode, i915_reg_t reg)
{
	u32 prev_opcode, prev_reg;

	/*
	 * Nothing emitted yet? Must check before looking
	 * at the actual data since i915_gem_object_create_internal()
	 * does *not* give you zeroed memory!
	 */
	if (dsb->free_pos == 0)
		return false;

	prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK;
	prev_reg = dsb->ins[1] & DSB_REG_VALUE_MASK;

	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}

static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	/* only full byte-enables can be converted to indexed writes */
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
					   DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
					   reg);
}

static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT,
					   reg);
}

/**
 * intel_dsb_reg_write_indexed() - Emit a register write to the DSB context
 * @dsb: DSB context
 * @reg: register address.
 * @val: value.
 *
 * This function is used to write a register-value pair into the
 * command buffer of the DSB.
 *
 * Note that indexed writes are slower than normal MMIO writes
 * for a small number (less than 5 or so) of writes to the same
 * register.
 */
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
				 i915_reg_t reg, u32 val)
{
	/*
	 * For an auto-increment register the buffer will, for example,
	 * look like below for three dwords of data:
	 * +--------------------------------------------------------+
	 * | size = 3 | offset &| value1 | value2 | value3 | zero    |
	 * |          | opcode  |        |        |        |         |
	 * +--------------------------------------------------------+
	 * +          +         +        +        +        +        +
	 * 0          4         8        12       16       20       24
	 * Byte
	 *
	 * As every instruction is 8 byte aligned, the index of a DSB
	 * instruction will always start at an even u32 array index. If
	 * an odd number of dwords is written, zeros are appended at the
	 * end as padding.
	 */
	if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
	    !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
		intel_dsb_emit(dsb, val,
			       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
			       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
			       i915_mmio_reg_offset(reg));
	} else {
		if (!assert_dsb_has_room(dsb))
			return;

		/* convert to indexed write? */
		if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
			u32 prev_val = dsb->ins[0];

			dsb->ins[0] = 1; /* count */
			dsb->ins[1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
				i915_mmio_reg_offset(reg);

			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
					       dsb->ins[0]);
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
					       dsb->ins[1]);
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2,
					       prev_val);

			dsb->free_pos++;
		}

		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
		/* Update the count */
		dsb->ins[0]++;
		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
				       dsb->ins[0]);

		/* If the number of data words is odd, the last dword should be 0. */
		if (dsb->free_pos & 0x1)
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
	}
}

void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

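/*
 * Convert a 32-bit write mask into the instruction's four byte enable
 * bits, one bit per byte lane. For example (a worked case): mask
 * 0x00ff00ff enables byte lanes 2 and 0, giving byte_en 0b0101.
 */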
static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
	return (!!(mask & 0xff000000) << 3 |
		!!(mask & 0x00ff0000) << 2 |
		!!(mask & 0x0000ff00) << 1 |
		!!(mask & 0x000000ff) << 0);
}

/* Note: mask implemented via byte enables! */
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (intel_dsb_mask_to_byte_en(mask) << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

void intel_dsb_noop(struct intel_dsb *dsb, int count)
{
	int i;

	for (i = 0; i < count; i++)
		intel_dsb_emit(dsb, 0,
			       DSB_OPCODE_NOOP << DSB_OPCODE_SHIFT);
}

void intel_dsb_nonpost_start(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, DSB_NON_POSTED);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_nonpost_end(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, 0);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_interrupt(struct intel_dsb *dsb)
{
	intel_dsb_emit(dsb, 0,
		       DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT);
}

void intel_dsb_wait_usec(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT);
}

void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT);
}

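/*
 * The scanline window is packed as two 20-bit values (upper:lower) into
 * a single 40-bit quantity; the low 32 bits go into the first
 * instruction dword, the remaining high bits share the second dword
 * with the opcode.
 */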
static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
				    u32 opcode, int lower, int upper)
{
	u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
		((u64)lower << DSB_SCANLINE_LOWER_SHIFT);

	intel_dsb_emit(dsb, lower_32_bits(window),
		       (opcode << DSB_OPCODE_SHIFT) |
		       upper_32_bits(window));
}

static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
			       struct intel_dsb *dsb,
			       int lower_in, int upper_in,
			       int lower_out, int upper_out)
{
	struct intel_crtc *crtc = dsb->crtc;

	lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
	upper_in = dsb_scanline_to_hw(state, crtc, upper_in);

	lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
	upper_out = dsb_scanline_to_hw(state, crtc, upper_out);

	if (upper_in >= lower_in)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
					lower_in, upper_in);
	else if (upper_out >= lower_out)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
					lower_out, upper_out);
	else
		drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
}

static void assert_dsl_ok(struct intel_atomic_state *state,
			  struct intel_dsb *dsb,
			  int start, int end)
{
	struct intel_crtc *crtc = dsb->crtc;
	int vtotal = dsb_vtotal(state, crtc);

	/*
	 * Waiting for the entire frame doesn't make sense
	 * (IN == don't wait, OUT == wait forever).
	 */
	drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
		 "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
		 crtc->base.base.id, crtc->base.name, dsb->id,
		 start, end, vtotal);
}

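/*
 * Each wait below passes both the requested window and its complement
 * so that intel_dsb_wait_dsl() can emit whichever of the IN/OUT
 * variants doesn't wrap around after the scanline translation.
 */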
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
				struct intel_dsb *dsb,
				int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   start, end,
			   end + 1, start - 1);
}

void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
				 struct intel_dsb *dsb,
				 int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   end + 1, start - 1,
			   start, end);
}

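/*
 * DSB_TAIL must be cacheline aligned, so pad the buffer out to the
 * next cacheline boundary with zeros (which decode as noops, see
 * DSB_OPCODE_NOOP).
 */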
static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
	u32 aligned_tail, tail;

	tail = dsb->free_pos * 4;
	aligned_tail = ALIGN(tail, CACHELINE_BYTES);

	if (aligned_tail > tail)
		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
					aligned_tail - tail);

	dsb->free_pos = aligned_tail / 4;
}

void intel_dsb_finish(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;

	/*
	 * DSB_FORCE_DEWAKE remains active even after DSB is
	 * disabled, so make sure to clear it (if set during
	 * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
	 * well for good measure.
	 */
	intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
	intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
				   DSB_FORCE_DEWAKE, 0);

	intel_dsb_align_tail(dsb);

	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}

static u32 dsb_error_int_status(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_STATUS |
		DSB_RSPTIMEOUT_INT_STATUS |
		DSB_POLL_ERR_INT_STATUS;

	/*
	 * All the non-existing status bits operate as
	 * normal r/w bits, so any attempt to clear them
	 * will just end up setting them. Never do that so
	 * we won't mistake them for actual error interrupts.
	 */
	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_STATUS;

	return errors;
}

static u32 dsb_error_int_en(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_EN |
		DSB_RSPTIMEOUT_INT_EN |
		DSB_POLL_ERR_INT_EN;

	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_EN;

	return errors;
}

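/*
 * Emit scanline waits so that the instructions following them don't
 * execute too close to the vblank; with VRR active both the vmin and
 * vmax vblank start positions have to be avoided.
 */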
void intel_dsb_vblank_evade(struct intel_atomic_state *state,
			    struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	/* FIXME calibrate sensibly */
	int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
	int vblank_delay = dsb_vblank_delay(crtc_state);
	int start, end;

	if (pre_commit_is_vrr_active(state, crtc)) {
		end = intel_vrr_vmin_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);

		end = intel_vrr_vmax_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	} else {
		end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	}
}

static void _intel_dsb_chain(struct intel_atomic_state *state,
			     struct intel_dsb *dsb,
			     struct intel_dsb *chained_dsb,
			     u32 ctrl)
{
	struct intel_display *display = to_intel_display(state->base.dev);
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;
	u32 tail;

	if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
		return;

	tail = chained_dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
			    ctrl | DSB_ENABLE);

	intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
			    dsb_chicken(state, crtc));

	intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
			    dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			    dsb_error_int_en(display) | DSB_PROG_INT_EN);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
		int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);

		intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
				    DSB_ENABLE_DEWAKE |
				    DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
	}

	intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));

	intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		/*
		 * Keep DEwake alive via the first DSB, in
		 * case we're already past dewake_scanline,
		 * and thus DSB_ENABLE_DEWAKE on the second
		 * DSB won't do its job.
		 */
		intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
					   DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);

		intel_dsb_wait_scanline_out(state, dsb,
					    dsb_dewake_scanline_start(state, crtc),
					    dsb_dewake_scanline_end(state, crtc));
	}
}

void intel_dsb_chain(struct intel_atomic_state *state,
		     struct intel_dsb *dsb,
		     struct intel_dsb *chained_dsb,
		     bool wait_for_vblank)
{
	_intel_dsb_chain(state, dsb, chained_dsb,
			 wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}

void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
				 struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
					     dsb_vblank_delay(crtc_state)) + 1;

	intel_dsb_wait_usec(dsb, usecs);
}

static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
			      int hw_dewake_scanline)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tail;

	tail = dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	if (is_dsb_busy(display, pipe, dsb->id)) {
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
			crtc->base.base.id, crtc->base.name, dsb->id);
		return;
	}

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
			  ctrl | DSB_ENABLE);

	intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
			  dsb->chicken);

	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			  dsb_error_int_en(display) | DSB_PROG_INT_EN);

	intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));

	if (hw_dewake_scanline >= 0) {
		int diff, position;

		intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
				  DSB_ENABLE_DEWAKE |
				  DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));

		/*
		 * Force DEwake immediately if we're already past
		 * or close to racing past the target scanline.
		 */
		position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;

		diff = hw_dewake_scanline - position;
		intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
				  (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
				  DSB_BLOCK_DEWAKE_EXTENSION);
	}

	intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}

/**
 * intel_dsb_commit() - Trigger workload execution of DSB.
 * @dsb: DSB context
 * @wait_for_vblank: wait for vblank before executing
 *
 * This function triggers the actual hardware writes via the DSB.
 */
void intel_dsb_commit(struct intel_dsb *dsb,
		      bool wait_for_vblank)
{
	_intel_dsb_commit(dsb,
			  wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
			  wait_for_vblank ? dsb->hw_dewake_scanline : -1);
}

void intel_dsb_wait(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);

		intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
				  DSB_ENABLE | DSB_HALT);

		drm_err(display->drm,
			"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
			crtc->base.base.id, crtc->base.name, dsb->id,
			intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);

		intel_dsb_dump(dsb);
	}

	/* Attempt to reset it */
	dsb->free_pos = 0;
	dsb->ins_start_offset = 0;
	dsb->ins[0] = 0;
	dsb->ins[1] = 0;

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);

	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
}

/**
 * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
 * @state: the atomic state
 * @crtc: the CRTC
 * @dsb_id: the DSB engine to use
 * @max_cmds: number of commands we need to fit into the command buffer
 *
 * This function prepares the command buffer, which is used to store
 * DSB instructions with data.
 *
 * Returns:
 * DSB context, NULL on failure
 */
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    enum intel_dsb_id dsb_id,
				    unsigned int max_cmds)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	intel_wakeref_t wakeref;
	struct intel_dsb *dsb;
	unsigned int size;

	if (!HAS_DSB(i915))
		return NULL;

	if (!i915->display.params.enable_dsb)
		return NULL;

	dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
	if (!dsb)
		goto out;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ~1 qword per instruction, full cachelines */
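	/*
	 * e.g. a hypothetical max_cmds of 100 would give 800 bytes,
	 * rounded up to 832 bytes (13 cachelines).
	 */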
	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);

	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
		goto out_put_rpm;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	dsb->id = dsb_id;
	dsb->crtc = crtc;
	dsb->size = size / 4; /* in dwords */

	dsb->chicken = dsb_chicken(state, crtc);
	dsb->hw_dewake_scanline =
		dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));

	return dsb;

out_put_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	kfree(dsb);
out:
	drm_info_once(&i915->drm,
		      "[CRTC:%d:%s] DSB %d queue setup failed, will fall back to MMIO for display HW programming\n",
		      crtc->base.base.id, crtc->base.name, dsb_id);

	return NULL;
}

/**
 * intel_dsb_cleanup() - Clean up the DSB context.
 * @dsb: DSB context
 *
 * This function cleans up the DSB context by unpinning and releasing
 * the VMA object associated with it.
 */
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
	kfree(dsb);
}

void intel_dsb_irq_handler(struct intel_display *display,
			   enum pipe pipe, enum intel_dsb_id dsb_id)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	u32 tmp, errors;

	tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);

	if (tmp & DSB_PROG_INT_STATUS) {
		spin_lock(&display->drm->event_lock);

		if (crtc->dsb_event) {
			/*
			 * Update the vblank counter/timestamp in case it
			 * hasn't been done yet for this frame.
			 */
			drm_crtc_accurate_vblank_count(&crtc->base);

			drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event);
			crtc->dsb_event = NULL;
		}

		spin_unlock(&display->drm->event_lock);
	}

	errors = tmp & dsb_error_int_status(display);
	if (errors)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
			crtc->base.base.id, crtc->base.name, dsb_id, errors);
}