// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 *
 */

#include <drm/drm_vblank.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"

#define CACHELINE_BYTES 64

struct intel_dsb {
	enum intel_dsb_id id;

	struct intel_dsb_buffer dsb_buf;
	struct intel_crtc *crtc;

	/*
	 * maximum number of dwords the buffer will hold.
	 */
	unsigned int size;

	/*
	 * free_pos will point the first free dword and
	 * help in calculating tail of command buffer.
	 */
	unsigned int free_pos;

	/*
	 * Previously emitted DSB instruction. Used to
	 * identify/adjust the instruction for indexed
	 * register writes.
	 */
	u32 ins[2];

	/*
	 * Start of the previously emitted DSB instruction.
	 * Used to adjust the instruction for indexed
	 * register writes.
	 */
	unsigned int ins_start_offset;

	/* DSB_CHICKEN value to program at commit time, computed at prepare time */
	u32 chicken;
	/* hw scanline at which DEwake kicks in, or -1 when not used */
	int hw_dewake_scanline;
};

/**
 * DOC: DSB
 *
 * A DSB (Display State Buffer) is a queue of MMIO instructions in the memory
 * which can be offloaded to DSB HW in Display Controller. DSB HW is a DMA
 * engine that can be programmed to download the DSB from memory.
 * It allows driver to batch submit display HW programming. This helps to
 * reduce loading time and CPU activity, thereby making the context switch
 * faster. DSB Support added from Gen12 Intel graphics based platform.
 *
 * DSB's can access only the pipe, plane, and transcoder Data Island Packet
 * registers.
 *
 * DSB HW can support only register writes (both indexed and direct MMIO
 * writes). There are no register reads possible with DSB HW engine.
 */

/* DSB opcodes. */
#define DSB_OPCODE_SHIFT		24
#define DSB_OPCODE_NOOP			0x0
#define DSB_OPCODE_MMIO_WRITE		0x1
#define   DSB_BYTE_EN			0xf
#define   DSB_BYTE_EN_SHIFT		20
#define   DSB_REG_VALUE_MASK		0xfffff
#define DSB_OPCODE_WAIT_USEC		0x2
#define DSB_OPCODE_WAIT_SCANLINE	0x3
#define DSB_OPCODE_WAIT_VBLANKS		0x4
#define DSB_OPCODE_WAIT_DSL_IN		0x5
#define DSB_OPCODE_WAIT_DSL_OUT		0x6
#define   DSB_SCANLINE_UPPER_SHIFT	20
#define   DSB_SCANLINE_LOWER_SHIFT	0
#define DSB_OPCODE_INTERRUPT		0x7
#define DSB_OPCODE_INDEXED_WRITE	0x9 /* see DSB_REG_VALUE_MASK */
#define DSB_OPCODE_POLL			0xA /* see DSB_REG_VALUE_MASK */

/*
 * Will VRR be active *while the DSB executes*, i.e. after this commit's
 * pre-plane updates but before any post-plane VRR enable?
 */
static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* VRR will be enabled afterwards, if necessary */
	if (intel_crtc_needs_modeset(new_crtc_state))
		return false;

	/* VRR will have been disabled during intel_pre_plane_update() */
	return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}

/*
 * Return the crtc state whose timings will be in effect while the
 * DSB executes: the new state on a full modeset, otherwise the old
 * state (fastsets keep the old timings running at this point).
 */
static const struct intel_crtc_state *
pre_commit_crtc_state(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/*
	 * During fastsets/etc. the transcoder is still
	 * running with the old timings at this point.
	 */
	if (intel_crtc_needs_modeset(new_crtc_state))
		return new_crtc_state;
	else
		return old_crtc_state;
}

/* Number of scanlines between vdisplay and the start of vblank */
static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state)
{
	return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
		intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}

/* Effective vtotal during DSB execution (vrr.vmax when VRR is active) */
static int dsb_vtotal(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);

	if (pre_commit_is_vrr_active(state, crtc))
		return crtc_state->vrr.vmax;
	else
		return intel_mode_vtotal(&crtc_state->hw.adjusted_mode);
}

/*
 * First scanline of the DEwake window: far enough ahead of vdisplay
 * to cover the worst-case memory latency (level 0 watermark latency).
 */
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	unsigned int latency = skl_watermark_max_latency(i915, 0);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
		intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
}

/* Last scanline of the DEwake window (== vdisplay) */
static int dsb_dewake_scanline_end(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);

	return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
}

/*
 * Convert a logical scanline number to the hardware DSL counter value,
 * compensating for the per-platform scanline offset, modulo vtotal.
 */
static int dsb_scanline_to_hw(struct intel_atomic_state *state,
			      struct intel_crtc *crtc, int scanline)
{
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	int vtotal = dsb_vtotal(state, crtc);

	return (scanline + vtotal - intel_crtc_scanline_offset(crtc_state)) % vtotal;
}

/*
 * DSB_CHICKEN value for this commit. With VRR active the DSB must not
 * block on vblank and uses the safe window instead.
 */
static u32 dsb_chicken(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	if (pre_commit_is_vrr_active(state, crtc))
		return DSB_SKIP_WAITS_EN |
			DSB_CTRL_WAIT_SAFE_WINDOW |
			DSB_CTRL_NO_WAIT_VBLANK |
			DSB_INST_WAIT_SAFE_WINDOW |
			DSB_INST_NO_WAIT_VBLANK;
	else
		return DSB_SKIP_WAITS_EN;
}

/*
 * Check that at least one more instruction fits in the buffer.
 * Returns true when there is room; WARNs and returns false otherwise.
 */
static bool assert_dsb_has_room(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);

	/* each instruction is 2 dwords */
	return !drm_WARN(display->drm, dsb->free_pos > dsb->size - 2,
			 "[CRTC:%d:%s] DSB %d buffer overflow\n",
			 crtc->base.base.id, crtc->base.name, dsb->id);
}

/* Dump the used portion of the command buffer to the kms debug log */
static void intel_dsb_dump(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	int i;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] DSB %d commands {\n",
		    crtc->base.base.id, crtc->base.name, dsb->id);
	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
		drm_dbg_kms(display->drm,
			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
	drm_dbg_kms(display->drm, "}\n");
}

static bool is_dsb_busy(struct intel_display *display, enum pipe pipe,
			enum intel_dsb_id dsb_id)
{
	return intel_de_read_fw(display, DSB_CTRL(pipe, dsb_id)) & DSB_STATUS_BUSY;
}

/* Append one two-dword instruction (ldw first, then udw) to the buffer */
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
	if (!assert_dsb_has_room(dsb))
		return;

	/* Every instruction should be 8 byte aligned. */
	dsb->free_pos = ALIGN(dsb->free_pos, 2);

	dsb->ins_start_offset = dsb->free_pos;
	dsb->ins[0] = ldw;
	dsb->ins[1] = udw;

	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}

/* Was the previously emitted instruction a write of @opcode to @reg? */
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
					u32 opcode, i915_reg_t reg)
{
	u32 prev_opcode, prev_reg;

	/*
	 * Nothing emitted yet? Must check before looking
	 * at the actual data since i915_gem_object_create_internal()
	 * does *not* give you zeroed memory!
	 */
	if (dsb->free_pos == 0)
		return false;

	prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK;
	prev_reg = dsb->ins[1] & DSB_REG_VALUE_MASK;

	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}

static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	/* only full byte-enables can be converted to indexed writes */
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
					   DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
					   reg);
}

static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
	return intel_dsb_prev_ins_is_write(dsb,
					   DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT,
					   reg);
}

/**
 * intel_dsb_reg_write() - Emit register write to the DSB context
 * @dsb: DSB context
 * @reg: register address.
 * @val: value.
 *
 * This function is used for writing register-value pair in command
 * buffer of DSB. Consecutive writes to the same register are
 * automatically converted into a single indexed write.
 */
void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val)
{
	/*
	 * For example the buffer will look like below for 3 dwords for auto
	 * increment register:
	 * +--------------------------------------------------------+
	 * | size = 3 | offset &| value1 | value2 | value3 | zero   |
	 * |          | opcode  |        |        |        |        |
	 * +--------------------------------------------------------+
	 * +          +         +        +        +        +        +
	 * 0          4         8        12       16       20       24
	 * Byte
	 *
	 * As every instruction is 8 byte aligned the index of dsb instruction
	 * will start always from even number while dealing with u32 array. If
	 * we are writing odd no of dwords, Zeros will be added in the end for
	 * padding.
	 */
	if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
	    !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
		intel_dsb_emit(dsb, val,
			       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
			       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
			       i915_mmio_reg_offset(reg));
	} else {
		if (!assert_dsb_has_room(dsb))
			return;

		/* convert to indexed write? */
		if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
			u32 prev_val = dsb->ins[0];

			dsb->ins[0] = 1; /* count */
			dsb->ins[1] = (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
				i915_mmio_reg_offset(reg);

			/* rewrite the previous MMIO write in place as an indexed write */
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
					       dsb->ins[0]);
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
					       dsb->ins[1]);
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2,
					       prev_val);

			dsb->free_pos++;
		}

		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
		/* Update the count */
		dsb->ins[0]++;
		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
				       dsb->ins[0]);

		/* if number of data words is odd, then the last dword should be 0.*/
		if (dsb->free_pos & 0x1)
			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
	}
}

/* Convert a 32-bit register mask into per-byte enable bits */
static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
	return (!!(mask & 0xff000000) << 3 |
		!!(mask & 0x00ff0000) << 2 |
		!!(mask & 0x0000ff00) << 1 |
		!!(mask & 0x000000ff) << 0);
}

/* Note: mask implemented via byte enables! */
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (intel_dsb_mask_to_byte_en(mask) << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

void intel_dsb_noop(struct intel_dsb *dsb, int count)
{
	int i;

	for (i = 0; i < count; i++)
		intel_dsb_emit(dsb, 0,
			       DSB_OPCODE_NOOP << DSB_OPCODE_SHIFT);
}

void intel_dsb_nonpost_start(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, DSB_NON_POSTED);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_nonpost_end(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;

	intel_dsb_reg_write_masked(dsb, DSB_CTRL(pipe, dsb->id),
				   DSB_NON_POSTED, 0);
	intel_dsb_noop(dsb, 4);
}

void intel_dsb_interrupt(struct intel_dsb *dsb)
{
	intel_dsb_emit(dsb, 0,
		       DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT);
}

void intel_dsb_wait_usec(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT);
}

void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count)
{
	intel_dsb_emit(dsb, count,
		       DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT);
}

/* Emit a raw wait-for-DSL instruction for the [lower, upper] hw window */
static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
				    u32 opcode, int lower, int upper)
{
	u64 window = ((u64)upper << DSB_SCANLINE_UPPER_SHIFT) |
		((u64)lower << DSB_SCANLINE_LOWER_SHIFT);

	intel_dsb_emit(dsb, lower_32_bits(window),
		       (opcode << DSB_OPCODE_SHIFT) |
		       upper_32_bits(window));
}

/*
 * Emit a scanline window wait, using whichever of the IN/OUT forms
 * yields a non-wrapping hw window after scanline translation.
 */
static void intel_dsb_wait_dsl(struct intel_atomic_state *state,
			       struct intel_dsb *dsb,
			       int lower_in, int upper_in,
			       int lower_out, int upper_out)
{
	struct intel_crtc *crtc = dsb->crtc;

	lower_in = dsb_scanline_to_hw(state, crtc, lower_in);
	upper_in = dsb_scanline_to_hw(state, crtc, upper_in);

	lower_out = dsb_scanline_to_hw(state, crtc, lower_out);
	upper_out = dsb_scanline_to_hw(state, crtc, upper_out);

	if (upper_in >= lower_in)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_IN,
					lower_in, upper_in);
	else if (upper_out >= lower_out)
		intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT,
					lower_out, upper_out);
	else
		drm_WARN_ON(crtc->base.dev, 1); /* assert_dsl_ok() should have caught it already */
}

static void assert_dsl_ok(struct intel_atomic_state *state,
			  struct intel_dsb *dsb,
			  int start, int end)
{
	struct intel_crtc *crtc = dsb->crtc;
	int vtotal = dsb_vtotal(state, crtc);

	/*
	 * Waiting for the entire frame doesn't make sense,
	 * (IN==don't wait, OUT=wait forever).
	 */
	drm_WARN(crtc->base.dev, (end - start + vtotal) % vtotal == vtotal - 1,
		 "[CRTC:%d:%s] DSB %d bad scanline window wait: %d-%d (vt=%d)\n",
		 crtc->base.base.id, crtc->base.name, dsb->id,
		 start, end, vtotal);
}

void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
				struct intel_dsb *dsb,
				int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   start, end,
			   end + 1, start - 1);
}

void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
				 struct intel_dsb *dsb,
				 int start, int end)
{
	assert_dsl_ok(state, dsb, start, end);

	intel_dsb_wait_dsl(state, dsb,
			   end + 1, start - 1,
			   start, end);
}

/* Zero-pad the buffer tail out to a full cacheline, as the hw requires */
static void intel_dsb_align_tail(struct intel_dsb *dsb)
{
	u32 aligned_tail, tail;

	tail = dsb->free_pos * 4;
	aligned_tail = ALIGN(tail, CACHELINE_BYTES);

	if (aligned_tail > tail)
		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
					aligned_tail - tail);

	dsb->free_pos = aligned_tail / 4;
}

void intel_dsb_finish(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;

	/*
	 * DSB_FORCE_DEWAKE remains active even after DSB is
	 * disabled, so make sure to clear it (if set during
	 * intel_dsb_commit()). And clear DSB_ENABLE_DEWAKE as
	 * well for good measure.
	 */
	intel_dsb_reg_write(dsb, DSB_PMCTRL(crtc->pipe, dsb->id), 0);
	intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(crtc->pipe, dsb->id),
				   DSB_FORCE_DEWAKE, 0);

	intel_dsb_align_tail(dsb);

	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}

/* Error interrupt status bits that exist on this platform */
static u32 dsb_error_int_status(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_STATUS |
		DSB_RSPTIMEOUT_INT_STATUS |
		DSB_POLL_ERR_INT_STATUS;

	/*
	 * All the non-existing status bits operate as
	 * normal r/w bits, so any attempt to clear them
	 * will just end up setting them. Never do that so
	 * we won't mistake them for actual error interrupts.
	 */
	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_STATUS;

	return errors;
}

/* Error interrupt enable bits that exist on this platform */
static u32 dsb_error_int_en(struct intel_display *display)
{
	u32 errors;

	errors = DSB_GTT_FAULT_INT_EN |
		DSB_RSPTIMEOUT_INT_EN |
		DSB_POLL_ERR_INT_EN;

	if (DISPLAY_VER(display) >= 14)
		errors |= DSB_ATS_FAULT_INT_EN;

	return errors;
}

void intel_dsb_vblank_evade(struct intel_atomic_state *state,
			    struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	/* FIXME calibrate sensibly */
	int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
	int vblank_delay = dsb_vblank_delay(crtc_state);
	int start, end;

	if (pre_commit_is_vrr_active(state, crtc)) {
		/* with VRR evade both the vmin and vmax vblank start windows */
		end = intel_vrr_vmin_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);

		end = intel_vrr_vmax_vblank_start(crtc_state);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	} else {
		end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
		start = end - vblank_delay - latency;
		intel_dsb_wait_scanline_out(state, dsb, start, end);
	}
}

/*
 * Emit instructions into @dsb that start @chained_dsb executing,
 * programming its control/interrupt/head/tail registers via the DSB
 * itself rather than via CPU MMIO.
 */
static void _intel_dsb_chain(struct intel_atomic_state *state,
			     struct intel_dsb *dsb,
			     struct intel_dsb *chained_dsb,
			     u32 ctrl)
{
	struct intel_display *display = to_intel_display(state->base.dev);
	struct intel_crtc *crtc = dsb->crtc;
	enum pipe pipe = crtc->pipe;
	u32 tail;

	if (drm_WARN_ON(display->drm, dsb->id == chained_dsb->id))
		return;

	tail = chained_dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	intel_dsb_reg_write(dsb, DSB_CTRL(pipe, chained_dsb->id),
			    ctrl | DSB_ENABLE);

	intel_dsb_reg_write(dsb, DSB_CHICKEN(pipe, chained_dsb->id),
			    dsb_chicken(state, crtc));

	intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
			    dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			    dsb_error_int_en(display) | DSB_PROG_INT_EN);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
		int hw_dewake_scanline = dsb_scanline_to_hw(state, crtc, dewake_scanline);

		intel_dsb_reg_write(dsb, DSB_PMCTRL(pipe, chained_dsb->id),
				    DSB_ENABLE_DEWAKE |
				    DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));
	}

	intel_dsb_reg_write(dsb, DSB_HEAD(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf));

	intel_dsb_reg_write(dsb, DSB_TAIL(pipe, chained_dsb->id),
			    intel_dsb_buffer_ggtt_offset(&chained_dsb->dsb_buf) + tail);

	if (ctrl & DSB_WAIT_FOR_VBLANK) {
		/*
		 * Keep DEwake alive via the first DSB, in
		 * case we're already past dewake_scanline,
		 * and thus DSB_ENABLE_DEWAKE on the second
		 * DSB won't do its job.
		 */
		intel_dsb_reg_write_masked(dsb, DSB_PMCTRL_2(pipe, dsb->id),
					   DSB_FORCE_DEWAKE, DSB_FORCE_DEWAKE);

		intel_dsb_wait_scanline_out(state, dsb,
					    dsb_dewake_scanline_start(state, crtc),
					    dsb_dewake_scanline_end(state, crtc));
	}
}

void intel_dsb_chain(struct intel_atomic_state *state,
		     struct intel_dsb *dsb,
		     struct intel_dsb *chained_dsb,
		     bool wait_for_vblank)
{
	_intel_dsb_chain(state, dsb, chained_dsb,
			 wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}

void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
				 struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
	/* +1 usec to round up the scanlines-to-usecs conversion */
	int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
					     dsb_vblank_delay(crtc_state)) + 1;

	intel_dsb_wait_usec(dsb, usecs);
}

/*
 * Program the DSB engine registers via CPU MMIO and kick off execution.
 * @hw_dewake_scanline < 0 disables the DEwake mechanism.
 */
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
			      int hw_dewake_scanline)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tail;

	tail = dsb->free_pos * 4;
	if (drm_WARN_ON(display->drm, !IS_ALIGNED(tail, CACHELINE_BYTES)))
		return;

	if (is_dsb_busy(display, pipe, dsb->id)) {
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d is busy\n",
			crtc->base.base.id, crtc->base.name, dsb->id);
		return;
	}

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
			  ctrl | DSB_ENABLE);

	intel_de_write_fw(display, DSB_CHICKEN(pipe, dsb->id),
			  dsb->chicken);

	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
			  dsb_error_int_en(display) | DSB_PROG_INT_EN);

	intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));

	if (hw_dewake_scanline >= 0) {
		int diff, position;

		intel_de_write_fw(display, DSB_PMCTRL(pipe, dsb->id),
				  DSB_ENABLE_DEWAKE |
				  DSB_SCANLINE_FOR_DEWAKE(hw_dewake_scanline));

		/*
		 * Force DEwake immediately if we're already past
		 * or close to racing past the target scanline.
		 */
		position = intel_de_read_fw(display, PIPEDSL(display, pipe)) & PIPEDSL_LINE_MASK;

		diff = hw_dewake_scanline - position;
		intel_de_write_fw(display, DSB_PMCTRL_2(pipe, dsb->id),
				  (diff >= 0 && diff < 5 ? DSB_FORCE_DEWAKE : 0) |
				  DSB_BLOCK_DEWAKE_EXTENSION);
	}

	/* writing the tail is what actually starts execution */
	intel_de_write_fw(display, DSB_TAIL(pipe, dsb->id),
			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}

/**
 * intel_dsb_commit() - Trigger workload execution of DSB.
 * @dsb: DSB context
 * @wait_for_vblank: wait for vblank before executing
 *
 * This function is used to do actual write to hardware using DSB.
 */
void intel_dsb_commit(struct intel_dsb *dsb,
		      bool wait_for_vblank)
{
	_intel_dsb_commit(dsb,
			  wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0,
			  wait_for_vblank ? dsb->hw_dewake_scanline : -1);
}

void intel_dsb_wait(struct intel_dsb *dsb)
{
	struct intel_crtc *crtc = dsb->crtc;
	struct intel_display *display = to_intel_display(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);

		/* halt the engine before dumping state */
		intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
				  DSB_ENABLE | DSB_HALT);

		drm_err(display->drm,
			"[CRTC:%d:%s] DSB %d timed out waiting for idle (current head=0x%x, head=0x%x, tail=0x%x)\n",
			crtc->base.base.id, crtc->base.name, dsb->id,
			intel_de_read_fw(display, DSB_CURRENT_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_HEAD(pipe, dsb->id)) - offset,
			intel_de_read_fw(display, DSB_TAIL(pipe, dsb->id)) - offset);

		intel_dsb_dump(dsb);
	}

	/* Attempt to reset it */
	dsb->free_pos = 0;
	dsb->ins_start_offset = 0;
	dsb->ins[0] = 0;
	dsb->ins[1] = 0;

	intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);

	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
			  dsb_error_int_status(display) | DSB_PROG_INT_STATUS);
}

/**
 * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
 * @state: the atomic state
 * @crtc: the CRTC
 * @dsb_id: the DSB engine to use
 * @max_cmds: number of commands we need to fit into command buffer
 *
 * This function prepares the command buffer which is used to store dsb
 * instructions with data.
 *
 * Returns:
 * DSB context, NULL on failure
 */
struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    enum intel_dsb_id dsb_id,
				    unsigned int max_cmds)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	intel_wakeref_t wakeref;
	struct intel_dsb *dsb;
	unsigned int size;

	if (!HAS_DSB(i915))
		return NULL;

	if (!i915->display.params.enable_dsb)
		return NULL;

	dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
	if (!dsb)
		goto out;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ~1 qword per instruction, full cachelines */
	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);

	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
		goto out_put_rpm;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	dsb->id = dsb_id;
	dsb->crtc = crtc;
	dsb->size = size / 4; /* in dwords */

	dsb->chicken = dsb_chicken(state, crtc);
	dsb->hw_dewake_scanline =
		dsb_scanline_to_hw(state, crtc, dsb_dewake_scanline_start(state, crtc));

	return dsb;

out_put_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	kfree(dsb);
out:
	drm_info_once(&i915->drm,
		      "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
		      crtc->base.base.id, crtc->base.name, dsb_id);

	return NULL;
}

/**
 * intel_dsb_cleanup() - To cleanup DSB context.
 * @dsb: DSB context
 *
 * This function cleanup the DSB context by unpinning and releasing
 * the VMA object associated with it.
 */
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
	kfree(dsb);
}

void intel_dsb_irq_handler(struct intel_display *display,
			   enum pipe pipe, enum intel_dsb_id dsb_id)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
	u32 tmp, errors;

	/* read and ack all pending DSB interrupt status bits */
	tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
	intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);

	if (tmp & DSB_PROG_INT_STATUS) {
		spin_lock(&display->drm->event_lock);

		if (crtc->dsb_event) {
			/*
			 * Update vblank counter/timestamp in case it
			 * hasn't been done yet for this frame.
			 */
			drm_crtc_accurate_vblank_count(&crtc->base);

			drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event);
			crtc->dsb_event = NULL;
		}

		spin_unlock(&display->drm->event_lock);
	}

	errors = tmp & dsb_error_int_status(display);
	if (errors)
		drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
			crtc->base.base.id, crtc->base.name, dsb_id, errors);
}