1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 /** 25 * DOC: Frame Buffer Compression (FBC) 26 * 27 * FBC tries to save memory bandwidth (and so power consumption) by 28 * compressing the amount of memory used by the display. It is total 29 * transparent to user space and completely handled in the kernel. 30 * 31 * The benefits of FBC are mostly visible with solid backgrounds and 32 * variation-less patterns. It comes from keeping the memory footprint small 33 * and having fewer memory pages opened and accessed for refreshing the display. 34 * 35 * i915 is responsible to reserve stolen memory for FBC and configure its 36 * offset on proper registers. The hardware takes care of all 37 * compress/decompress. However there are many known cases where we have to 38 * forcibly disable it to allow proper screen updates. 39 */ 40 41 #include <linux/debugfs.h> 42 #include <linux/string_helpers.h> 43 44 #include <drm/drm_blend.h> 45 #include <drm/drm_fourcc.h> 46 47 #include "gem/i915_gem_stolen.h" 48 49 #include "gt/intel_gt_types.h" 50 51 #include "i915_drv.h" 52 #include "i915_utils.h" 53 #include "i915_vgpu.h" 54 #include "i915_vma.h" 55 #include "i9xx_plane_regs.h" 56 #include "intel_cdclk.h" 57 #include "intel_de.h" 58 #include "intel_display_device.h" 59 #include "intel_display_regs.h" 60 #include "intel_display_rpm.h" 61 #include "intel_display_trace.h" 62 #include "intel_display_types.h" 63 #include "intel_display_wa.h" 64 #include "intel_fbc.h" 65 #include "intel_fbc_regs.h" 66 #include "intel_frontbuffer.h" 67 68 #define for_each_fbc_id(__display, __fbc_id) \ 69 for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ 70 for_each_if(DISPLAY_RUNTIME_INFO(__display)->fbc_mask & BIT(__fbc_id)) 71 72 #define for_each_intel_fbc(__display, __fbc, __fbc_id) \ 73 for_each_fbc_id((__display), (__fbc_id)) \ 74 for_each_if((__fbc) = (__display)->fbc[(__fbc_id)]) 75 76 struct intel_fbc_funcs { 77 void (*activate)(struct intel_fbc *fbc); 78 void (*deactivate)(struct intel_fbc *fbc); 79 bool (*is_active)(struct intel_fbc *fbc); 80 bool (*is_compressing)(struct intel_fbc *fbc); 81 void (*nuke)(struct intel_fbc *fbc); 82 void (*program_cfb)(struct intel_fbc *fbc); 83 void (*set_false_color)(struct intel_fbc *fbc, bool enable); 84 }; 85 86 struct intel_fbc_state { 87 struct intel_plane *plane; 88 unsigned int cfb_stride; 89 unsigned int cfb_size; 90 unsigned int fence_y_offset; 91 u16 override_cfb_stride; 92 u16 interval; 93 s8 fence_id; 94 struct drm_rect dirty_rect; 95 }; 96 97 struct intel_fbc { 98 struct intel_display *display; 99 const struct intel_fbc_funcs *funcs; 100 101 /* 102 * This is always the inner lock when overlapping with 103 * struct_mutex and it's the outer lock when overlapping 104 * with stolen_lock. 105 */ 106 struct mutex lock; 107 unsigned int busy_bits; 108 109 struct i915_stolen_fb compressed_fb, compressed_llb; 110 111 enum intel_fbc_id id; 112 113 u8 limit; 114 115 bool false_color; 116 117 bool active; 118 bool activated; 119 bool flip_pending; 120 121 bool underrun_detected; 122 struct work_struct underrun_work; 123 124 /* 125 * This structure contains everything that's relevant to program the 126 * hardware registers. When we want to figure out if we need to disable 127 * and re-enable FBC for a new configuration we just check if there's 128 * something different in the struct. The genx_fbc_activate functions 129 * are supposed to read from it in order to program the registers. 130 */ 131 struct intel_fbc_state state; 132 const char *no_fbc_reason; 133 }; 134 135 /* plane stride in pixels */ 136 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state) 137 { 138 const struct drm_framebuffer *fb = plane_state->hw.fb; 139 unsigned int stride; 140 141 stride = plane_state->view.color_plane[0].mapping_stride; 142 if (!drm_rotation_90_or_270(plane_state->hw.rotation)) 143 stride /= fb->format->cpp[0]; 144 145 return stride; 146 } 147 148 static unsigned int intel_fbc_cfb_cpp(void) 149 { 150 return 4; /* FBC always 4 bytes per pixel */ 151 } 152 153 /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */ 154 static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state) 155 { 156 unsigned int cpp = intel_fbc_cfb_cpp(); 157 158 return intel_fbc_plane_stride(plane_state) * cpp; 159 } 160 161 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ 162 static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display, 163 unsigned int cpp, unsigned int width) 164 { 165 unsigned int limit = 4; /* 1:4 compression limit is the worst case */ 166 unsigned int height = 4; /* FBC segment is 4 lines */ 167 unsigned int stride; 168 169 /* minimum segment stride we can use */ 170 stride = width * cpp * height / limit; 171 172 /* 173 * Wa_16011863758: icl+ 174 * Avoid some hardware segment address miscalculation. 175 */ 176 if (DISPLAY_VER(display) >= 11) 177 stride += 64; 178 179 /* 180 * At least some of the platforms require each 4 line segment to 181 * be 512 byte aligned. Just do it always for simplicity. 182 */ 183 stride = ALIGN(stride, 512); 184 185 /* convert back to single line equivalent with 1:1 compression limit */ 186 return stride * limit / height; 187 } 188 189 /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ 190 static unsigned int _intel_fbc_cfb_stride(struct intel_display *display, 191 unsigned int cpp, unsigned int width, 192 unsigned int stride) 193 { 194 /* 195 * At least some of the platforms require each 4 line segment to 196 * be 512 byte aligned. Aligning each line to 512 bytes guarantees 197 * that regardless of the compression limit we choose later. 198 */ 199 if (DISPLAY_VER(display) >= 9) 200 return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(display, cpp, width)); 201 else 202 return stride; 203 } 204 205 static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) 206 { 207 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 208 unsigned int stride = intel_fbc_plane_cfb_stride(plane_state); 209 unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; 210 unsigned int cpp = intel_fbc_cfb_cpp(); 211 212 return _intel_fbc_cfb_stride(display, cpp, width, stride); 213 } 214 215 /* 216 * Maximum height the hardware will compress, on HSW+ 217 * additional lines (up to the actual plane height) will 218 * remain uncompressed. 219 */ 220 static unsigned int intel_fbc_max_cfb_height(struct intel_display *display) 221 { 222 if (DISPLAY_VER(display) >= 8) 223 return 2560; 224 else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 225 return 2048; 226 else 227 return 1536; 228 } 229 230 static unsigned int _intel_fbc_cfb_size(struct intel_display *display, 231 unsigned int height, unsigned int stride) 232 { 233 return min(height, intel_fbc_max_cfb_height(display)) * stride; 234 } 235 236 static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) 237 { 238 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 239 unsigned int height = drm_rect_height(&plane_state->uapi.src) >> 16; 240 241 return _intel_fbc_cfb_size(display, height, intel_fbc_cfb_stride(plane_state)); 242 } 243 244 static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state) 245 { 246 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 247 unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state); 248 unsigned int stride = intel_fbc_plane_cfb_stride(plane_state); 249 const struct drm_framebuffer *fb = plane_state->hw.fb; 250 251 /* 252 * Override stride in 64 byte units per 4 line segment. 253 * 254 * Gen9 hw miscalculates cfb stride for linear as 255 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so 256 * we always need to use the override there. 257 * 258 * wa_14022269668 For bmg, always program the FBC_STRIDE before fbc enable 259 */ 260 if (stride != stride_aligned || 261 (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR) || 262 display->platform.battlemage) 263 return stride_aligned * 4 / 64; 264 265 return 0; 266 } 267 268 static bool intel_fbc_has_fences(struct intel_display *display) 269 { 270 struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm); 271 272 return intel_gt_support_legacy_fencing(to_gt(i915)); 273 } 274 275 static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) 276 { 277 struct intel_display *display = fbc->display; 278 const struct intel_fbc_state *fbc_state = &fbc->state; 279 unsigned int cfb_stride; 280 u32 fbc_ctl; 281 282 cfb_stride = fbc_state->cfb_stride / fbc->limit; 283 284 /* FBC_CTL wants 32B or 64B units */ 285 if (DISPLAY_VER(display) == 2) 286 cfb_stride = (cfb_stride / 32) - 1; 287 else 288 cfb_stride = (cfb_stride / 64) - 1; 289 290 fbc_ctl = FBC_CTL_PERIODIC | 291 FBC_CTL_INTERVAL(fbc_state->interval) | 292 FBC_CTL_STRIDE(cfb_stride); 293 294 if (display->platform.i945gm) 295 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 296 297 if (fbc_state->fence_id >= 0) 298 fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id); 299 300 return fbc_ctl; 301 } 302 303 static u32 i965_fbc_ctl2(struct intel_fbc *fbc) 304 { 305 const struct intel_fbc_state *fbc_state = &fbc->state; 306 u32 fbc_ctl2; 307 308 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | 309 FBC_CTL_PLANE(fbc_state->plane->i9xx_plane); 310 311 if (fbc_state->fence_id >= 0) 312 fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; 313 314 return fbc_ctl2; 315 } 316 317 static void i8xx_fbc_deactivate(struct intel_fbc *fbc) 318 { 319 struct intel_display *display = fbc->display; 320 u32 fbc_ctl; 321 322 /* Disable compression */ 323 fbc_ctl = intel_de_read(display, FBC_CONTROL); 324 if ((fbc_ctl & FBC_CTL_EN) == 0) 325 return; 326 327 fbc_ctl &= ~FBC_CTL_EN; 328 intel_de_write(display, FBC_CONTROL, fbc_ctl); 329 330 /* Wait for compressing bit to clear */ 331 if (intel_de_wait_for_clear(display, FBC_STATUS, 332 FBC_STAT_COMPRESSING, 10)) { 333 drm_dbg_kms(display->drm, "FBC idle timed out\n"); 334 return; 335 } 336 } 337 338 static void i8xx_fbc_activate(struct intel_fbc *fbc) 339 { 340 struct intel_display *display = fbc->display; 341 const struct intel_fbc_state *fbc_state = &fbc->state; 342 int i; 343 344 /* Clear old tags */ 345 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 346 intel_de_write(display, FBC_TAG(i), 0); 347 348 if (DISPLAY_VER(display) == 4) { 349 intel_de_write(display, FBC_CONTROL2, 350 i965_fbc_ctl2(fbc)); 351 intel_de_write(display, FBC_FENCE_OFF, 352 fbc_state->fence_y_offset); 353 } 354 355 intel_de_write(display, FBC_CONTROL, 356 FBC_CTL_EN | i8xx_fbc_ctl(fbc)); 357 } 358 359 static bool i8xx_fbc_is_active(struct intel_fbc *fbc) 360 { 361 return intel_de_read(fbc->display, FBC_CONTROL) & FBC_CTL_EN; 362 } 363 364 static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) 365 { 366 return intel_de_read(fbc->display, FBC_STATUS) & 367 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); 368 } 369 370 static void i8xx_fbc_nuke(struct intel_fbc *fbc) 371 { 372 struct intel_display *display = fbc->display; 373 struct intel_fbc_state *fbc_state = &fbc->state; 374 enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; 375 376 intel_de_write_fw(display, DSPADDR(display, i9xx_plane), 377 intel_de_read_fw(display, DSPADDR(display, i9xx_plane))); 378 } 379 380 static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) 381 { 382 struct intel_display *display = fbc->display; 383 struct drm_i915_private *i915 = to_i915(display->drm); 384 385 drm_WARN_ON(display->drm, 386 range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), 387 i915_gem_stolen_node_offset(&fbc->compressed_fb), 388 U32_MAX)); 389 drm_WARN_ON(display->drm, 390 range_overflows_end_t(u64, i915_gem_stolen_area_address(i915), 391 i915_gem_stolen_node_offset(&fbc->compressed_llb), 392 U32_MAX)); 393 intel_de_write(display, FBC_CFB_BASE, 394 i915_gem_stolen_node_address(i915, &fbc->compressed_fb)); 395 intel_de_write(display, FBC_LL_BASE, 396 i915_gem_stolen_node_address(i915, &fbc->compressed_llb)); 397 } 398 399 static const struct intel_fbc_funcs i8xx_fbc_funcs = { 400 .activate = i8xx_fbc_activate, 401 .deactivate = i8xx_fbc_deactivate, 402 .is_active = i8xx_fbc_is_active, 403 .is_compressing = i8xx_fbc_is_compressing, 404 .nuke = i8xx_fbc_nuke, 405 .program_cfb = i8xx_fbc_program_cfb, 406 }; 407 408 static void i965_fbc_nuke(struct intel_fbc *fbc) 409 { 410 struct intel_display *display = fbc->display; 411 struct intel_fbc_state *fbc_state = &fbc->state; 412 enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; 413 414 intel_de_write_fw(display, DSPSURF(display, i9xx_plane), 415 intel_de_read_fw(display, DSPSURF(display, i9xx_plane))); 416 } 417 418 static const struct intel_fbc_funcs i965_fbc_funcs = { 419 .activate = i8xx_fbc_activate, 420 .deactivate = i8xx_fbc_deactivate, 421 .is_active = i8xx_fbc_is_active, 422 .is_compressing = i8xx_fbc_is_compressing, 423 .nuke = i965_fbc_nuke, 424 .program_cfb = i8xx_fbc_program_cfb, 425 }; 426 427 static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) 428 { 429 switch (fbc->limit) { 430 default: 431 MISSING_CASE(fbc->limit); 432 fallthrough; 433 case 1: 434 return DPFC_CTL_LIMIT_1X; 435 case 2: 436 return DPFC_CTL_LIMIT_2X; 437 case 4: 438 return DPFC_CTL_LIMIT_4X; 439 } 440 } 441 442 static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) 443 { 444 struct intel_display *display = fbc->display; 445 const struct intel_fbc_state *fbc_state = &fbc->state; 446 u32 dpfc_ctl; 447 448 dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | 449 DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane); 450 451 if (display->platform.g4x) 452 dpfc_ctl |= DPFC_CTL_SR_EN; 453 454 if (fbc_state->fence_id >= 0) { 455 dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; 456 457 if (DISPLAY_VER(display) < 6) 458 dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id); 459 } 460 461 return dpfc_ctl; 462 } 463 464 static void g4x_fbc_activate(struct intel_fbc *fbc) 465 { 466 struct intel_display *display = fbc->display; 467 const struct intel_fbc_state *fbc_state = &fbc->state; 468 469 intel_de_write(display, DPFC_FENCE_YOFF, 470 fbc_state->fence_y_offset); 471 472 intel_de_write(display, DPFC_CONTROL, 473 DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); 474 } 475 476 static void g4x_fbc_deactivate(struct intel_fbc *fbc) 477 { 478 struct intel_display *display = fbc->display; 479 u32 dpfc_ctl; 480 481 /* Disable compression */ 482 dpfc_ctl = intel_de_read(display, DPFC_CONTROL); 483 if (dpfc_ctl & DPFC_CTL_EN) { 484 dpfc_ctl &= ~DPFC_CTL_EN; 485 intel_de_write(display, DPFC_CONTROL, dpfc_ctl); 486 } 487 } 488 489 static bool g4x_fbc_is_active(struct intel_fbc *fbc) 490 { 491 return intel_de_read(fbc->display, DPFC_CONTROL) & DPFC_CTL_EN; 492 } 493 494 static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) 495 { 496 return intel_de_read(fbc->display, DPFC_STATUS) & DPFC_COMP_SEG_MASK; 497 } 498 499 static void g4x_fbc_program_cfb(struct intel_fbc *fbc) 500 { 501 struct intel_display *display = fbc->display; 502 503 intel_de_write(display, DPFC_CB_BASE, 504 i915_gem_stolen_node_offset(&fbc->compressed_fb)); 505 } 506 507 static const struct intel_fbc_funcs g4x_fbc_funcs = { 508 .activate = g4x_fbc_activate, 509 .deactivate = g4x_fbc_deactivate, 510 .is_active = g4x_fbc_is_active, 511 .is_compressing = g4x_fbc_is_compressing, 512 .nuke = i965_fbc_nuke, 513 .program_cfb = g4x_fbc_program_cfb, 514 }; 515 516 static void ilk_fbc_activate(struct intel_fbc *fbc) 517 { 518 struct intel_display *display = fbc->display; 519 struct intel_fbc_state *fbc_state = &fbc->state; 520 521 intel_de_write(display, ILK_DPFC_FENCE_YOFF(fbc->id), 522 fbc_state->fence_y_offset); 523 524 intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), 525 DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); 526 } 527 528 static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc, 529 bool disable) 530 { 531 struct intel_display *display = fbc->display; 532 533 if (display->platform.dg2) 534 intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS, 535 disable ? DG2_DPFC_GATING_DIS : 0); 536 else if (DISPLAY_VER(display) >= 14) 537 intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id), 538 MTL_DPFC_GATING_DIS, 539 disable ? MTL_DPFC_GATING_DIS : 0); 540 } 541 542 static void ilk_fbc_deactivate(struct intel_fbc *fbc) 543 { 544 struct intel_display *display = fbc->display; 545 u32 dpfc_ctl; 546 547 if (HAS_FBC_DIRTY_RECT(display)) 548 intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id), 0); 549 550 /* Disable compression */ 551 dpfc_ctl = intel_de_read(display, ILK_DPFC_CONTROL(fbc->id)); 552 if (dpfc_ctl & DPFC_CTL_EN) { 553 dpfc_ctl &= ~DPFC_CTL_EN; 554 intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); 555 } 556 } 557 558 static bool ilk_fbc_is_active(struct intel_fbc *fbc) 559 { 560 return intel_de_read(fbc->display, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN; 561 } 562 563 static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) 564 { 565 return intel_de_read(fbc->display, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK; 566 } 567 568 static void ilk_fbc_program_cfb(struct intel_fbc *fbc) 569 { 570 struct intel_display *display = fbc->display; 571 572 intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id), 573 i915_gem_stolen_node_offset(&fbc->compressed_fb)); 574 } 575 576 static const struct intel_fbc_funcs ilk_fbc_funcs = { 577 .activate = ilk_fbc_activate, 578 .deactivate = ilk_fbc_deactivate, 579 .is_active = ilk_fbc_is_active, 580 .is_compressing = ilk_fbc_is_compressing, 581 .nuke = i965_fbc_nuke, 582 .program_cfb = ilk_fbc_program_cfb, 583 }; 584 585 static void snb_fbc_program_fence(struct intel_fbc *fbc) 586 { 587 struct intel_display *display = fbc->display; 588 const struct intel_fbc_state *fbc_state = &fbc->state; 589 u32 ctl = 0; 590 591 if (fbc_state->fence_id >= 0) 592 ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id); 593 594 intel_de_write(display, SNB_DPFC_CTL_SA, ctl); 595 intel_de_write(display, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); 596 } 597 598 static void snb_fbc_activate(struct intel_fbc *fbc) 599 { 600 snb_fbc_program_fence(fbc); 601 602 ilk_fbc_activate(fbc); 603 } 604 605 static void snb_fbc_nuke(struct intel_fbc *fbc) 606 { 607 struct intel_display *display = fbc->display; 608 609 intel_de_write(display, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE); 610 intel_de_posting_read(display, MSG_FBC_REND_STATE(fbc->id)); 611 } 612 613 static const struct intel_fbc_funcs snb_fbc_funcs = { 614 .activate = snb_fbc_activate, 615 .deactivate = ilk_fbc_deactivate, 616 .is_active = ilk_fbc_is_active, 617 .is_compressing = ilk_fbc_is_compressing, 618 .nuke = snb_fbc_nuke, 619 .program_cfb = ilk_fbc_program_cfb, 620 }; 621 622 static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) 623 { 624 struct intel_display *display = fbc->display; 625 const struct intel_fbc_state *fbc_state = &fbc->state; 626 u32 val = 0; 627 628 if (fbc_state->override_cfb_stride) 629 val |= FBC_STRIDE_OVERRIDE | 630 FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); 631 632 intel_de_write(display, GLK_FBC_STRIDE(fbc->id), val); 633 } 634 635 static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) 636 { 637 struct intel_display *display = fbc->display; 638 const struct intel_fbc_state *fbc_state = &fbc->state; 639 u32 val = 0; 640 641 /* Display WA #0529: skl, kbl, bxt. */ 642 if (fbc_state->override_cfb_stride) 643 val |= CHICKEN_FBC_STRIDE_OVERRIDE | 644 CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); 645 646 intel_de_rmw(display, CHICKEN_MISC_4, 647 CHICKEN_FBC_STRIDE_OVERRIDE | 648 CHICKEN_FBC_STRIDE_MASK, val); 649 } 650 651 static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) 652 { 653 struct intel_display *display = fbc->display; 654 const struct intel_fbc_state *fbc_state = &fbc->state; 655 u32 dpfc_ctl; 656 657 dpfc_ctl = g4x_dpfc_ctl_limit(fbc); 658 659 if (display->platform.ivybridge) 660 dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); 661 662 if (DISPLAY_VER(display) >= 20) 663 dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id); 664 665 if (fbc_state->fence_id >= 0) 666 dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; 667 668 if (fbc->false_color) 669 dpfc_ctl |= DPFC_CTL_FALSE_COLOR; 670 671 return dpfc_ctl; 672 } 673 674 static void ivb_fbc_activate(struct intel_fbc *fbc) 675 { 676 struct intel_display *display = fbc->display; 677 u32 dpfc_ctl; 678 679 if (DISPLAY_VER(display) >= 10) 680 glk_fbc_program_cfb_stride(fbc); 681 else if (DISPLAY_VER(display) == 9) 682 skl_fbc_program_cfb_stride(fbc); 683 684 if (intel_fbc_has_fences(display)) 685 snb_fbc_program_fence(fbc); 686 687 /* wa_14019417088 Alternative WA*/ 688 dpfc_ctl = ivb_dpfc_ctl(fbc); 689 if (DISPLAY_VER(display) >= 20) 690 intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); 691 692 if (HAS_FBC_DIRTY_RECT(display)) 693 intel_de_write(display, XE3_FBC_DIRTY_CTL(fbc->id), 694 FBC_DIRTY_RECT_EN); 695 696 intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), 697 DPFC_CTL_EN | dpfc_ctl); 698 } 699 700 static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) 701 { 702 return intel_de_read(fbc->display, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB; 703 } 704 705 static void ivb_fbc_set_false_color(struct intel_fbc *fbc, 706 bool enable) 707 { 708 intel_de_rmw(fbc->display, ILK_DPFC_CONTROL(fbc->id), 709 DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0); 710 } 711 712 static const struct intel_fbc_funcs ivb_fbc_funcs = { 713 .activate = ivb_fbc_activate, 714 .deactivate = ilk_fbc_deactivate, 715 .is_active = ilk_fbc_is_active, 716 .is_compressing = ivb_fbc_is_compressing, 717 .nuke = snb_fbc_nuke, 718 .program_cfb = ilk_fbc_program_cfb, 719 .set_false_color = ivb_fbc_set_false_color, 720 }; 721 722 static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) 723 { 724 return fbc->funcs->is_active(fbc); 725 } 726 727 static void intel_fbc_hw_activate(struct intel_fbc *fbc) 728 { 729 trace_intel_fbc_activate(fbc->state.plane); 730 731 fbc->active = true; 732 fbc->activated = true; 733 734 fbc->funcs->activate(fbc); 735 } 736 737 static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) 738 { 739 trace_intel_fbc_deactivate(fbc->state.plane); 740 741 fbc->active = false; 742 743 fbc->funcs->deactivate(fbc); 744 } 745 746 static bool intel_fbc_is_compressing(struct intel_fbc *fbc) 747 { 748 return fbc->funcs->is_compressing(fbc); 749 } 750 751 static void intel_fbc_nuke(struct intel_fbc *fbc) 752 { 753 struct intel_display *display = fbc->display; 754 755 lockdep_assert_held(&fbc->lock); 756 drm_WARN_ON(display->drm, fbc->flip_pending); 757 758 trace_intel_fbc_nuke(fbc->state.plane); 759 760 fbc->funcs->nuke(fbc); 761 } 762 763 static void intel_fbc_activate(struct intel_fbc *fbc) 764 { 765 struct intel_display *display = fbc->display; 766 767 lockdep_assert_held(&fbc->lock); 768 769 /* only the fence can change for a flip nuke */ 770 if (fbc->active && !intel_fbc_has_fences(display)) 771 return; 772 /* 773 * In case of FBC dirt rect, any updates to the FBC registers will 774 * trigger the nuke. 775 */ 776 drm_WARN_ON(display->drm, fbc->active && HAS_FBC_DIRTY_RECT(display)); 777 778 intel_fbc_hw_activate(fbc); 779 intel_fbc_nuke(fbc); 780 781 fbc->no_fbc_reason = NULL; 782 } 783 784 static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) 785 { 786 lockdep_assert_held(&fbc->lock); 787 788 if (fbc->active) 789 intel_fbc_hw_deactivate(fbc); 790 791 fbc->no_fbc_reason = reason; 792 } 793 794 static u64 intel_fbc_cfb_base_max(struct intel_display *display) 795 { 796 if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 797 return BIT_ULL(28); 798 else 799 return BIT_ULL(32); 800 } 801 802 static u64 intel_fbc_stolen_end(struct intel_display *display) 803 { 804 struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm); 805 u64 end; 806 807 /* The FBC hardware for BDW/SKL doesn't have access to the stolen 808 * reserved range size, so it always assumes the maximum (8mb) is used. 809 * If we enable FBC using a CFB on that memory range we'll get FIFO 810 * underruns, even if that range is not reserved by the BIOS. */ 811 if (display->platform.broadwell || 812 (DISPLAY_VER(display) == 9 && !display->platform.broxton)) 813 end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024; 814 else 815 end = U64_MAX; 816 817 return min(end, intel_fbc_cfb_base_max(display)); 818 } 819 820 static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) 821 { 822 return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1; 823 } 824 825 static int intel_fbc_max_limit(struct intel_display *display) 826 { 827 /* WaFbcOnly1to1Ratio:ctg */ 828 if (display->platform.g4x) 829 return 1; 830 831 /* 832 * FBC2 can only do 1:1, 1:2, 1:4, we limit 833 * FBC1 to the same out of convenience. 834 */ 835 return 4; 836 } 837 838 static int find_compression_limit(struct intel_fbc *fbc, 839 unsigned int size, int min_limit) 840 { 841 struct intel_display *display = fbc->display; 842 struct drm_i915_private *i915 = to_i915(display->drm); 843 u64 end = intel_fbc_stolen_end(display); 844 int ret, limit = min_limit; 845 846 size /= limit; 847 848 /* Try to over-allocate to reduce reallocations and fragmentation. */ 849 ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, 850 size <<= 1, 4096, 0, end); 851 if (ret == 0) 852 return limit; 853 854 for (; limit <= intel_fbc_max_limit(display); limit <<= 1) { 855 ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, 856 size >>= 1, 4096, 0, end); 857 if (ret == 0) 858 return limit; 859 } 860 861 return 0; 862 } 863 864 static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, 865 unsigned int size, int min_limit) 866 { 867 struct intel_display *display = fbc->display; 868 struct drm_i915_private *i915 = to_i915(display->drm); 869 int ret; 870 871 drm_WARN_ON(display->drm, 872 i915_gem_stolen_node_allocated(&fbc->compressed_fb)); 873 drm_WARN_ON(display->drm, 874 i915_gem_stolen_node_allocated(&fbc->compressed_llb)); 875 876 if (DISPLAY_VER(display) < 5 && !display->platform.g4x) { 877 ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 878 4096, 4096); 879 if (ret) 880 goto err; 881 } 882 883 ret = find_compression_limit(fbc, size, min_limit); 884 if (!ret) 885 goto err_llb; 886 else if (ret > min_limit) 887 drm_info_once(display->drm, 888 "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); 889 890 fbc->limit = ret; 891 892 drm_dbg_kms(display->drm, 893 "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", 894 i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit); 895 return 0; 896 897 err_llb: 898 if (i915_gem_stolen_node_allocated(&fbc->compressed_llb)) 899 i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); 900 err: 901 if (i915_gem_stolen_initialized(i915)) 902 drm_info_once(display->drm, 903 "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 904 return -ENOSPC; 905 } 906 907 static void intel_fbc_program_cfb(struct intel_fbc *fbc) 908 { 909 fbc->funcs->program_cfb(fbc); 910 } 911 912 static void intel_fbc_program_workarounds(struct intel_fbc *fbc) 913 { 914 struct intel_display *display = fbc->display; 915 916 if (display->platform.skylake || display->platform.broxton) { 917 /* 918 * WaFbcHighMemBwCorruptionAvoidance:skl,bxt 919 * Display WA #0883: skl,bxt 920 */ 921 intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 922 0, DPFC_DISABLE_DUMMY0); 923 } 924 925 if (display->platform.skylake || display->platform.kabylake || 926 display->platform.coffeelake || display->platform.cometlake) { 927 /* 928 * WaFbcNukeOnHostModify:skl,kbl,cfl 929 * Display WA #0873: skl,kbl,cfl 930 */ 931 intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 932 0, DPFC_NUKE_ON_ANY_MODIFICATION); 933 } 934 935 /* Wa_1409120013:icl,jsl,tgl,dg1 */ 936 if (IS_DISPLAY_VER(display, 11, 12)) 937 intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 938 0, DPFC_CHICKEN_COMP_DUMMY_PIXEL); 939 940 /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */ 941 if (DISPLAY_VER(display) >= 11 && !display->platform.dg2) 942 intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id), 943 0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION); 944 945 /* wa_18038517565 Disable DPFC clock gating before FBC enable */ 946 if (display->platform.dg2 || DISPLAY_VER(display) >= 14) 947 fbc_compressor_clkgate_disable_wa(fbc, true); 948 } 949 950 static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) 951 { 952 struct intel_display *display = fbc->display; 953 struct drm_i915_private *i915 = to_i915(display->drm); 954 955 if (WARN_ON(intel_fbc_hw_is_active(fbc))) 956 return; 957 958 if (i915_gem_stolen_node_allocated(&fbc->compressed_llb)) 959 i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); 960 if (i915_gem_stolen_node_allocated(&fbc->compressed_fb)) 961 i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); 962 } 963 964 void intel_fbc_cleanup(struct intel_display *display) 965 { 966 struct intel_fbc *fbc; 967 enum intel_fbc_id fbc_id; 968 969 for_each_intel_fbc(display, fbc, fbc_id) { 970 mutex_lock(&fbc->lock); 971 __intel_fbc_cleanup_cfb(fbc); 972 mutex_unlock(&fbc->lock); 973 974 kfree(fbc); 975 } 976 } 977 978 static bool i8xx_fbc_stride_is_valid(const struct intel_plane_state *plane_state) 979 { 980 const struct drm_framebuffer *fb = plane_state->hw.fb; 981 unsigned int stride = intel_fbc_plane_stride(plane_state) * 982 fb->format->cpp[0]; 983 984 return stride == 4096 || stride == 8192; 985 } 986 987 static bool i965_fbc_stride_is_valid(const struct intel_plane_state *plane_state) 988 { 989 const struct drm_framebuffer *fb = plane_state->hw.fb; 990 unsigned int stride = intel_fbc_plane_stride(plane_state) * 991 fb->format->cpp[0]; 992 993 return stride >= 2048 && stride <= 16384; 994 } 995 996 static bool g4x_fbc_stride_is_valid(const struct intel_plane_state *plane_state) 997 { 998 return true; 999 } 1000 1001 static bool skl_fbc_stride_is_valid(const struct intel_plane_state *plane_state) 1002 { 1003 const struct drm_framebuffer *fb = plane_state->hw.fb; 1004 unsigned int stride = intel_fbc_plane_stride(plane_state) * 1005 fb->format->cpp[0]; 1006 1007 /* Display WA #1105: skl,bxt,kbl,cfl,glk */ 1008 if (fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) 1009 return false; 1010 1011 return true; 1012 } 1013 1014 static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state) 1015 { 1016 return true; 1017 } 1018 1019 static bool stride_is_valid(const struct intel_plane_state *plane_state) 1020 { 1021 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1022 1023 if (DISPLAY_VER(display) >= 11) 1024 return icl_fbc_stride_is_valid(plane_state); 1025 else if (DISPLAY_VER(display) >= 9) 1026 return skl_fbc_stride_is_valid(plane_state); 1027 else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 1028 return g4x_fbc_stride_is_valid(plane_state); 1029 else if (DISPLAY_VER(display) == 4) 1030 return i965_fbc_stride_is_valid(plane_state); 1031 else 1032 return i8xx_fbc_stride_is_valid(plane_state); 1033 } 1034 1035 static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) 1036 { 1037 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1038 const struct drm_framebuffer *fb = plane_state->hw.fb; 1039 1040 switch (fb->format->format) { 1041 case DRM_FORMAT_XRGB8888: 1042 case DRM_FORMAT_XBGR8888: 1043 return true; 1044 case DRM_FORMAT_XRGB1555: 1045 case DRM_FORMAT_RGB565: 1046 /* 16bpp not supported on gen2 */ 1047 if (DISPLAY_VER(display) == 2) 1048 return false; 1049 return true; 1050 default: 1051 return false; 1052 } 1053 } 1054 1055 static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) 1056 { 1057 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1058 const struct drm_framebuffer *fb = plane_state->hw.fb; 1059 1060 switch (fb->format->format) { 1061 case DRM_FORMAT_XRGB8888: 1062 case DRM_FORMAT_XBGR8888: 1063 return true; 1064 case DRM_FORMAT_RGB565: 1065 /* WaFbcOnly1to1Ratio:ctg */ 1066 if (display->platform.g4x) 1067 return false; 1068 return true; 1069 default: 1070 return false; 1071 } 1072 } 1073 1074 static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state) 1075 { 1076 const struct drm_framebuffer *fb = plane_state->hw.fb; 1077 1078 switch (fb->format->format) { 1079 case DRM_FORMAT_XRGB8888: 1080 case DRM_FORMAT_XBGR8888: 1081 case DRM_FORMAT_ARGB8888: 1082 case DRM_FORMAT_ABGR8888: 1083 case DRM_FORMAT_RGB565: 1084 return true; 1085 default: 1086 return false; 1087 } 1088 } 1089 1090 static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) 1091 { 1092 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1093 1094 if (DISPLAY_VER(display) >= 20) 1095 return lnl_fbc_pixel_format_is_valid(plane_state); 1096 else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 1097 return g4x_fbc_pixel_format_is_valid(plane_state); 1098 else 1099 return i8xx_fbc_pixel_format_is_valid(plane_state); 1100 } 1101 1102 static bool i8xx_fbc_rotation_is_valid(const struct intel_plane_state *plane_state) 1103 { 1104 return plane_state->hw.rotation == DRM_MODE_ROTATE_0; 1105 } 1106 1107 static bool g4x_fbc_rotation_is_valid(const struct intel_plane_state *plane_state) 1108 { 1109 return true; 1110 } 1111 1112 static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_state) 1113 { 1114 const struct drm_framebuffer *fb = plane_state->hw.fb; 1115 unsigned int rotation = plane_state->hw.rotation; 1116 1117 if (fb->format->format == DRM_FORMAT_RGB565 && 1118 drm_rotation_90_or_270(rotation)) 1119 return false; 1120 1121 return true; 1122 } 1123 1124 static bool rotation_is_valid(const struct intel_plane_state *plane_state) 1125 { 1126 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1127 1128 if (DISPLAY_VER(display) >= 9) 1129 return skl_fbc_rotation_is_valid(plane_state); 1130 else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) 1131 return g4x_fbc_rotation_is_valid(plane_state); 1132 else 1133 return i8xx_fbc_rotation_is_valid(plane_state); 1134 } 1135 1136 static void intel_fbc_max_surface_size(struct intel_display *display, 1137 unsigned int *w, unsigned int *h) 1138 { 1139 if (DISPLAY_VER(display) >= 11) { 1140 *w = 8192; 1141 *h = 4096; 1142 } else if (DISPLAY_VER(display) >= 10) { 1143 *w = 5120; 1144 *h = 4096; 1145 } else if (DISPLAY_VER(display) >= 7) { 1146 *w = 4096; 1147 *h = 4096; 1148 } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) { 1149 *w = 4096; 1150 *h = 2048; 1151 } else { 1152 *w = 2048; 1153 *h = 1536; 1154 } 1155 } 1156 1157 /* 1158 * For some reason, the hardware tracking starts looking at whatever we 1159 * programmed as the display plane base address register. It does not look at 1160 * the X and Y offset registers. That's why we include the src x/y offsets 1161 * instead of just looking at the plane size. 1162 */ 1163 static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_state) 1164 { 1165 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1166 unsigned int effective_w, effective_h, max_w, max_h; 1167 1168 intel_fbc_max_surface_size(display, &max_w, &max_h); 1169 1170 effective_w = plane_state->view.color_plane[0].x + 1171 (drm_rect_width(&plane_state->uapi.src) >> 16); 1172 effective_h = plane_state->view.color_plane[0].y + 1173 (drm_rect_height(&plane_state->uapi.src) >> 16); 1174 1175 return effective_w <= max_w && effective_h <= max_h; 1176 } 1177 1178 static void intel_fbc_max_plane_size(struct intel_display *display, 1179 unsigned int *w, unsigned int *h) 1180 { 1181 if (DISPLAY_VER(display) >= 10) { 1182 *w = 5120; 1183 *h = 4096; 1184 } else if (DISPLAY_VER(display) >= 8 || display->platform.haswell) { 1185 *w = 4096; 1186 *h = 4096; 1187 } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) { 1188 *w = 4096; 1189 *h = 2048; 1190 } else { 1191 *w = 2048; 1192 *h = 1536; 1193 } 1194 } 1195 1196 static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state) 1197 { 1198 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1199 unsigned int w, h, max_w, max_h; 1200 1201 intel_fbc_max_plane_size(display, &max_w, &max_h); 1202 1203 w = drm_rect_width(&plane_state->uapi.src) >> 16; 1204 h = drm_rect_height(&plane_state->uapi.src) >> 16; 1205 1206 return w <= max_w && h <= max_h; 1207 } 1208 1209 static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state) 1210 { 1211 const struct drm_framebuffer *fb = plane_state->hw.fb; 1212 1213 return fb->modifier == I915_FORMAT_MOD_X_TILED; 1214 } 1215 1216 static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state) 1217 { 1218 return true; 1219 } 1220 1221 static bool tiling_is_valid(const struct intel_plane_state *plane_state) 1222 { 1223 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1224 1225 if (DISPLAY_VER(display) >= 9) 1226 return skl_fbc_tiling_valid(plane_state); 1227 else 1228 return i8xx_fbc_tiling_valid(plane_state); 1229 } 1230 1231 static void 1232 intel_fbc_invalidate_dirty_rect(struct intel_fbc *fbc) 1233 { 1234 lockdep_assert_held(&fbc->lock); 1235 1236 fbc->state.dirty_rect = DRM_RECT_INIT(0, 0, 0, 0); 1237 } 1238 1239 static void 1240 intel_fbc_program_dirty_rect(struct intel_dsb *dsb, struct intel_fbc *fbc, 1241 const struct drm_rect *fbc_dirty_rect) 1242 { 1243 struct intel_display *display = fbc->display; 1244 1245 drm_WARN_ON(display->drm, fbc_dirty_rect->y2 == 0); 1246 1247 intel_de_write_dsb(display, dsb, XE3_FBC_DIRTY_RECT(fbc->id), 1248 FBC_DIRTY_RECT_START_LINE(fbc_dirty_rect->y1) | 1249 FBC_DIRTY_RECT_END_LINE(fbc_dirty_rect->y2 - 1)); 1250 } 1251 1252 static void 1253 intel_fbc_dirty_rect_update(struct intel_dsb *dsb, struct intel_fbc *fbc) 1254 { 1255 const struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect; 1256 1257 lockdep_assert_held(&fbc->lock); 1258 1259 if (!drm_rect_visible(fbc_dirty_rect)) 1260 return; 1261 1262 intel_fbc_program_dirty_rect(dsb, fbc, fbc_dirty_rect); 1263 } 1264 1265 void 1266 intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb, 1267 struct intel_plane *plane) 1268 { 1269 struct intel_display *display = to_intel_display(plane); 1270 struct intel_fbc *fbc = plane->fbc; 1271 1272 if (!HAS_FBC_DIRTY_RECT(display)) 1273 return; 1274 1275 mutex_lock(&fbc->lock); 1276 1277 if (fbc->state.plane == plane) 1278 intel_fbc_dirty_rect_update(dsb, fbc); 1279 1280 mutex_unlock(&fbc->lock); 1281 } 1282 1283 static void 1284 intel_fbc_hw_intialize_dirty_rect(struct intel_fbc *fbc, 1285 const struct intel_plane_state *plane_state) 1286 { 1287 struct drm_rect src; 1288 1289 /* 1290 * Initializing the FBC HW with the whole plane area as the dirty rect. 1291 * This is to ensure that we have valid coords be written to the 1292 * HW as dirty rect. 1293 */ 1294 drm_rect_fp_to_int(&src, &plane_state->uapi.src); 1295 1296 intel_fbc_program_dirty_rect(NULL, fbc, &src); 1297 } 1298 1299 static void intel_fbc_update_state(struct intel_atomic_state *state, 1300 struct intel_crtc *crtc, 1301 struct intel_plane *plane) 1302 { 1303 struct intel_display *display = to_intel_display(state->base.dev); 1304 const struct intel_crtc_state *crtc_state = 1305 intel_atomic_get_new_crtc_state(state, crtc); 1306 const struct intel_plane_state *plane_state = 1307 intel_atomic_get_new_plane_state(state, plane); 1308 struct intel_fbc *fbc = plane->fbc; 1309 struct intel_fbc_state *fbc_state = &fbc->state; 1310 1311 WARN_ON(plane_state->no_fbc_reason); 1312 WARN_ON(fbc_state->plane && fbc_state->plane != plane); 1313 1314 fbc_state->plane = plane; 1315 1316 /* FBC1 compression interval: arbitrary choice of 1 second */ 1317 fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); 1318 1319 fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state); 1320 1321 drm_WARN_ON(display->drm, plane_state->flags & PLANE_HAS_FENCE && 1322 !intel_fbc_has_fences(display)); 1323 1324 if (plane_state->flags & PLANE_HAS_FENCE) 1325 fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma); 1326 else 1327 fbc_state->fence_id = -1; 1328 1329 fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state); 1330 fbc_state->cfb_size = intel_fbc_cfb_size(plane_state); 1331 fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state); 1332 } 1333 1334 static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state) 1335 { 1336 struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev); 1337 1338 /* 1339 * The use of a CPU fence is one of two ways to detect writes by the 1340 * CPU to the scanout and trigger updates to the FBC. 1341 * 1342 * The other method is by software tracking (see 1343 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke 1344 * the current compressed buffer and recompress it. 1345 * 1346 * Note that is possible for a tiled surface to be unmappable (and 1347 * so have no fence associated with it) due to aperture constraints 1348 * at the time of pinning. 1349 */ 1350 return DISPLAY_VER(display) >= 9 || 1351 (plane_state->flags & PLANE_HAS_FENCE && 1352 i915_vma_fence_id(plane_state->ggtt_vma) != -1); 1353 } 1354 1355 static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state) 1356 { 1357 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1358 struct intel_fbc *fbc = plane->fbc; 1359 1360 return intel_fbc_min_limit(plane_state) <= fbc->limit && 1361 intel_fbc_cfb_size(plane_state) <= fbc->limit * 1362 i915_gem_stolen_node_size(&fbc->compressed_fb); 1363 } 1364 1365 static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) 1366 { 1367 return !plane_state->no_fbc_reason && 1368 intel_fbc_is_fence_ok(plane_state) && 1369 intel_fbc_is_cfb_ok(plane_state); 1370 } 1371 1372 static void 1373 __intel_fbc_prepare_dirty_rect(const struct intel_plane_state *plane_state, 1374 const struct intel_crtc_state *crtc_state) 1375 { 1376 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1377 struct intel_fbc *fbc = plane->fbc; 1378 struct drm_rect *fbc_dirty_rect = &fbc->state.dirty_rect; 1379 int width = drm_rect_width(&plane_state->uapi.src) >> 16; 1380 const struct drm_rect *damage = &plane_state->damage; 1381 int y_offset = plane_state->view.color_plane[0].y; 1382 1383 lockdep_assert_held(&fbc->lock); 1384 1385 if (intel_crtc_needs_modeset(crtc_state) || 1386 !intel_fbc_is_ok(plane_state)) { 1387 intel_fbc_invalidate_dirty_rect(fbc); 1388 return; 1389 } 1390 1391 if (drm_rect_visible(damage)) 1392 *fbc_dirty_rect = *damage; 1393 else 1394 /* dirty rect must cover at least one line */ 1395 *fbc_dirty_rect = DRM_RECT_INIT(0, y_offset, width, 1); 1396 } 1397 1398 void 1399 intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state, 1400 struct intel_crtc *crtc) 1401 { 1402 struct intel_display *display = to_intel_display(state); 1403 const struct intel_crtc_state *crtc_state = 1404 intel_atomic_get_new_crtc_state(state, crtc); 1405 struct intel_plane_state *plane_state; 1406 struct intel_plane *plane; 1407 int i; 1408 1409 if (!HAS_FBC_DIRTY_RECT(display)) 1410 return; 1411 1412 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1413 struct intel_fbc *fbc = plane->fbc; 1414 1415 if (!fbc || plane->pipe != crtc->pipe) 1416 continue; 1417 1418 mutex_lock(&fbc->lock); 1419 1420 if (fbc->state.plane == plane) 1421 __intel_fbc_prepare_dirty_rect(plane_state, 1422 crtc_state); 1423 1424 mutex_unlock(&fbc->lock); 1425 } 1426 } 1427 1428 static int intel_fbc_check_plane(struct intel_atomic_state *state, 1429 struct intel_plane *plane) 1430 { 1431 struct intel_display *display = to_intel_display(state->base.dev); 1432 struct drm_i915_private *i915 = to_i915(display->drm); 1433 struct intel_plane_state *plane_state = 1434 intel_atomic_get_new_plane_state(state, plane); 1435 const struct drm_framebuffer *fb = plane_state->hw.fb; 1436 struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); 1437 const struct intel_crtc_state *crtc_state; 1438 struct intel_fbc *fbc = plane->fbc; 1439 1440 if (!fbc) 1441 return 0; 1442 1443 if (!i915_gem_stolen_initialized(i915)) { 1444 plane_state->no_fbc_reason = "stolen memory not initialised"; 1445 return 0; 1446 } 1447 1448 if (intel_vgpu_active(i915)) { 1449 plane_state->no_fbc_reason = "VGPU active"; 1450 return 0; 1451 } 1452 1453 if (!display->params.enable_fbc) { 1454 plane_state->no_fbc_reason = "disabled per module param or by default"; 1455 return 0; 1456 } 1457 1458 if (!plane_state->uapi.visible) { 1459 plane_state->no_fbc_reason = "plane not visible"; 1460 return 0; 1461 } 1462 1463 if (intel_display_needs_wa_16023588340(display)) { 1464 plane_state->no_fbc_reason = "Wa_16023588340"; 1465 return 0; 1466 } 1467 1468 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ 1469 if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) { 1470 plane_state->no_fbc_reason = "VT-d enabled"; 1471 return 0; 1472 } 1473 1474 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 1475 1476 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 1477 plane_state->no_fbc_reason = "interlaced mode not supported"; 1478 return 0; 1479 } 1480 1481 if (crtc_state->double_wide) { 1482 plane_state->no_fbc_reason = "double wide pipe not supported"; 1483 return 0; 1484 } 1485 1486 /* 1487 * Display 12+ is not supporting FBC with PSR2. 1488 * Recommendation is to keep this combination disabled 1489 * Bspec: 50422 HSD: 14010260002 1490 * 1491 * TODO: Implement a logic to select between PSR2 selective fetch and 1492 * FBC based on Bspec: 68881 in xe2lpd onwards. 1493 * 1494 * As we still see some strange underruns in those platforms while 1495 * disabling PSR2, keep FBC disabled in case of selective update is on 1496 * until the selection logic is implemented. 1497 */ 1498 if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) { 1499 plane_state->no_fbc_reason = "Selective update enabled"; 1500 return 0; 1501 } 1502 1503 /* Wa_14016291713 */ 1504 if ((IS_DISPLAY_VER(display, 12, 13) || 1505 IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0)) && 1506 crtc_state->has_psr && !crtc_state->has_panel_replay) { 1507 plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; 1508 return 0; 1509 } 1510 1511 if (!pixel_format_is_valid(plane_state)) { 1512 plane_state->no_fbc_reason = "pixel format not supported"; 1513 return 0; 1514 } 1515 1516 if (!tiling_is_valid(plane_state)) { 1517 plane_state->no_fbc_reason = "tiling not supported"; 1518 return 0; 1519 } 1520 1521 if (!rotation_is_valid(plane_state)) { 1522 plane_state->no_fbc_reason = "rotation not supported"; 1523 return 0; 1524 } 1525 1526 if (!stride_is_valid(plane_state)) { 1527 plane_state->no_fbc_reason = "stride not supported"; 1528 return 0; 1529 } 1530 1531 if (DISPLAY_VER(display) < 20 && 1532 plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && 1533 fb->format->has_alpha) { 1534 plane_state->no_fbc_reason = "per-pixel alpha not supported"; 1535 return 0; 1536 } 1537 1538 if (!intel_fbc_plane_size_valid(plane_state)) { 1539 plane_state->no_fbc_reason = "plane size too big"; 1540 return 0; 1541 } 1542 1543 if (!intel_fbc_surface_size_ok(plane_state)) { 1544 plane_state->no_fbc_reason = "surface size too big"; 1545 return 0; 1546 } 1547 1548 /* 1549 * Work around a problem on GEN9+ HW, where enabling FBC on a plane 1550 * having a Y offset that isn't divisible by 4 causes FIFO underrun 1551 * and screen flicker. 1552 */ 1553 if (DISPLAY_VER(display) >= 9 && 1554 plane_state->view.color_plane[0].y & 3) { 1555 plane_state->no_fbc_reason = "plane start Y offset misaligned"; 1556 return 0; 1557 } 1558 1559 /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ 1560 if (DISPLAY_VER(display) >= 11 && 1561 (plane_state->view.color_plane[0].y + 1562 (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { 1563 plane_state->no_fbc_reason = "plane end Y offset misaligned"; 1564 return 0; 1565 } 1566 1567 /* WaFbcExceedCdClockThreshold:hsw,bdw */ 1568 if (display->platform.haswell || display->platform.broadwell) { 1569 const struct intel_cdclk_state *cdclk_state; 1570 1571 cdclk_state = intel_atomic_get_cdclk_state(state); 1572 if (IS_ERR(cdclk_state)) 1573 return PTR_ERR(cdclk_state); 1574 1575 if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) { 1576 plane_state->no_fbc_reason = "pixel rate too high"; 1577 return 0; 1578 } 1579 } 1580 1581 plane_state->no_fbc_reason = NULL; 1582 1583 return 0; 1584 } 1585 1586 1587 static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state, 1588 struct intel_crtc *crtc, 1589 struct intel_plane *plane) 1590 { 1591 const struct intel_crtc_state *new_crtc_state = 1592 intel_atomic_get_new_crtc_state(state, crtc); 1593 const struct intel_plane_state *old_plane_state = 1594 intel_atomic_get_old_plane_state(state, plane); 1595 const struct intel_plane_state *new_plane_state = 1596 intel_atomic_get_new_plane_state(state, plane); 1597 const struct drm_framebuffer *old_fb = old_plane_state->hw.fb; 1598 const struct drm_framebuffer *new_fb = new_plane_state->hw.fb; 1599 1600 if (intel_crtc_needs_modeset(new_crtc_state)) 1601 return false; 1602 1603 if (!intel_fbc_is_ok(old_plane_state) || 1604 !intel_fbc_is_ok(new_plane_state)) 1605 return false; 1606 1607 if (old_fb->format->format != new_fb->format->format) 1608 return false; 1609 1610 if (old_fb->modifier != new_fb->modifier) 1611 return false; 1612 1613 if (intel_fbc_plane_stride(old_plane_state) != 1614 intel_fbc_plane_stride(new_plane_state)) 1615 return false; 1616 1617 if (intel_fbc_cfb_stride(old_plane_state) != 1618 intel_fbc_cfb_stride(new_plane_state)) 1619 return false; 1620 1621 if (intel_fbc_cfb_size(old_plane_state) != 1622 intel_fbc_cfb_size(new_plane_state)) 1623 return false; 1624 1625 if (intel_fbc_override_cfb_stride(old_plane_state) != 1626 intel_fbc_override_cfb_stride(new_plane_state)) 1627 return false; 1628 1629 return true; 1630 } 1631 1632 static bool __intel_fbc_pre_update(struct intel_atomic_state *state, 1633 struct intel_crtc *crtc, 1634 struct intel_plane *plane) 1635 { 1636 struct intel_display *display = to_intel_display(state->base.dev); 1637 struct intel_fbc *fbc = plane->fbc; 1638 bool need_vblank_wait = false; 1639 1640 lockdep_assert_held(&fbc->lock); 1641 1642 fbc->flip_pending = true; 1643 1644 if (intel_fbc_can_flip_nuke(state, crtc, plane)) 1645 return need_vblank_wait; 1646 1647 intel_fbc_deactivate(fbc, "update pending"); 1648 1649 /* 1650 * Display WA #1198: glk+ 1651 * Need an extra vblank wait between FBC disable and most plane 1652 * updates. Bspec says this is only needed for plane disable, but 1653 * that is not true. Touching most plane registers will cause the 1654 * corruption to appear. Also SKL/derivatives do not seem to be 1655 * affected. 1656 * 1657 * TODO: could optimize this a bit by sampling the frame 1658 * counter when we disable FBC (if it was already done earlier) 1659 * and skipping the extra vblank wait before the plane update 1660 * if at least one frame has already passed. 1661 */ 1662 if (fbc->activated && DISPLAY_VER(display) >= 10) 1663 need_vblank_wait = true; 1664 fbc->activated = false; 1665 1666 return need_vblank_wait; 1667 } 1668 1669 bool intel_fbc_pre_update(struct intel_atomic_state *state, 1670 struct intel_crtc *crtc) 1671 { 1672 const struct intel_plane_state __maybe_unused *plane_state; 1673 bool need_vblank_wait = false; 1674 struct intel_plane *plane; 1675 int i; 1676 1677 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1678 struct intel_fbc *fbc = plane->fbc; 1679 1680 if (!fbc || plane->pipe != crtc->pipe) 1681 continue; 1682 1683 mutex_lock(&fbc->lock); 1684 1685 if (fbc->state.plane == plane) 1686 need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane); 1687 1688 mutex_unlock(&fbc->lock); 1689 } 1690 1691 return need_vblank_wait; 1692 } 1693 1694 static void __intel_fbc_disable(struct intel_fbc *fbc) 1695 { 1696 struct intel_display *display = fbc->display; 1697 struct intel_plane *plane = fbc->state.plane; 1698 1699 lockdep_assert_held(&fbc->lock); 1700 drm_WARN_ON(display->drm, fbc->active); 1701 1702 drm_dbg_kms(display->drm, "Disabling FBC on [PLANE:%d:%s]\n", 1703 plane->base.base.id, plane->base.name); 1704 1705 intel_fbc_invalidate_dirty_rect(fbc); 1706 1707 __intel_fbc_cleanup_cfb(fbc); 1708 1709 /* wa_18038517565 Enable DPFC clock gating after FBC disable */ 1710 if (display->platform.dg2 || DISPLAY_VER(display) >= 14) 1711 fbc_compressor_clkgate_disable_wa(fbc, false); 1712 1713 fbc->state.plane = NULL; 1714 fbc->flip_pending = false; 1715 fbc->busy_bits = 0; 1716 } 1717 1718 static void __intel_fbc_post_update(struct intel_fbc *fbc) 1719 { 1720 lockdep_assert_held(&fbc->lock); 1721 1722 fbc->flip_pending = false; 1723 fbc->busy_bits = 0; 1724 1725 intel_fbc_activate(fbc); 1726 } 1727 1728 void intel_fbc_post_update(struct intel_atomic_state *state, 1729 struct intel_crtc *crtc) 1730 { 1731 const struct intel_plane_state __maybe_unused *plane_state; 1732 struct intel_plane *plane; 1733 int i; 1734 1735 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1736 struct intel_fbc *fbc = plane->fbc; 1737 1738 if (!fbc || plane->pipe != crtc->pipe) 1739 continue; 1740 1741 mutex_lock(&fbc->lock); 1742 1743 if (fbc->state.plane == plane) 1744 __intel_fbc_post_update(fbc); 1745 1746 mutex_unlock(&fbc->lock); 1747 } 1748 } 1749 1750 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) 1751 { 1752 if (fbc->state.plane) 1753 return fbc->state.plane->frontbuffer_bit; 1754 else 1755 return 0; 1756 } 1757 1758 static void __intel_fbc_invalidate(struct intel_fbc *fbc, 1759 unsigned int frontbuffer_bits, 1760 enum fb_op_origin origin) 1761 { 1762 if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) 1763 return; 1764 1765 mutex_lock(&fbc->lock); 1766 1767 frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc); 1768 if (!frontbuffer_bits) 1769 goto out; 1770 1771 fbc->busy_bits |= frontbuffer_bits; 1772 intel_fbc_deactivate(fbc, "frontbuffer write"); 1773 1774 out: 1775 mutex_unlock(&fbc->lock); 1776 } 1777 1778 void intel_fbc_invalidate(struct intel_display *display, 1779 unsigned int frontbuffer_bits, 1780 enum fb_op_origin origin) 1781 { 1782 struct intel_fbc *fbc; 1783 enum intel_fbc_id fbc_id; 1784 1785 for_each_intel_fbc(display, fbc, fbc_id) 1786 __intel_fbc_invalidate(fbc, frontbuffer_bits, origin); 1787 1788 } 1789 1790 static void __intel_fbc_flush(struct intel_fbc *fbc, 1791 unsigned int frontbuffer_bits, 1792 enum fb_op_origin origin) 1793 { 1794 mutex_lock(&fbc->lock); 1795 1796 frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc); 1797 if (!frontbuffer_bits) 1798 goto out; 1799 1800 fbc->busy_bits &= ~frontbuffer_bits; 1801 1802 if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) 1803 goto out; 1804 1805 if (fbc->busy_bits || fbc->flip_pending) 1806 goto out; 1807 1808 if (fbc->active) 1809 intel_fbc_nuke(fbc); 1810 else 1811 intel_fbc_activate(fbc); 1812 1813 out: 1814 mutex_unlock(&fbc->lock); 1815 } 1816 1817 void intel_fbc_flush(struct intel_display *display, 1818 unsigned int frontbuffer_bits, 1819 enum fb_op_origin origin) 1820 { 1821 struct intel_fbc *fbc; 1822 enum intel_fbc_id fbc_id; 1823 1824 for_each_intel_fbc(display, fbc, fbc_id) 1825 __intel_fbc_flush(fbc, frontbuffer_bits, origin); 1826 } 1827 1828 int intel_fbc_atomic_check(struct intel_atomic_state *state) 1829 { 1830 struct intel_plane_state __maybe_unused *plane_state; 1831 struct intel_plane *plane; 1832 int i; 1833 1834 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1835 int ret; 1836 1837 ret = intel_fbc_check_plane(state, plane); 1838 if (ret) 1839 return ret; 1840 } 1841 1842 return 0; 1843 } 1844 1845 static void __intel_fbc_enable(struct intel_atomic_state *state, 1846 struct intel_crtc *crtc, 1847 struct intel_plane *plane) 1848 { 1849 struct intel_display *display = to_intel_display(state->base.dev); 1850 const struct intel_plane_state *plane_state = 1851 intel_atomic_get_new_plane_state(state, plane); 1852 struct intel_fbc *fbc = plane->fbc; 1853 1854 lockdep_assert_held(&fbc->lock); 1855 1856 if (fbc->state.plane) { 1857 if (fbc->state.plane != plane) 1858 return; 1859 1860 if (intel_fbc_is_ok(plane_state)) { 1861 intel_fbc_update_state(state, crtc, plane); 1862 return; 1863 } 1864 1865 __intel_fbc_disable(fbc); 1866 } 1867 1868 drm_WARN_ON(display->drm, fbc->active); 1869 1870 fbc->no_fbc_reason = plane_state->no_fbc_reason; 1871 if (fbc->no_fbc_reason) 1872 return; 1873 1874 if (!intel_fbc_is_fence_ok(plane_state)) { 1875 fbc->no_fbc_reason = "framebuffer not fenced"; 1876 return; 1877 } 1878 1879 if (fbc->underrun_detected) { 1880 fbc->no_fbc_reason = "FIFO underrun"; 1881 return; 1882 } 1883 1884 if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state), 1885 intel_fbc_min_limit(plane_state))) { 1886 fbc->no_fbc_reason = "not enough stolen memory"; 1887 return; 1888 } 1889 1890 drm_dbg_kms(display->drm, "Enabling FBC on [PLANE:%d:%s]\n", 1891 plane->base.base.id, plane->base.name); 1892 fbc->no_fbc_reason = "FBC enabled but not active yet\n"; 1893 1894 intel_fbc_update_state(state, crtc, plane); 1895 1896 if (HAS_FBC_DIRTY_RECT(display)) 1897 intel_fbc_hw_intialize_dirty_rect(fbc, plane_state); 1898 1899 intel_fbc_program_workarounds(fbc); 1900 intel_fbc_program_cfb(fbc); 1901 } 1902 1903 /** 1904 * intel_fbc_disable - disable FBC if it's associated with crtc 1905 * @crtc: the CRTC 1906 * 1907 * This function disables FBC if it's associated with the provided CRTC. 1908 */ 1909 void intel_fbc_disable(struct intel_crtc *crtc) 1910 { 1911 struct intel_display *display = to_intel_display(crtc->base.dev); 1912 struct intel_plane *plane; 1913 1914 for_each_intel_plane(display->drm, plane) { 1915 struct intel_fbc *fbc = plane->fbc; 1916 1917 if (!fbc || plane->pipe != crtc->pipe) 1918 continue; 1919 1920 mutex_lock(&fbc->lock); 1921 if (fbc->state.plane == plane) 1922 __intel_fbc_disable(fbc); 1923 mutex_unlock(&fbc->lock); 1924 } 1925 } 1926 1927 void intel_fbc_update(struct intel_atomic_state *state, 1928 struct intel_crtc *crtc) 1929 { 1930 const struct intel_crtc_state *crtc_state = 1931 intel_atomic_get_new_crtc_state(state, crtc); 1932 const struct intel_plane_state *plane_state; 1933 struct intel_plane *plane; 1934 int i; 1935 1936 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 1937 struct intel_fbc *fbc = plane->fbc; 1938 1939 if (!fbc || plane->pipe != crtc->pipe) 1940 continue; 1941 1942 mutex_lock(&fbc->lock); 1943 1944 if (intel_crtc_needs_fastset(crtc_state) && 1945 plane_state->no_fbc_reason) { 1946 if (fbc->state.plane == plane) 1947 __intel_fbc_disable(fbc); 1948 } else { 1949 __intel_fbc_enable(state, crtc, plane); 1950 } 1951 1952 mutex_unlock(&fbc->lock); 1953 } 1954 } 1955 1956 static void intel_fbc_underrun_work_fn(struct work_struct *work) 1957 { 1958 struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); 1959 struct intel_display *display = fbc->display; 1960 1961 mutex_lock(&fbc->lock); 1962 1963 /* Maybe we were scheduled twice. */ 1964 if (fbc->underrun_detected || !fbc->state.plane) 1965 goto out; 1966 1967 drm_dbg_kms(display->drm, "Disabling FBC due to FIFO underrun.\n"); 1968 fbc->underrun_detected = true; 1969 1970 intel_fbc_deactivate(fbc, "FIFO underrun"); 1971 if (!fbc->flip_pending) 1972 intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, fbc->state.plane->pipe)); 1973 __intel_fbc_disable(fbc); 1974 out: 1975 mutex_unlock(&fbc->lock); 1976 } 1977 1978 static void __intel_fbc_reset_underrun(struct intel_fbc *fbc) 1979 { 1980 struct intel_display *display = fbc->display; 1981 1982 cancel_work_sync(&fbc->underrun_work); 1983 1984 mutex_lock(&fbc->lock); 1985 1986 if (fbc->underrun_detected) { 1987 drm_dbg_kms(display->drm, 1988 "Re-allowing FBC after fifo underrun\n"); 1989 fbc->no_fbc_reason = "FIFO underrun cleared"; 1990 } 1991 1992 fbc->underrun_detected = false; 1993 mutex_unlock(&fbc->lock); 1994 } 1995 1996 /* 1997 * intel_fbc_reset_underrun - reset FBC fifo underrun status. 1998 * @display: display 1999 * 2000 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we 2001 * want to re-enable FBC after an underrun to increase test coverage. 2002 */ 2003 void intel_fbc_reset_underrun(struct intel_display *display) 2004 { 2005 struct intel_fbc *fbc; 2006 enum intel_fbc_id fbc_id; 2007 2008 for_each_intel_fbc(display, fbc, fbc_id) 2009 __intel_fbc_reset_underrun(fbc); 2010 } 2011 2012 static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) 2013 { 2014 struct intel_display *display = fbc->display; 2015 2016 /* 2017 * There's no guarantee that underrun_detected won't be set to true 2018 * right after this check and before the work is scheduled, but that's 2019 * not a problem since we'll check it again under the work function 2020 * while FBC is locked. This check here is just to prevent us from 2021 * unnecessarily scheduling the work, and it relies on the fact that we 2022 * never switch underrun_detect back to false after it's true. 2023 */ 2024 if (READ_ONCE(fbc->underrun_detected)) 2025 return; 2026 2027 queue_work(display->wq.unordered, &fbc->underrun_work); 2028 } 2029 2030 /** 2031 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun 2032 * @display: display 2033 * 2034 * Without FBC, most underruns are harmless and don't really cause too many 2035 * problems, except for an annoying message on dmesg. With FBC, underruns can 2036 * become black screens or even worse, especially when paired with bad 2037 * watermarks. So in order for us to be on the safe side, completely disable FBC 2038 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe 2039 * already suggests that watermarks may be bad, so try to be as safe as 2040 * possible. 2041 * 2042 * This function is called from the IRQ handler. 2043 */ 2044 void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display) 2045 { 2046 struct intel_fbc *fbc; 2047 enum intel_fbc_id fbc_id; 2048 2049 for_each_intel_fbc(display, fbc, fbc_id) 2050 __intel_fbc_handle_fifo_underrun_irq(fbc); 2051 } 2052 2053 /* 2054 * The DDX driver changes its behavior depending on the value it reads from 2055 * i915.enable_fbc, so sanitize it by translating the default value into either 2056 * 0 or 1 in order to allow it to know what's going on. 2057 * 2058 * Notice that this is done at driver initialization and we still allow user 2059 * space to change the value during runtime without sanitizing it again. IGT 2060 * relies on being able to change i915.enable_fbc at runtime. 2061 */ 2062 static int intel_sanitize_fbc_option(struct intel_display *display) 2063 { 2064 if (display->params.enable_fbc >= 0) 2065 return !!display->params.enable_fbc; 2066 2067 if (!HAS_FBC(display)) 2068 return 0; 2069 2070 if (display->platform.broadwell || DISPLAY_VER(display) >= 9) 2071 return 1; 2072 2073 return 0; 2074 } 2075 2076 void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) 2077 { 2078 plane->fbc = fbc; 2079 } 2080 2081 static struct intel_fbc *intel_fbc_create(struct intel_display *display, 2082 enum intel_fbc_id fbc_id) 2083 { 2084 struct intel_fbc *fbc; 2085 2086 fbc = kzalloc(sizeof(*fbc), GFP_KERNEL); 2087 if (!fbc) 2088 return NULL; 2089 2090 fbc->id = fbc_id; 2091 fbc->display = display; 2092 INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); 2093 mutex_init(&fbc->lock); 2094 2095 if (DISPLAY_VER(display) >= 7) 2096 fbc->funcs = &ivb_fbc_funcs; 2097 else if (DISPLAY_VER(display) == 6) 2098 fbc->funcs = &snb_fbc_funcs; 2099 else if (DISPLAY_VER(display) == 5) 2100 fbc->funcs = &ilk_fbc_funcs; 2101 else if (display->platform.g4x) 2102 fbc->funcs = &g4x_fbc_funcs; 2103 else if (DISPLAY_VER(display) == 4) 2104 fbc->funcs = &i965_fbc_funcs; 2105 else 2106 fbc->funcs = &i8xx_fbc_funcs; 2107 2108 return fbc; 2109 } 2110 2111 /** 2112 * intel_fbc_init - Initialize FBC 2113 * @display: display 2114 * 2115 * This function might be called during PM init process. 2116 */ 2117 void intel_fbc_init(struct intel_display *display) 2118 { 2119 enum intel_fbc_id fbc_id; 2120 2121 display->params.enable_fbc = intel_sanitize_fbc_option(display); 2122 drm_dbg_kms(display->drm, "Sanitized enable_fbc value: %d\n", 2123 display->params.enable_fbc); 2124 2125 for_each_fbc_id(display, fbc_id) 2126 display->fbc[fbc_id] = intel_fbc_create(display, fbc_id); 2127 } 2128 2129 /** 2130 * intel_fbc_sanitize - Sanitize FBC 2131 * @display: display 2132 * 2133 * Make sure FBC is initially disabled since we have no 2134 * idea eg. into which parts of stolen it might be scribbling 2135 * into. 2136 */ 2137 void intel_fbc_sanitize(struct intel_display *display) 2138 { 2139 struct intel_fbc *fbc; 2140 enum intel_fbc_id fbc_id; 2141 2142 for_each_intel_fbc(display, fbc, fbc_id) { 2143 if (intel_fbc_hw_is_active(fbc)) 2144 intel_fbc_hw_deactivate(fbc); 2145 } 2146 } 2147 2148 static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) 2149 { 2150 struct intel_fbc *fbc = m->private; 2151 struct intel_display *display = fbc->display; 2152 struct intel_plane *plane; 2153 struct ref_tracker *wakeref; 2154 2155 drm_modeset_lock_all(display->drm); 2156 2157 wakeref = intel_display_rpm_get(display); 2158 mutex_lock(&fbc->lock); 2159 2160 if (fbc->active) { 2161 seq_puts(m, "FBC enabled\n"); 2162 seq_printf(m, "Compressing: %s\n", 2163 str_yes_no(intel_fbc_is_compressing(fbc))); 2164 } else { 2165 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 2166 } 2167 2168 for_each_intel_plane(display->drm, plane) { 2169 const struct intel_plane_state *plane_state = 2170 to_intel_plane_state(plane->base.state); 2171 2172 if (plane->fbc != fbc) 2173 continue; 2174 2175 seq_printf(m, "%c [PLANE:%d:%s]: %s\n", 2176 fbc->state.plane == plane ? '*' : ' ', 2177 plane->base.base.id, plane->base.name, 2178 plane_state->no_fbc_reason ?: "FBC possible"); 2179 } 2180 2181 mutex_unlock(&fbc->lock); 2182 intel_display_rpm_put(display, wakeref); 2183 2184 drm_modeset_unlock_all(display->drm); 2185 2186 return 0; 2187 } 2188 2189 DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status); 2190 2191 static int intel_fbc_debugfs_false_color_get(void *data, u64 *val) 2192 { 2193 struct intel_fbc *fbc = data; 2194 2195 *val = fbc->false_color; 2196 2197 return 0; 2198 } 2199 2200 static int intel_fbc_debugfs_false_color_set(void *data, u64 val) 2201 { 2202 struct intel_fbc *fbc = data; 2203 2204 mutex_lock(&fbc->lock); 2205 2206 fbc->false_color = val; 2207 2208 if (fbc->active) 2209 fbc->funcs->set_false_color(fbc, fbc->false_color); 2210 2211 mutex_unlock(&fbc->lock); 2212 2213 return 0; 2214 } 2215 2216 DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, 2217 intel_fbc_debugfs_false_color_get, 2218 intel_fbc_debugfs_false_color_set, 2219 "%llu\n"); 2220 2221 static void intel_fbc_debugfs_add(struct intel_fbc *fbc, 2222 struct dentry *parent) 2223 { 2224 debugfs_create_file("i915_fbc_status", 0444, parent, 2225 fbc, &intel_fbc_debugfs_status_fops); 2226 2227 if (fbc->funcs->set_false_color) 2228 debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent, 2229 fbc, &intel_fbc_debugfs_false_color_fops); 2230 } 2231 2232 void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc) 2233 { 2234 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 2235 2236 if (plane->fbc) 2237 intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry); 2238 } 2239 2240 /* FIXME: remove this once igt is on board with per-crtc stuff */ 2241 void intel_fbc_debugfs_register(struct intel_display *display) 2242 { 2243 struct drm_minor *minor = display->drm->primary; 2244 struct intel_fbc *fbc; 2245 2246 fbc = display->fbc[INTEL_FBC_A]; 2247 if (fbc) 2248 intel_fbc_debugfs_add(fbc, minor->debugfs_root); 2249 } 2250