/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns; they come from keeping the memory footprint
 * small and having fewer memory pages opened and accessed for refreshing
 * the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and for
 * configuring its offset in the proper registers. The hardware takes care
 * of all compression/decompression. However, there are many known cases
 * where we have to forcibly disable it to allow proper screen updates.
 */

#include <drm/drm_fourcc.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

/*
 * For SKL+, the plane source size used by the hardware is based on the value
 * we write to the PLANE_SIZE register. For BDW-, the hardware looks at the
 * value we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					const struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
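	/*
	 * Illustrative numbers (not from bspec): a 1920x1080 XRGB8888
	 * plane with a 2048-pixel aligned stride has fb.stride =
	 * 2048 * 4 = 8192 bytes, so the worst-case (uncompressed) CFB
	 * size is 1080 * 8192 bytes, i.e. ~8.4 MiB of stolen memory.
	 */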
	return lines * cache->fb.stride;
}

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
		return;
	}
}

static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
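	/*
	 * Illustrative example: a 2048-byte pitch is programmed as
	 * 2048 / 64 - 1 = 31 on gen3+, or 2048 / 32 - 1 = 63 on gen2.
	 */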
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(dev_priv, FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		if (params->fence_id >= 0)
			fbc_ctl2 |= FBC_CTL_CPU_FENCE;
		intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
		intel_de_write(dev_priv, FBC_FENCE_OFF,
			       params->fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = FBC_CTL_INTERVAL(params->interval);
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff);
	if (params->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(params->fence_id);
	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
		intel_de_write(dev_priv, DPFC_FENCE_YOFF,
			       params->fence_y_offset);
	} else {
		intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	trace_intel_fbc_nuke(fbc->crtc);

	intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
	intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}

static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	if (params->fb.format->cpp[0] == 2)
		threshold++;

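	/*
	 * Map the effective threshold to the hardware compression
	 * limit: 1 -> 1x (full-size CFB), 2 -> 2x (CFB is half the
	 * framebuffer size), 3 and 4 -> 4x. 16bpp formats had the
	 * threshold bumped by one above.
	 */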
	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->fence_id;
		if (IS_GEN(dev_priv, 6)) {
			intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
				       SNB_CPU_FENCE_ENABLE | params->fence_id);
			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
				       params->fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
		       params->fence_y_offset);
	/* enable it... */
	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt */
	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (params->gen9_wa_cfb_stride)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		intel_de_write(dev_priv, CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
			       SNB_CPU_FENCE_ENABLE | params->fence_id);
		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
			       params->fence_y_offset);
	} else if (dev_priv->ggtt.num_fences) {
		intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
			       intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
			       intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
			       ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	trace_intel_fbc_activate(fbc->crtc);

	fbc->active = true;
	fbc->activated = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	trace_intel_fbc_deactivate(fbc->crtc);

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      unsigned int size,
				      unsigned int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/*
	 * The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is
	 * used. If we enable FBC using a CFB on that memory range we'll get
	 * FIFO underruns, even if that range is not reserved by the BIOS.
	 */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/*
	 * HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

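	/*
	 * Illustrative walk-through: the loop below first retries at
	 * the original size (threshold 1), then size / 2 (threshold 2,
	 * HW caps compression at 1:2), then size / 4 (threshold 4)
	 * before giving up; 16bpp formats give up at threshold 2.
	 */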
again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
			       unsigned int size, unsigned int fb_cpp)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	drm_WARN_ON(&dev_priv->drm,
		    drm_mm_node_allocated(&fbc->compressed_fb));

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		drm_info_once(&dev_priv->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size buffer. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
			       fbc->compressed_fb.start);
	} else if (IS_GM45(dev_priv)) {
		intel_de_write(dev_priv, DPFC_CB_BASE,
			       fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
						 fbc->compressed_fb.start,
						 U32_MAX));
		GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
						 fbc->compressed_llb->start,
						 U32_MAX));
		intel_de_write(dev_priv, FBC_CFB_BASE,
			       dev_priv->dsm.start + fbc->compressed_fb.start);
		intel_de_write(dev_priv, FBC_LL_BASE,
			       dev_priv->dsm.start + compressed_llb->start);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		    fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
		return;

	if (!drm_mm_node_allocated(&fbc->compressed_fb))
		return;

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}

	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    u64 modifier, unsigned int stride)
{
	/* This should have been caught earlier. */
	if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
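	/*
	 * Illustrative example: on gen9 a linear framebuffer with a
	 * 7680-byte stride (1920 px * 4 Bpp) passes Display WA #1105
	 * (7680 is 512-byte aligned), while a 4160-byte stride would
	 * fail it (4160 & 511 == 64).
	 */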
	if (stride < 512)
		return false;

	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
	if (IS_GEN(dev_priv, 9) &&
	    modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

static bool rotation_is_valid(struct drm_i915_private *dev_priv,
			      u32 pixel_format, unsigned int rotation)
{
	if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
	    drm_rotation_90_or_270(rotation))
		return false;
	else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
		 rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

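	/*
	 * Illustrative example: on HSW (4096x4096 limit) a 4000x2100
	 * plane with adjusted_x = 128 has an effective width of 4128
	 * and is rejected even though the plane itself would fit.
	 */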
	return effective_w <= max_w && effective_h <= max_h;
}

static bool tiling_is_valid(struct drm_i915_private *dev_priv,
			    u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		if (INTEL_GEN(dev_priv) >= 9)
			return true;
		return false;
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		return true;
	default:
		return false;
	}
}

static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->plane.visible = plane_state->uapi.visible;
	if (!cache->plane.visible)
		return;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];
	cache->fb.modifier = fb->modifier;

	/* FBC1 compression interval: arbitrary choice of 1 second */
	cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

	cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);

	drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
		    !plane_state->vma->fence);

	if (plane_state->flags & PLANE_HAS_FENCE &&
	    plane_state->vma->fence)
		cache->fence_id = plane_state->vma->fence->id;
	else
		cache->fence_id = -1;
}

static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
		fbc->compressed_fb.size * fbc->threshold;
}

static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

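	/*
	 * Illustrative example: a 3840-pixel wide plane at threshold 1
	 * yields DIV_ROUND_UP(3840, 32) * 8 = 960 as the override
	 * stride programmed via CHICKEN_MISC_4 in gen7_fbc_activate().
	 */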
	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
	    cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
		return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
	else
		return 0;
}

static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv);
}

static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!dev_priv->params.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!intel_fbc_can_enable(dev_priv))
		return false;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	/*
	 * We don't need to use a state cache here since this information is
	 * global for all CRTCs.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/*
	 * The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), which will manually notify FBC and
	 * nuke the current compressed buffer and recompress it.
	 *
	 * Note that it is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}

	if (!rotation_is_valid(dev_priv, cache->fb.format->format,
			       cache->plane.rotation)) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
		fbc->no_fbc_reason = "tiling unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/*
	 * It is possible for the required CFB size to change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later.
	 */
	if (intel_fbc_cfb_size_changed(dev_priv)) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * with a Y offset that isn't divisible by 4 causes FIFO underruns
	 * and screen flicker.
	 */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}

static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/*
	 * Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero.
	 */
	memset(params, 0, sizeof(*params));

	params->fence_id = cache->fence_id;
	params->fence_y_offset = cache->fence_y_offset;

	params->interval = cache->interval;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;

	params->fb.format = cache->fb.format;
	params->fb.modifier = cache->fb.modifier;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;

	params->plane_visible = cache->plane.visible;
}
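
/*
 * Check whether a plane update can be handled with just a CFB nuke
 * (recompression) instead of a full FBC deactivate/reactivate cycle:
 * everything the hardware was programmed with must still match the
 * new state.
 */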
static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_fbc *fbc = &dev_priv->fbc;
	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct intel_fbc_reg_params *params = &fbc->params;

	if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
		return false;

	if (!params->plane_visible)
		return false;

	if (!intel_fbc_can_activate(crtc))
		return false;

	if (params->fb.format != cache->fb.format)
		return false;

	if (params->fb.modifier != cache->fb.modifier)
		return false;

	if (params->fb.stride != cache->fb.stride)
		return false;

	if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
		return false;

	if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
		return false;

	return true;
}

bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";
	bool need_vblank_wait = false;

	if (!plane->has_fbc || !plane_state)
		return need_vblank_wait;

	mutex_lock(&fbc->lock);

	if (fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	if (!intel_fbc_can_flip_nuke(crtc_state)) {
		intel_fbc_deactivate(dev_priv, reason);

		/*
		 * Display WA #1198: glk+
		 * Need an extra vblank wait between FBC disable and most plane
		 * updates. Bspec says this is only needed for plane disable,
		 * but that is not true. Touching most plane registers will
		 * cause the corruption to appear. Also SKL/derivatives do not
		 * seem to be affected.
		 *
		 * TODO: could optimize this a bit by sampling the frame
		 * counter when we disable FBC (if it was already done earlier)
		 * and skipping the extra vblank wait before the plane update
		 * if at least one frame has already passed.
		 */
		if (fbc->activated &&
		    (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
			need_vblank_wait = true;
		fbc->activated = false;
	}
unlock:
	mutex_unlock(&fbc->lock);

	return need_vblank_wait;
}

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
	drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
	drm_WARN_ON(&dev_priv->drm, fbc->active);

	drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
		    pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->crtc = NULL;
}

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));

	if (fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;

	if (!dev_priv->params.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits)
		intel_fbc_hw_activate(dev_priv);
	else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}

void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!plane->has_fbc || !plane_state)
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->crtc)
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->crtc && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}
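
/*
 * Called when frontbuffer rendering has completed: clear the affected
 * busy bits and, if the FBC plane was among the flushed buffers, either
 * nuke the CFB (when FBC is active) or try to activate FBC again.
 */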
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	/*
	 * GTT tracking does not nuke the entire cfb
	 * so don't clear busy_bits set for some other
	 * reason.
	 */
	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->crtc &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then
 * maybe enable FBC for the chosen CRTC. If it does, it will set
 * dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/*
	 * Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie
	 * FBC to pipe or plane A.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @state: the atomic state
 * @crtc: the CRTC
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it
 * if possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
 */
void intel_fbc_enable(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!plane->has_fbc || !plane_state)
		return;

	mutex_lock(&fbc->lock);

	if (fbc->crtc) {
		if (fbc->crtc != crtc ||
		    (!intel_fbc_cfb_size_changed(dev_priv) &&
		     !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv)))
			goto out;

		__intel_fbc_disable(dev_priv);
	}

	drm_WARN_ON(&dev_priv->drm, fbc->active);

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

	/* FIXME crtc_state->enable_fbc lies :( */
	if (!cache->plane.visible)
		goto out;

	if (intel_fbc_alloc_cfb(dev_priv,
				intel_fbc_calculate_cfb_size(dev_priv, cache),
				plane_state->hw.fb->format->cpp[0])) {
		cache->plane.visible = false;
		fbc->no_fbc_reason = "not enough stolen memory";
		goto out;
	}

	cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
		    pipe_name(crtc->pipe));
	fbc->no_fbc_reason = "FBC enabled but not active yet";

	fbc->crtc = crtc;
out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!plane->has_fbc)
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc) {
		drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);
}

static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->crtc)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}

/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		drm_dbg_kms(&dev_priv->drm,
			    "Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable
 * FBC in case we ever detect a FIFO underrun on any pipe. An underrun on any
 * pipe already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	/*
	 * There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that
	 * we never switch underrun_detected back to false after it's true.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	if (dev_priv->params.enable_fbc >= 0)
		return !!dev_priv->params.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	return 0;
}

static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_vtd_active() &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		drm_info(&dev_priv->drm,
			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->active = false;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
		    dev_priv->params.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/*
	 * We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state.
	 */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}