1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 /** 25 * DOC: Frame Buffer Compression (FBC) 26 * 27 * FBC tries to save memory bandwidth (and so power consumption) by 28 * compressing the amount of memory used by the display. It is total 29 * transparent to user space and completely handled in the kernel. 30 * 31 * The benefits of FBC are mostly visible with solid backgrounds and 32 * variation-less patterns. It comes from keeping the memory footprint small 33 * and having fewer memory pages opened and accessed for refreshing the display. 34 * 35 * i915 is responsible to reserve stolen memory for FBC and configure its 36 * offset on proper registers. The hardware takes care of all 37 * compress/decompress. However there are many known cases where we have to 38 * forcibly disable it to allow proper screen updates. 
 */

#include <linux/string_helpers.h>

#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>

#include "gem/i915_gem_stolen.h"
#include "gt/intel_gt_types.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"

/* Iterate over every FBC instance id advertised in the device's fbc_mask. */
#define for_each_fbc_id(__dev_priv, __fbc_id) \
	for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
		for_each_if(DISPLAY_RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))

/* Iterate over every allocated struct intel_fbc instance on the device. */
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
	for_each_fbc_id((__dev_priv), (__fbc_id)) \
		for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])

/* Per-platform hooks implementing the actual FBC register programming. */
struct intel_fbc_funcs {
	void (*activate)(struct intel_fbc *fbc);
	void (*deactivate)(struct intel_fbc *fbc);
	bool (*is_active)(struct intel_fbc *fbc);
	bool (*is_compressing)(struct intel_fbc *fbc);
	void (*nuke)(struct intel_fbc *fbc);
	void (*program_cfb)(struct intel_fbc *fbc);
	void (*set_false_color)(struct intel_fbc *fbc, bool enable);
};

/* Snapshot of everything needed to program the FBC hardware registers. */
struct intel_fbc_state {
	struct intel_plane *plane;
	unsigned int cfb_stride;	/* compressed framebuffer stride, in bytes */
	unsigned int cfb_size;		/* compressed framebuffer size, in bytes */
	unsigned int fence_y_offset;
	u16 override_cfb_stride;	/* 0 = no stride override needed */
	u16 interval;
	s8 fence_id;			/* negative = no fence */
};

struct intel_fbc {
	struct drm_i915_private *i915;
	const struct intel_fbc_funcs *funcs;

	/*
	 * This is always the inner lock when overlapping with
	 * struct_mutex and it's the outer lock when overlapping
	 * with stolen_lock.
	 */
	struct mutex lock;
	unsigned int busy_bits;

	/* stolen memory backing the CFB (and, pre-g4x, the line length buffer) */
	struct i915_stolen_fb compressed_fb, compressed_llb;

	enum intel_fbc_id id;

	u8 limit;		/* compression limit: 1, 2 or 4 */

	bool false_color;

	bool active;		/* FBC currently enabled in hardware */
	bool activated;		/* FBC has been enabled at least once */
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_state state;
	const char *no_fbc_reason;	/* why FBC is off, for debugfs/logging */
};

/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride;

	stride = plane_state->view.color_plane[0].mapping_stride;
	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
		stride /= fb->format->cpp[0];

	return stride;
}

/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */

	return intel_fbc_plane_stride(plane_state) * cpp;
}

/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
	unsigned int height = 4; /* FBC
segment is 4 lines */
	unsigned int stride;

	/* minimum segment stride we can use */
	stride = width * cpp * height / limit;

	/*
	 * Wa_16011863758: icl+
	 * Avoid some hardware segment address miscalculation.
	 */
	if (DISPLAY_VER(i915) >= 11)
		stride += 64;

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Just do it always for simplicity.
	 */
	stride = ALIGN(stride, 512);

	/* convert back to single line equivalent with 1:1 compression limit */
	return stride * limit / height;
}

/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
	if (DISPLAY_VER(i915) >= 9)
		return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
	else
		return stride;
}

/* total cfb size in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int lines = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* clamp to the per-platform maximum number of compressed lines */
	if (DISPLAY_VER(i915) == 7)
		lines = min(lines, 2048);
	else if (DISPLAY_VER(i915) >= 8)
		lines = min(lines, 2560);

	return lines * intel_fbc_cfb_stride(plane_state);
}

/*
 * Returns the override stride value (in 64 byte units per 4 line segment)
 * to be programmed, or 0 when no override is needed.
 */
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/*
	 * Override stride in 64 byte units per 4 line segment.
	 *
	 * Gen9 hw miscalculates cfb stride for linear as
	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
	 * we always need to use the override there.
	 */
	if (stride != stride_aligned ||
	    (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
		return stride_aligned * 4 / 64;

	return 0;
}

/* Build the FBC_CONTROL value (without the enable bit) for i8xx/i9xx. */
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	unsigned int cfb_stride;
	u32 fbc_ctl;

	cfb_stride = fbc_state->cfb_stride / fbc->limit;

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(i915) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
		FBC_CTL_INTERVAL(fbc_state->interval) |
		FBC_CTL_STRIDE(cfb_stride);

	if (IS_I945GM(i915))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

	if (fbc_state->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);

	return fbc_ctl;
}

/* Build the FBC_CONTROL2 value used on gen4 (i965). */
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
		FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);

	if (fbc_state->fence_id >= 0)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;

	return fbc_ctl2;
}

static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = intel_de_read(i915, FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	intel_de_write(i915, FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(i915, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
		return;
	}
}

static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(i915, FBC_TAG(i), 0);

	/* gen4 additionally needs the fence config in FBC_CONTROL2 */
	if (DISPLAY_VER(i915) == 4) {
		intel_de_write(i915, FBC_CONTROL2,
			       i965_fbc_ctl2(fbc));
		intel_de_write(i915, FBC_FENCE_OFF,
			       fbc_state->fence_y_offset);
	}

	intel_de_write(i915, FBC_CONTROL,
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}

static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
}

static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, FBC_STATUS) &
		(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}

/* Force hw retracking by rewriting the plane's base address register. */
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	intel_de_write_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPADDR(dev_priv, i9xx_plane)));
}

static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* these registers only take 32 bit addresses; warn if out of range */
	drm_WARN_ON(&i915->drm,
		    range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
					  i915_gem_stolen_node_offset(&fbc->compressed_fb),
					  U32_MAX));
	drm_WARN_ON(&i915->drm,
		    range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
					  i915_gem_stolen_node_offset(&fbc->compressed_llb),
					  U32_MAX));
	intel_de_write(i915, FBC_CFB_BASE,
		       i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
	intel_de_write(i915, FBC_LL_BASE,
		       i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
}

static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke =
i8xx_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};

/* Force hw retracking by rewriting the plane's surface address register. */
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	intel_de_write_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPSURF(dev_priv, i9xx_plane)));
}

static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};

/* Translate fbc->limit (1, 2 or 4) into the DPFC_CTL limit field. */
static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
{
	switch (fbc->limit) {
	default:
		MISSING_CASE(fbc->limit);
		fallthrough;
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	case 4:
		return DPFC_CTL_LIMIT_4X;
	}
}

/* Build the DPFC_CONTROL value (without the enable bit) for g4x/ilk/snb. */
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
		DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);

	if (IS_G4X(i915))
		dpfc_ctl |= DPFC_CTL_SR_EN;

	if (fbc_state->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;

		/* snb+ program the fence number elsewhere (SNB_DPFC_CTL_SA) */
		if (DISPLAY_VER(i915) < 6)
			dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
	}

	return dpfc_ctl;
}

static void g4x_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_FENCE_YOFF,
		       fbc_state->fence_y_offset);

	intel_de_write(i915, DPFC_CONTROL,
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}

static void g4x_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
}

static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
}

static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_CB_BASE,
		       i915_gem_stolen_node_offset(&fbc->compressed_fb));
}

static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = g4x_fbc_program_cfb,
};

static void ilk_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
		       fbc_state->fence_y_offset);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}

static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
}

static bool ilk_fbc_is_compressing(struct intel_fbc
*fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
}

static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
		       i915_gem_stolen_node_offset(&fbc->compressed_fb));
}

static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};

/* Program (or disable, when fence_id < 0) the CPU fence used by FBC. */
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 ctl = 0;

	if (fbc_state->fence_id >= 0)
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);

	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}

static void snb_fbc_activate(struct intel_fbc *fbc)
{
	snb_fbc_program_fence(fbc);

	ilk_fbc_activate(fbc);
}

static void snb_fbc_nuke(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
	intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
}

static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};

static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	if (fbc_state->override_cfb_stride)
		val |= FBC_STRIDE_OVERRIDE |
			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
}

static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	/* Display WA #0529: skl, kbl, bxt. */
	if (fbc_state->override_cfb_stride)
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_rmw(i915, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}

/* Build the DPFC_CONTROL value (without the enable bit) for ivb+. */
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);

	if (IS_IVYBRIDGE(i915))
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);

	if (DISPLAY_VER(i915) >= 20)
		dpfc_ctl |= DPFC_CTL_PLANE_BINDING(fbc_state->plane->id);

	if (fbc_state->fence_id >= 0)
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;

	if (fbc->false_color)
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;

	return dpfc_ctl;
}

static void ivb_fbc_activate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	if (DISPLAY_VER(i915) >= 10)
		glk_fbc_program_cfb_stride(fbc);
	else if (DISPLAY_VER(i915) == 9)
		skl_fbc_program_cfb_stride(fbc);

	if (intel_gt_support_legacy_fencing(to_gt(i915)))
		snb_fbc_program_fence(fbc);

	/*
	 * wa_14019417088 Alternative WA:
	 * xe2+ first write the ctl value without the enable bit,
	 * then again with it.
	 */
	dpfc_ctl = ivb_dpfc_ctl(fbc);
	if (DISPLAY_VER(i915) >= 20)
		intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | dpfc_ctl);
}

static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
	return
intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
}

static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
				    bool enable)
{
	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}

static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ivb_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
	.set_false_color = ivb_fbc_set_false_color,
};

static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
{
	return fbc->funcs->is_active(fbc);
}

static void intel_fbc_hw_activate(struct intel_fbc *fbc)
{
	trace_intel_fbc_activate(fbc->state.plane);

	fbc->active = true;
	fbc->activated = true;

	fbc->funcs->activate(fbc);
}

static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
	trace_intel_fbc_deactivate(fbc->state.plane);

	fbc->active = false;

	fbc->funcs->deactivate(fbc);
}

static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
{
	return fbc->funcs->is_compressing(fbc);
}

/* Invalidate the hw compression state so it re-compresses the framebuffer. */
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	lockdep_assert_held(&fbc->lock);
	drm_WARN_ON(&i915->drm, fbc->flip_pending);

	trace_intel_fbc_nuke(fbc->state.plane);

	fbc->funcs->nuke(fbc);
}

static void intel_fbc_activate(struct intel_fbc *fbc)
{
	lockdep_assert_held(&fbc->lock);

	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);

	fbc->no_fbc_reason = NULL;
}

/* Deactivate FBC (if active) and record @reason for debugfs/logging. */
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
	lockdep_assert_held(&fbc->lock);

	if (fbc->active)
		intel_fbc_hw_deactivate(fbc);

	fbc->no_fbc_reason = reason;
}

/* Highest stolen offset the CFB base registers can address. */
static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
		return BIT_ULL(28);
	else
		return BIT_ULL(32);
}

static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
{
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(i915) ||
	    (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
		end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	return min(end, intel_fbc_cfb_base_max(i915));
}

/* 16bpp formats need at least a 1:2 limit; everything else can use 1:1. */
static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
{
	return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
}

static int intel_fbc_max_limit(struct drm_i915_private *i915)
{
	/* WaFbcOnly1to1Ratio:ctg */
	if (IS_G4X(i915))
		return 1;

	/*
	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
	 * FBC1 to the same out of convenience.
	 */
	return 4;
}

/*
 * Reserve stolen memory for the CFB. Returns the compression limit
 * actually used (>= @min_limit), or 0 if no allocation succeeded.
 */
static int find_compression_limit(struct intel_fbc *fbc,
				  unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	u64 end = intel_fbc_stolen_end(i915);
	int ret, limit = min_limit;

	size /= limit;

	/* Try to over-allocate to reduce reallocations and fragmentation.
 */
	ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
						   size <<= 1, 4096, 0, end);
	if (ret == 0)
		return limit;

	/* then fall back to halving the allocation for each higher limit */
	for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
		ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
							   size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
	}

	return 0;
}

/*
 * Allocate the CFB (and, pre-ilk/g4x, the line length buffer) from stolen
 * memory and record the chosen compression limit in fbc->limit.
 * Returns 0 on success, -ENOSPC when stolen memory is exhausted.
 */
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
			       unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	int ret;

	drm_WARN_ON(&i915->drm,
		    i915_gem_stolen_node_allocated(&fbc->compressed_fb));
	drm_WARN_ON(&i915->drm,
		    i915_gem_stolen_node_allocated(&fbc->compressed_llb));

	if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
		ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
						  4096, 4096);
		if (ret)
			goto err;
	}

	ret = find_compression_limit(fbc, size, min_limit);
	if (!ret)
		goto err_llb;
	else if (ret > min_limit)
		drm_info_once(&i915->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	fbc->limit = ret;

	drm_dbg_kms(&i915->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
	return 0;

err_llb:
	if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
	if (i915_gem_stolen_initialized(i915))
		drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void intel_fbc_program_cfb(struct intel_fbc *fbc)
{
	fbc->funcs->program_cfb(fbc);
}

/* Apply platform specific FBC workarounds via the chicken registers. */
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	if (IS_SKYLAKE(i915) || IS_BROXTON(i915)) {
		/*
		 * WaFbcHighMemBwCorruptionAvoidance:skl,bxt
		 * Display WA #0883: skl,bxt
		 */
		intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_DISABLE_DUMMY0);
	}

	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
		/*
		 * WaFbcNukeOnHostModify:skl,kbl,cfl
		 * Display WA #0873: skl,kbl,cfl
		 */
		intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_NUKE_ON_ANY_MODIFICATION);
	}

	/* Wa_1409120013:icl,jsl,tgl,dg1 */
	if (IS_DISPLAY_VER(i915, 11, 12))
		intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
	if (DISPLAY_VER(i915) >= 11 && !IS_DG2(i915))
		intel_de_rmw(i915, ILK_DPFC_CHICKEN(fbc->id),
			     0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
}

/* Release the stolen memory backing the CFB; FBC must already be inactive. */
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
		return;

	if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
	if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}

/*
 * intel_fbc_cleanup - free all FBC resources for the device
 * @i915: i915 device instance
 *
 * Releases each FBC instance's stolen memory and frees the instance itself.
 */
void intel_fbc_cleanup(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		__intel_fbc_cleanup_cfb(fbc);
		mutex_unlock(&fbc->lock);

		kfree(fbc);
	}
}

/* gen2/3: FBC only supports a framebuffer stride of exactly 4k or 8k bytes. */
static bool i8xx_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

	return stride == 4096 || stride == 8192;
}

/* gen4: stride must fall within the 2k..16k byte range. */
static bool i965_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

	return stride >= 2048 && stride <= 16384;
}

/* g4x..gen8: no stride restriction for FBC. */
static bool g4x_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}

/* gen9/10: linear framebuffers need a 512 byte aligned stride (WA #1105). */
static bool skl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
		return false;

	return true;
}

/* gen11+: no stride restriction for FBC. */
static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}

/* Dispatch the stride check to the appropriate per-platform variant. */
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	if (DISPLAY_VER(i915) >= 11)
		return icl_fbc_stride_is_valid(plane_state);
	else if (DISPLAY_VER(i915) >= 9)
		return skl_fbc_stride_is_valid(plane_state);
	else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
		return g4x_fbc_stride_is_valid(plane_state);
	else if (DISPLAY_VER(i915) == 4)
		return i965_fbc_stride_is_valid(plane_state);
	else
		return i8xx_fbc_stride_is_valid(plane_state);
}

/* pre-g4x: 32bpp XRGB/XBGR always ok, 16bpp only on gen3+. */
static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (DISPLAY_VER(i915) == 2)
			return false;
		return true;
	default:
		return false;
	}
}

/* g4x..xe_lpd: 32bpp XRGB/XBGR; RGB565 everywhere except g4x (ctg WA). */
static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_RGB565:
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(i915))
			return false;
		return true;
	default:
		return false;
	}
}

/* xe2 (display 20+): alpha formats are also acceptable. */
static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGB565:
		return true;
	default:
		return false;
	}
}

/* Dispatch the pixel format check to the appropriate per-platform variant. */
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	if (DISPLAY_VER(i915) >= 20)
		return lnl_fbc_pixel_format_is_valid(plane_state);
	else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
		return g4x_fbc_pixel_format_is_valid(plane_state);
	else
		return i8xx_fbc_pixel_format_is_valid(plane_state);
}

/* pre-g4x: FBC only works with an unrotated plane. */
static bool i8xx_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	return plane_state->hw.rotation == DRM_MODE_ROTATE_0;
}

/* g4x..gen8: any rotation is fine. */
static bool g4x_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	return true;
}

/* gen9+: RGB565 cannot be combined with 90/270 degree rotation. */
static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;

	if (fb->format->format == DRM_FORMAT_RGB565 &&
	    drm_rotation_90_or_270(rotation))
		return false;

	return true;
}

/* Dispatch the rotation check to the appropriate per-platform variant. */
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	if (DISPLAY_VER(i915) >= 9)
		return skl_fbc_rotation_is_valid(plane_state);
	else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
		return g4x_fbc_rotation_is_valid(plane_state);
	else
		return i8xx_fbc_rotation_is_valid(plane_state);
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Per-platform maximum surface size the HW tracking can cover. */
	if (DISPLAY_VER(i915) >= 11) {
		max_w = 8192;
		max_h = 4096;
	} else if (DISPLAY_VER(i915) >= 10) {
		max_w = 5120;
		max_h = 4096;
	} else if (DISPLAY_VER(i915) >= 7) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	effective_w = plane_state->view.color_plane[0].x +
		(drm_rect_width(&plane_state->uapi.src) >> 16);
	effective_h = plane_state->view.color_plane[0].y +
		(drm_rect_height(&plane_state->uapi.src) >> 16);

	return effective_w <= max_w && effective_h <= max_h;
}

/* Check the visible plane size against the per-platform FBC plane limits. */
static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int w, h, max_w, max_h;

	if (DISPLAY_VER(i915) >= 10) {
		max_w = 5120;
		max_h = 4096;
	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	w = drm_rect_width(&plane_state->uapi.src) >> 16;
	h = drm_rect_height(&plane_state->uapi.src) >> 16;

	return w <= max_w && h <= max_h;
}

/* pre-gen9: FBC requires an X-tiled framebuffer. */
static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	return fb->modifier == I915_FORMAT_MOD_X_TILED;
}

/* gen9+: any tiling/modifier is acceptable. */
static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
	return true;
}

/* Dispatch the tiling check to the appropriate per-platform variant. */
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	if (DISPLAY_VER(i915) >= 9)
		return skl_fbc_tiling_valid(plane_state);
	else
		return i8xx_fbc_tiling_valid(plane_state);
}

/*
 * Latch the parameters FBC activation needs (owning plane, CFB geometry,
 * fence, compression interval) from the committed atomic state into
 * fbc->state. Caller holds fbc->lock.
 *
 * NOTE(review): the first two checks use plain WARN_ON while the fence
 * check below uses drm_WARN_ON — confirm whether that is intentional.
 */
static void intel_fbc_update_state(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;
	struct intel_fbc_state *fbc_state = &fbc->state;

	WARN_ON(plane_state->no_fbc_reason);
	WARN_ON(fbc_state->plane && fbc_state->plane != plane);

	fbc_state->plane = plane;

	/* FBC1 compression interval: arbitrary choice of 1 second */
	fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

	fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);

	drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
		    !intel_gt_support_legacy_fencing(to_gt(i915)));

	if (plane_state->flags & PLANE_HAS_FENCE)
		fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
	else
		fbc_state->fence_id = -1;

	fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
	fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
	fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
}

/* pre-gen9 requires a fenced GGTT mapping so the HW can track CPU writes. */
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	/*
	 * The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 */
	return DISPLAY_VER(i915) >= 9 ||
		(plane_state->flags & PLANE_HAS_FENCE &&
		 i915_vma_fence_id(plane_state->ggtt_vma) != -1);
}

/* Does the already-allocated CFB still fit this plane state? */
static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct intel_fbc *fbc = plane->fbc;

	return intel_fbc_min_limit(plane_state) <= fbc->limit &&
		intel_fbc_cfb_size(plane_state) <= fbc->limit *
			i915_gem_stolen_node_size(&fbc->compressed_fb);
}

/* All conditions for keeping FBC enabled on this plane state hold. */
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
{
	return !plane_state->no_fbc_reason &&
		intel_fbc_is_fence_ok(plane_state) &&
		intel_fbc_is_cfb_ok(plane_state);
}

/*
 * Atomic-check phase: evaluate all reasons FBC cannot be used with this
 * plane state. A failed condition is not an error; it just records a
 * human-readable reason in plane_state->no_fbc_reason and returns 0.
 * Only a failure to get the cdclk state returns a real error.
 */
static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	if (!fbc)
		return 0;

	if (!i915_gem_stolen_initialized(i915)) {
		plane_state->no_fbc_reason = "stolen memory not initialised";
		return 0;
	}

	if (intel_vgpu_active(i915)) {
		plane_state->no_fbc_reason = "VGPU active";
		return 0;
	}

	if (!i915->display.params.enable_fbc) {
		plane_state->no_fbc_reason = "disabled per module param or by default";
		return 0;
	}

	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
		plane_state->no_fbc_reason = "VT-d enabled";
		return 0;
	}

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

	if (crtc_state->double_wide) {
		plane_state->no_fbc_reason = "double wide pipe not supported";
		return 0;
	}

	/*
	 * Display 12+ is not supporting FBC with PSR2.
	 * Recommendation is to keep this combination disabled
	 * Bspec: 50422 HSD: 14010260002
	 */
	if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_sel_update &&
	    !crtc_state->has_panel_replay) {
		plane_state->no_fbc_reason = "PSR2 enabled";
		return 0;
	}

	/* Wa_14016291713 */
	if ((IS_DISPLAY_VER(i915, 12, 13) ||
	     IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
	    crtc_state->has_psr && !crtc_state->has_panel_replay) {
		plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
		return 0;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (DISPLAY_VER(i915) < 20 &&
	    plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return 0;
	}

	if (!intel_fbc_plane_size_valid(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
		plane_state->no_fbc_reason = "surface size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (DISPLAY_VER(i915) >= 9 &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return 0;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (DISPLAY_VER(i915) >= 11 &&
	    (plane_state->view.color_plane[0].y +
	     (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return 0;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
			plane_state->no_fbc_reason = "pixel rate too high";
			return 0;
		}
	}

	plane_state->no_fbc_reason = NULL;

	return 0;
}


/*
 * Can the flip be handled with just a CFB nuke (recompression) rather
 * than a full FBC disable/re-enable cycle?
 */
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    struct intel_plane *plane)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
	const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;

	/* Modesets always require the full disable/enable path. */
	if (intel_crtc_needs_modeset(new_crtc_state))
		return false;

	if (!intel_fbc_is_ok(old_plane_state) ||
	    !intel_fbc_is_ok(new_plane_state))
		return false;

	/* Any change in format, tiling or CFB geometry rules out a plain nuke. */
	if (old_fb->format->format != new_fb->format->format)
		return false;

	if (old_fb->modifier != new_fb->modifier)
		return false;

	if (intel_fbc_plane_stride(old_plane_state) !=
	    intel_fbc_plane_stride(new_plane_state))
		return false;

	if (intel_fbc_cfb_stride(old_plane_state) !=
	    intel_fbc_cfb_stride(new_plane_state))
		return false;

	if (intel_fbc_cfb_size(old_plane_state) !=
	    intel_fbc_cfb_size(new_plane_state))
		return false;

	if (intel_fbc_override_cfb_stride(old_plane_state) !=
	    intel_fbc_override_cfb_stride(new_plane_state))
		return false;

	return true;
}

/*
 * Pre-plane-update handling for one plane: mark a flip as pending and,
 * unless the update can be absorbed by a nuke, deactivate FBC. Returns
 * true if the caller must wait an extra vblank (Display WA #1198).
 * Caller holds fbc->lock.
 */
static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_fbc *fbc = plane->fbc;
	bool need_vblank_wait = false;

	lockdep_assert_held(&fbc->lock);

	fbc->flip_pending = true;

	if (intel_fbc_can_flip_nuke(state, crtc, plane))
		return need_vblank_wait;

	intel_fbc_deactivate(fbc, "update pending");

	/*
	 * Display WA #1198: glk+
	 * Need an extra vblank wait between FBC disable and most plane
	 * updates. Bspec says this is only needed for plane disable, but
	 * that is not true. Touching most plane registers will cause the
	 * corruption to appear. Also SKL/derivatives do not seem to be
	 * affected.
	 *
	 * TODO: could optimize this a bit by sampling the frame
	 * counter when we disable FBC (if it was already done earlier)
	 * and skipping the extra vblank wait before the plane update
	 * if at least one frame has already passed.
	 */
	if (fbc->activated && DISPLAY_VER(i915) >= 10)
		need_vblank_wait = true;
	fbc->activated = false;

	return need_vblank_wait;
}

/*
 * Run pre-update handling for every FBC-capable plane on @crtc in this
 * atomic state. Returns true if an extra vblank wait is required.
 */
bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	bool need_vblank_wait = false;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		/* Only the plane currently owning this FBC instance matters. */
		if (fbc->state.plane == plane)
			need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

		mutex_unlock(&fbc->lock);
	}

	return need_vblank_wait;
}

/*
 * Fully disable FBC: free the CFB and drop the owning plane. FBC must
 * already be deactivated in hardware. Caller holds fbc->lock.
 */
static void __intel_fbc_disable(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	struct intel_plane *plane = fbc->state.plane;

	lockdep_assert_held(&fbc->lock);
	drm_WARN_ON(&i915->drm, fbc->active);

	drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);

	__intel_fbc_cleanup_cfb(fbc);

	fbc->state.plane = NULL;
	fbc->flip_pending = false;
	fbc->busy_bits = 0;
}

/* Flip completed: clear pending/busy state and (re)activate FBC. */
static void __intel_fbc_post_update(struct intel_fbc *fbc)
{
	lockdep_assert_held(&fbc->lock);

	fbc->flip_pending = false;
	fbc->busy_bits = 0;

	intel_fbc_activate(fbc);
}

/* Run post-update handling for every FBC-capable plane on @crtc. */
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (fbc->state.plane == plane)
			__intel_fbc_post_update(fbc);

		mutex_unlock(&fbc->lock);
	}
}

/* Frontbuffer bit of the plane currently owning this FBC instance, or 0. */
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->state.plane)
		return fbc->state.plane->frontbuffer_bit;
	else
		return 0;
}

/*
 * Software frontbuffer tracking: a CPU write to the scanout is starting,
 * so deactivate FBC and remember the dirty bits until the flush.
 * Flips/cursor updates are handled via pre/post_update instead.
 */
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
				   unsigned int frontbuffer_bits,
				   enum fb_op_origin origin)
{
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		return;

	mutex_lock(&fbc->lock);

	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits |= frontbuffer_bits;
	intel_fbc_deactivate(fbc, "frontbuffer write");

out:
	mutex_unlock(&fbc->lock);
}

/* Fan the frontbuffer invalidate out to every FBC instance. */
void intel_fbc_invalidate(struct drm_i915_private *i915,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);

}

/*
 * Software frontbuffer tracking: a CPU write finished. Clear the busy
 * bits and, once nothing is pending, nuke (recompress) or reactivate.
 */
static void __intel_fbc_flush(struct intel_fbc *fbc,
			      unsigned int frontbuffer_bits,
			      enum fb_op_origin origin)
{
	mutex_lock(&fbc->lock);

	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		goto out;

	if (fbc->busy_bits || fbc->flip_pending)
		goto out;

	if (fbc->active)
		intel_fbc_nuke(fbc);
	else
		intel_fbc_activate(fbc);

out:
	mutex_unlock(&fbc->lock);
}

/* Fan the frontbuffer flush out to every FBC instance. */
void intel_fbc_flush(struct drm_i915_private *i915,
		     unsigned int frontbuffer_bits,
		     enum fb_op_origin origin)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}

/* Atomic check entry point: evaluate FBC eligibility for every new plane. */
int intel_fbc_atomic_check(struct intel_atomic_state *state)
{
	struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		int ret;

		ret = intel_fbc_check_plane(state, plane);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Enable (or refresh) FBC for @plane: record the no-FBC reason if any,
 * allocate the CFB, program workarounds and the CFB registers. Takes
 * over from any previous owning plane only after disabling it first.
 * Caller holds fbc->lock.
 */
static void __intel_fbc_enable(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;

	lockdep_assert_held(&fbc->lock);

	if (fbc->state.plane) {
		if (fbc->state.plane != plane)
			return;

		/* Same owner and still ok: just refresh the cached state. */
		if (intel_fbc_is_ok(plane_state)) {
			intel_fbc_update_state(state, crtc, plane);
			return;
		}

		__intel_fbc_disable(fbc);
	}

	drm_WARN_ON(&i915->drm, fbc->active);

	fbc->no_fbc_reason = plane_state->no_fbc_reason;
	if (fbc->no_fbc_reason)
		return;

	if (!intel_fbc_is_fence_ok(plane_state)) {
		fbc->no_fbc_reason = "framebuffer not fenced";
		return;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "FIFO underrun";
		return;
	}

	if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
				intel_fbc_min_limit(plane_state))) {
		fbc->no_fbc_reason = "not enough stolen memory";
		return;
	}

	drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);
	/*
	 * NOTE(review): the trailing "\n" inside this reason string ends up
	 * embedded in the debugfs "FBC disabled: %s\n" output, producing a
	 * double newline — confirm whether it should be dropped.
	 */
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";

	intel_fbc_update_state(state, crtc, plane);

	intel_fbc_program_workarounds(fbc);
	intel_fbc_program_cfb(fbc);
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane(&i915->drm, plane) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);
		if (fbc->state.plane == plane)
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
}

/*
 * Commit-time update: for each FBC-capable plane on @crtc either disable
 * FBC (fastset with a no-FBC reason) or try to enable/refresh it.
 */
void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (intel_crtc_needs_fastset(crtc_state) &&
		    plane_state->no_fbc_reason) {
			if (fbc->state.plane == plane)
				__intel_fbc_disable(fbc);
		} else {
			__intel_fbc_enable(state, crtc, plane);
		}

		mutex_unlock(&fbc->lock);
	}
}

/*
 * Deferred work scheduled from the FIFO underrun IRQ: permanently
 * disable FBC on this instance until the underrun status is reset.
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
	struct drm_i915_private *i915 = fbc->i915;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->state.plane)
		goto out;

	drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(fbc, "FIFO underrun");
	if (!fbc->flip_pending)
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
	__intel_fbc_disable(fbc);
out:
	mutex_unlock(&fbc->lock);
}

/* Clear the sticky underrun flag for one FBC instance (testing aid). */
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* Flush any in-flight underrun work before touching the flag. */
	cancel_work_sync(&fbc->underrun_work);

	mutex_lock(&fbc->lock);

	if (fbc->underrun_detected) {
		drm_dbg_kms(&i915->drm,
			    "Re-allowing FBC after fifo underrun\n");
		fbc->no_fbc_reason = "FIFO underrun cleared";
	}

	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
}

/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @i915: the i915 device
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
void intel_fbc_reset_underrun(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_reset_underrun(fbc);
}

/* IRQ-context helper: schedule the underrun work unless already flagged. */
static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
	/*
	 * There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @i915: i915 device
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_handle_fifo_underrun_irq(fbc);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
{
	/* An explicit module parameter wins; normalize it to 0/1. */
	if (i915->display.params.enable_fbc >= 0)
		return !!i915->display.params.enable_fbc;

	if (!HAS_FBC(i915))
		return 0;

	/* Default-on platforms: BDW and display version 9+. */
	if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
		return 1;

	return 0;
}

/* Associate @plane with the FBC instance that can compress it. */
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
	plane->fbc = fbc;
}

/*
 * Allocate one intel_fbc instance and pick the vfunc table matching the
 * platform's FBC hardware generation. Returns NULL on allocation failure.
 */
static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
					  enum intel_fbc_id fbc_id)
{
	struct intel_fbc *fbc;

	fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
	if (!fbc)
		return NULL;

	fbc->id = fbc_id;
	fbc->i915 = i915;
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);

	if (DISPLAY_VER(i915) >= 7)
		fbc->funcs = &ivb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 6)
		fbc->funcs = &snb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 5)
		fbc->funcs = &ilk_fbc_funcs;
	else if (IS_G4X(i915))
		fbc->funcs = &g4x_fbc_funcs;
	else if (DISPLAY_VER(i915) == 4)
		fbc->funcs = &i965_fbc_funcs;
	else
		fbc->funcs = &i8xx_fbc_funcs;

	return fbc;
}

/**
 * intel_fbc_init - Initialize FBC
 * @i915: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *i915)
{
	enum intel_fbc_id fbc_id;

	i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
	drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
		    i915->display.params.enable_fbc);

	for_each_fbc_id(i915, fbc_id)
		i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
}

/**
 * intel_fbc_sanitize - Sanitize FBC
 * @i915: the i915 device
 *
 * Make sure FBC is initially disabled since we have no
 * idea eg. into which parts of stolen it might be scribbling
 * into.
 */
void intel_fbc_sanitize(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id) {
		if (intel_fbc_hw_is_active(fbc))
			intel_fbc_hw_deactivate(fbc);
	}
}

/*
 * debugfs: dump FBC status plus, per associated plane, the reason FBC is
 * not possible (or "FBC possible"). '*' marks the current owning plane.
 */
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_fbc *fbc = m->private;
	struct drm_i915_private *i915 = fbc->i915;
	struct intel_plane *plane;
	intel_wakeref_t wakeref;

	drm_modeset_lock_all(&i915->drm);

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&fbc->lock);

	if (fbc->active) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   str_yes_no(intel_fbc_is_compressing(fbc)));
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}

	for_each_intel_plane(&i915->drm, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane->fbc != fbc)
			continue;

		seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
			   fbc->state.plane == plane ? '*' : ' ',
			   plane->base.base.id, plane->base.name,
			   plane_state->no_fbc_reason ?: "FBC possible");
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	drm_modeset_unlock_all(&i915->drm);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);

/* debugfs: report the current false color setting. */
static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_fbc *fbc = data;

	*val = fbc->false_color;

	return 0;
}

/* debugfs: set false color mode; applied immediately if FBC is active. */
static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_fbc *fbc = data;

	mutex_lock(&fbc->lock);

	fbc->false_color = val;

	if (fbc->active)
		fbc->funcs->set_false_color(fbc, fbc->false_color);

	mutex_unlock(&fbc->lock);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
			 intel_fbc_debugfs_false_color_get,
			 intel_fbc_debugfs_false_color_set,
			 "%llu\n");

/* Create the per-FBC debugfs files under @parent. */
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
				  struct dentry *parent)
{
	debugfs_create_file("i915_fbc_status", 0444, parent,
			    fbc, &intel_fbc_debugfs_status_fops);

	/* false color is only offered on platforms whose vfuncs support it */
	if (fbc->funcs->set_false_color)
		debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
					   fbc, &intel_fbc_debugfs_false_color_fops);
}

/* Register the FBC debugfs files for the primary plane of @crtc. */
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
{
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	if (plane->fbc)
		intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
}

/* FIXME: remove this once igt is on board with per-crtc stuff */
void intel_fbc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	struct intel_fbc *fbc;

	fbc = i915->display.fbc[INTEL_FBC_A];
	if (fbc)
		intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}