1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 /** 25 * DOC: atomic plane helpers 26 * 27 * The functions here are used by the atomic plane helper functions to 28 * implement legacy plane updates (i.e., drm_plane->update_plane() and 29 * drm_plane->disable_plane()). This allows plane updates to use the 30 * atomic state infrastructure and perform plane updates as separate 31 * prepare/check/commit/cleanup steps. 
32 */ 33 34 #include <linux/dma-fence-chain.h> 35 #include <linux/dma-resv.h> 36 37 #include <drm/drm_atomic_helper.h> 38 #include <drm/drm_blend.h> 39 #include <drm/drm_fourcc.h> 40 #include <drm/drm_gem.h> 41 #include <drm/drm_gem_atomic_helper.h> 42 43 #include "i915_config.h" 44 #include "i9xx_plane_regs.h" 45 #include "intel_atomic_plane.h" 46 #include "intel_cdclk.h" 47 #include "intel_cursor.h" 48 #include "intel_display_rps.h" 49 #include "intel_display_trace.h" 50 #include "intel_display_types.h" 51 #include "intel_fb.h" 52 #include "intel_fb_pin.h" 53 #include "skl_scaler.h" 54 #include "skl_watermark.h" 55 56 static void intel_plane_state_reset(struct intel_plane_state *plane_state, 57 struct intel_plane *plane) 58 { 59 memset(plane_state, 0, sizeof(*plane_state)); 60 61 __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base); 62 63 plane_state->scaler_id = -1; 64 } 65 66 struct intel_plane *intel_plane_alloc(void) 67 { 68 struct intel_plane_state *plane_state; 69 struct intel_plane *plane; 70 71 plane = kzalloc(sizeof(*plane), GFP_KERNEL); 72 if (!plane) 73 return ERR_PTR(-ENOMEM); 74 75 plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); 76 if (!plane_state) { 77 kfree(plane); 78 return ERR_PTR(-ENOMEM); 79 } 80 81 intel_plane_state_reset(plane_state, plane); 82 83 plane->base.state = &plane_state->uapi; 84 85 return plane; 86 } 87 88 void intel_plane_free(struct intel_plane *plane) 89 { 90 intel_plane_destroy_state(&plane->base, plane->base.state); 91 kfree(plane); 92 } 93 94 /** 95 * intel_plane_duplicate_state - duplicate plane state 96 * @plane: drm plane 97 * 98 * Allocates and returns a copy of the plane state (both common and 99 * Intel-specific) for the specified plane. 100 * 101 * Returns: The newly allocated plane state, or NULL on failure. 
102 */ 103 struct drm_plane_state * 104 intel_plane_duplicate_state(struct drm_plane *plane) 105 { 106 struct intel_plane_state *intel_state; 107 108 intel_state = to_intel_plane_state(plane->state); 109 intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL); 110 111 if (!intel_state) 112 return NULL; 113 114 __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi); 115 116 intel_state->ggtt_vma = NULL; 117 intel_state->dpt_vma = NULL; 118 intel_state->flags = 0; 119 120 /* add reference to fb */ 121 if (intel_state->hw.fb) 122 drm_framebuffer_get(intel_state->hw.fb); 123 124 return &intel_state->uapi; 125 } 126 127 /** 128 * intel_plane_destroy_state - destroy plane state 129 * @plane: drm plane 130 * @state: state object to destroy 131 * 132 * Destroys the plane state (both common and Intel-specific) for the 133 * specified plane. 134 */ 135 void 136 intel_plane_destroy_state(struct drm_plane *plane, 137 struct drm_plane_state *state) 138 { 139 struct intel_plane_state *plane_state = to_intel_plane_state(state); 140 141 drm_WARN_ON(plane->dev, plane_state->ggtt_vma); 142 drm_WARN_ON(plane->dev, plane_state->dpt_vma); 143 144 __drm_atomic_helper_plane_destroy_state(&plane_state->uapi); 145 if (plane_state->hw.fb) 146 drm_framebuffer_put(plane_state->hw.fb); 147 kfree(plane_state); 148 } 149 150 bool intel_plane_needs_physical(struct intel_plane *plane) 151 { 152 struct drm_i915_private *i915 = to_i915(plane->base.dev); 153 154 return plane->id == PLANE_CURSOR && 155 DISPLAY_INFO(i915)->cursor_needs_physical; 156 } 157 158 unsigned int intel_adjusted_rate(const struct drm_rect *src, 159 const struct drm_rect *dst, 160 unsigned int rate) 161 { 162 unsigned int src_w, src_h, dst_w, dst_h; 163 164 src_w = drm_rect_width(src) >> 16; 165 src_h = drm_rect_height(src) >> 16; 166 dst_w = drm_rect_width(dst); 167 dst_h = drm_rect_height(dst); 168 169 /* Downscaling limits the maximum pixel rate */ 170 dst_w = min(src_w, dst_w); 171 dst_h = 
min(src_h, dst_h); 172 173 return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h), 174 dst_w * dst_h); 175 } 176 177 unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, 178 const struct intel_plane_state *plane_state) 179 { 180 /* 181 * Note we don't check for plane visibility here as 182 * we want to use this when calculating the cursor 183 * watermarks even if the cursor is fully offscreen. 184 * That depends on the src/dst rectangles being 185 * correctly populated whenever the watermark code 186 * considers the cursor to be visible, whether or not 187 * it is actually visible. 188 * 189 * See: intel_wm_plane_visible() and intel_check_cursor() 190 */ 191 192 return intel_adjusted_rate(&plane_state->uapi.src, 193 &plane_state->uapi.dst, 194 crtc_state->pixel_rate); 195 } 196 197 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, 198 const struct intel_plane_state *plane_state, 199 int color_plane) 200 { 201 const struct drm_framebuffer *fb = plane_state->hw.fb; 202 203 if (!plane_state->uapi.visible) 204 return 0; 205 206 return intel_plane_pixel_rate(crtc_state, plane_state) * 207 fb->format->cpp[color_plane]; 208 } 209 210 static bool 211 use_min_ddb(const struct intel_crtc_state *crtc_state, 212 struct intel_plane *plane) 213 { 214 struct drm_i915_private *i915 = to_i915(plane->base.dev); 215 216 return DISPLAY_VER(i915) >= 13 && 217 crtc_state->uapi.async_flip && 218 plane->async_flip; 219 } 220 221 static unsigned int 222 intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, 223 const struct intel_plane_state *plane_state, 224 int color_plane) 225 { 226 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 227 const struct drm_framebuffer *fb = plane_state->hw.fb; 228 int width, height; 229 unsigned int rel_data_rate; 230 231 if (plane->id == PLANE_CURSOR) 232 return 0; 233 234 if (!plane_state->uapi.visible) 235 return 0; 236 237 /* 238 * We calculate extra ddb based 
on ratio plane rate/total data rate 239 * in case, in some cases we should not allocate extra ddb for the plane, 240 * so do not count its data rate, if this is the case. 241 */ 242 if (use_min_ddb(crtc_state, plane)) 243 return 0; 244 245 /* 246 * Src coordinates are already rotated by 270 degrees for 247 * the 90/270 degree plane rotation cases (to match the 248 * GTT mapping), hence no need to account for rotation here. 249 */ 250 width = drm_rect_width(&plane_state->uapi.src) >> 16; 251 height = drm_rect_height(&plane_state->uapi.src) >> 16; 252 253 /* UV plane does 1/2 pixel sub-sampling */ 254 if (color_plane == 1) { 255 width /= 2; 256 height /= 2; 257 } 258 259 rel_data_rate = width * height * fb->format->cpp[color_plane]; 260 261 return intel_adjusted_rate(&plane_state->uapi.src, 262 &plane_state->uapi.dst, 263 rel_data_rate); 264 } 265 266 int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, 267 struct intel_plane *plane, 268 bool *need_cdclk_calc) 269 { 270 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 271 const struct intel_plane_state *plane_state = 272 intel_atomic_get_new_plane_state(state, plane); 273 struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); 274 const struct intel_cdclk_state *cdclk_state; 275 const struct intel_crtc_state *old_crtc_state; 276 struct intel_crtc_state *new_crtc_state; 277 278 if (!plane_state->uapi.visible || !plane->min_cdclk) 279 return 0; 280 281 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 282 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 283 284 new_crtc_state->min_cdclk[plane->id] = 285 plane->min_cdclk(new_crtc_state, plane_state); 286 287 /* 288 * No need to check against the cdclk state if 289 * the min cdclk for the plane doesn't increase. 290 * 291 * Ie. we only ever increase the cdclk due to plane 292 * requirements. This can reduce back and forth 293 * display blinking due to constant cdclk changes. 
294 */ 295 if (new_crtc_state->min_cdclk[plane->id] <= 296 old_crtc_state->min_cdclk[plane->id]) 297 return 0; 298 299 cdclk_state = intel_atomic_get_cdclk_state(state); 300 if (IS_ERR(cdclk_state)) 301 return PTR_ERR(cdclk_state); 302 303 /* 304 * No need to recalculate the cdclk state if 305 * the min cdclk for the pipe doesn't increase. 306 * 307 * Ie. we only ever increase the cdclk due to plane 308 * requirements. This can reduce back and forth 309 * display blinking due to constant cdclk changes. 310 */ 311 if (new_crtc_state->min_cdclk[plane->id] <= 312 cdclk_state->min_cdclk[crtc->pipe]) 313 return 0; 314 315 drm_dbg_kms(&dev_priv->drm, 316 "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n", 317 plane->base.base.id, plane->base.name, 318 new_crtc_state->min_cdclk[plane->id], 319 crtc->base.base.id, crtc->base.name, 320 cdclk_state->min_cdclk[crtc->pipe]); 321 *need_cdclk_calc = true; 322 323 return 0; 324 } 325 326 static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state) 327 { 328 if (plane_state->hw.fb) 329 drm_framebuffer_put(plane_state->hw.fb); 330 331 memset(&plane_state->hw, 0, sizeof(plane_state->hw)); 332 } 333 334 void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, 335 const struct intel_plane_state *from_plane_state, 336 struct intel_crtc *crtc) 337 { 338 intel_plane_clear_hw_state(plane_state); 339 340 /* 341 * For the joiner secondary uapi.crtc will point at 342 * the primary crtc. So we explicitly assign the right 343 * secondary crtc to hw.crtc. uapi.crtc!=NULL simply 344 * indicates the plane is logically enabled on the uapi level. 345 */ 346 plane_state->hw.crtc = from_plane_state->uapi.crtc ? 
&crtc->base : NULL; 347 348 plane_state->hw.fb = from_plane_state->uapi.fb; 349 if (plane_state->hw.fb) 350 drm_framebuffer_get(plane_state->hw.fb); 351 352 plane_state->hw.alpha = from_plane_state->uapi.alpha; 353 plane_state->hw.pixel_blend_mode = 354 from_plane_state->uapi.pixel_blend_mode; 355 plane_state->hw.rotation = from_plane_state->uapi.rotation; 356 plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding; 357 plane_state->hw.color_range = from_plane_state->uapi.color_range; 358 plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter; 359 360 plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi); 361 plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi); 362 } 363 364 void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, 365 const struct intel_plane_state *from_plane_state) 366 { 367 intel_plane_clear_hw_state(plane_state); 368 369 memcpy(&plane_state->hw, &from_plane_state->hw, 370 sizeof(plane_state->hw)); 371 372 if (plane_state->hw.fb) 373 drm_framebuffer_get(plane_state->hw.fb); 374 } 375 376 void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, 377 struct intel_plane_state *plane_state) 378 { 379 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 380 381 crtc_state->active_planes &= ~BIT(plane->id); 382 crtc_state->scaled_planes &= ~BIT(plane->id); 383 crtc_state->nv12_planes &= ~BIT(plane->id); 384 crtc_state->c8_planes &= ~BIT(plane->id); 385 crtc_state->async_flip_planes &= ~BIT(plane->id); 386 crtc_state->data_rate[plane->id] = 0; 387 crtc_state->data_rate_y[plane->id] = 0; 388 crtc_state->rel_data_rate[plane->id] = 0; 389 crtc_state->rel_data_rate_y[plane->id] = 0; 390 crtc_state->min_cdclk[plane->id] = 0; 391 392 plane_state->uapi.visible = false; 393 } 394 395 static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state) 396 { 397 int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 398 int src_h = 
drm_rect_height(&plane_state->uapi.src) >> 16; 399 int dst_w = drm_rect_width(&plane_state->uapi.dst); 400 int dst_h = drm_rect_height(&plane_state->uapi.dst); 401 402 return src_w != dst_w || src_h != dst_h; 403 } 404 405 static bool intel_plane_do_async_flip(struct intel_plane *plane, 406 const struct intel_crtc_state *old_crtc_state, 407 const struct intel_crtc_state *new_crtc_state) 408 { 409 struct drm_i915_private *i915 = to_i915(plane->base.dev); 410 411 if (!plane->async_flip) 412 return false; 413 414 if (!new_crtc_state->uapi.async_flip) 415 return false; 416 417 /* 418 * In platforms after DISPLAY13, we might need to override 419 * first async flip in order to change watermark levels 420 * as part of optimization. 421 * 422 * And let's do this for all skl+ so that we can eg. change the 423 * modifier as well. 424 * 425 * TODO: For older platforms there is less reason to do this as 426 * only X-tile is supported with async flips, though we could 427 * extend this so other scanout parameters (stride/etc) could 428 * be changed as well... 
429 */ 430 return DISPLAY_VER(i915) < 9 || old_crtc_state->uapi.async_flip; 431 } 432 433 static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state, 434 const struct intel_plane_state *old_plane_state, 435 const struct intel_plane_state *new_plane_state) 436 { 437 struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); 438 bool old_visible = old_plane_state->uapi.visible; 439 bool new_visible = new_plane_state->uapi.visible; 440 u32 old_ctl = old_plane_state->ctl; 441 u32 new_ctl = new_plane_state->ctl; 442 bool modeset, turn_on, turn_off; 443 444 if (plane->id == PLANE_CURSOR) 445 return false; 446 447 modeset = intel_crtc_needs_modeset(new_crtc_state); 448 turn_off = old_visible && (!new_visible || modeset); 449 turn_on = new_visible && (!old_visible || modeset); 450 451 /* Must disable CxSR around plane enable/disable */ 452 if (turn_on || turn_off) 453 return true; 454 455 if (!old_visible || !new_visible) 456 return false; 457 458 /* 459 * Most plane control register updates are blocked while in CxSR. 460 * 461 * Tiling mode is one exception where the primary plane can 462 * apparently handle it, whereas the sprites can not (the 463 * sprite issue being only relevant on VLV/CHV where CxSR 464 * is actually possible with a sprite enabled). 
465 */ 466 if (plane->id == PLANE_PRIMARY) { 467 old_ctl &= ~DISP_TILED; 468 new_ctl &= ~DISP_TILED; 469 } 470 471 return old_ctl != new_ctl; 472 } 473 474 static bool ilk_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state, 475 const struct intel_plane_state *old_plane_state, 476 const struct intel_plane_state *new_plane_state) 477 { 478 struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); 479 bool old_visible = old_plane_state->uapi.visible; 480 bool new_visible = new_plane_state->uapi.visible; 481 bool modeset, turn_on; 482 483 if (plane->id == PLANE_CURSOR) 484 return false; 485 486 modeset = intel_crtc_needs_modeset(new_crtc_state); 487 turn_on = new_visible && (!old_visible || modeset); 488 489 /* 490 * ILK/SNB DVSACNTR/Sprite Enable 491 * IVB SPR_CTL/Sprite Enable 492 * "When in Self Refresh Big FIFO mode, a write to enable the 493 * plane will be internally buffered and delayed while Big FIFO 494 * mode is exiting." 495 * 496 * Which means that enabling the sprite can take an extra frame 497 * when we start in big FIFO mode (LP1+). Thus we need to drop 498 * down to LP0 and wait for vblank in order to make sure the 499 * sprite gets enabled on the next vblank after the register write. 500 * Doing otherwise would risk enabling the sprite one frame after 501 * we've already signalled flip completion. We can resume LP1+ 502 * once the sprite has been enabled. 503 * 504 * With experimental results seems this is needed also for primary 505 * plane, not only sprite plane. 506 */ 507 if (turn_on) 508 return true; 509 510 /* 511 * WaCxSRDisabledForSpriteScaling:ivb 512 * IVB SPR_SCALE/Scaling Enable 513 * "Low Power watermarks must be disabled for at least one 514 * frame before enabling sprite scaling, and kept disabled 515 * until sprite scaling is disabled." 516 * 517 * ILK/SNB DVSASCALE/Scaling Enable 518 * "When in Self Refresh Big FIFO mode, scaling enable will be 519 * masked off while Big FIFO mode is exiting." 
520 * 521 * Despite the w/a only being listed for IVB we assume that 522 * the ILK/SNB note has similar ramifications, hence we apply 523 * the w/a on all three platforms. 524 */ 525 return !intel_plane_is_scaled(old_plane_state) && 526 intel_plane_is_scaled(new_plane_state); 527 } 528 529 static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 530 struct intel_crtc_state *new_crtc_state, 531 const struct intel_plane_state *old_plane_state, 532 struct intel_plane_state *new_plane_state) 533 { 534 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 535 struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); 536 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 537 bool mode_changed = intel_crtc_needs_modeset(new_crtc_state); 538 bool was_crtc_enabled = old_crtc_state->hw.active; 539 bool is_crtc_enabled = new_crtc_state->hw.active; 540 bool turn_off, turn_on, visible, was_visible; 541 int ret; 542 543 if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 544 ret = skl_update_scaler_plane(new_crtc_state, new_plane_state); 545 if (ret) 546 return ret; 547 } 548 549 was_visible = old_plane_state->uapi.visible; 550 visible = new_plane_state->uapi.visible; 551 552 if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) 553 was_visible = false; 554 555 /* 556 * Visibility is calculated as if the crtc was on, but 557 * after scaler setup everything depends on it being off 558 * when the crtc isn't active. 559 * 560 * FIXME this is wrong for watermarks. Watermarks should also 561 * be computed as if the pipe would be active. Perhaps move 562 * per-plane wm computation to the .check_plane() hook, and 563 * only combine the results from all planes in the current place? 
564 */ 565 if (!is_crtc_enabled) { 566 intel_plane_set_invisible(new_crtc_state, new_plane_state); 567 visible = false; 568 } 569 570 if (!was_visible && !visible) 571 return 0; 572 573 turn_off = was_visible && (!visible || mode_changed); 574 turn_on = visible && (!was_visible || mode_changed); 575 576 drm_dbg_atomic(&dev_priv->drm, 577 "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 578 crtc->base.base.id, crtc->base.name, 579 plane->base.base.id, plane->base.name, 580 was_visible, visible, 581 turn_off, turn_on, mode_changed); 582 583 if (visible || was_visible) 584 new_crtc_state->fb_bits |= plane->frontbuffer_bit; 585 586 if (HAS_GMCH(dev_priv) && 587 i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) 588 new_crtc_state->disable_cxsr = true; 589 590 if ((IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) && 591 ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state)) 592 new_crtc_state->disable_cxsr = true; 593 594 if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { 595 new_crtc_state->do_async_flip = true; 596 new_crtc_state->async_flip_planes |= BIT(plane->id); 597 } else if (plane->need_async_flip_toggle_wa && 598 new_crtc_state->uapi.async_flip) { 599 /* 600 * On platforms with double buffered async flip bit we 601 * set the bit already one frame early during the sync 602 * flip (see {i9xx,skl}_plane_update_arm()). The 603 * hardware will therefore be ready to perform a real 604 * async flip during the next commit, without having 605 * to wait yet another frame for the bit to latch. 
606 */ 607 new_crtc_state->async_flip_planes |= BIT(plane->id); 608 } 609 610 return 0; 611 } 612 613 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, 614 struct intel_crtc_state *new_crtc_state, 615 const struct intel_plane_state *old_plane_state, 616 struct intel_plane_state *new_plane_state) 617 { 618 struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); 619 const struct drm_framebuffer *fb = new_plane_state->hw.fb; 620 int ret; 621 622 intel_plane_set_invisible(new_crtc_state, new_plane_state); 623 new_crtc_state->enabled_planes &= ~BIT(plane->id); 624 625 if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) 626 return 0; 627 628 ret = plane->check_plane(new_crtc_state, new_plane_state); 629 if (ret) 630 return ret; 631 632 if (fb) 633 new_crtc_state->enabled_planes |= BIT(plane->id); 634 635 /* FIXME pre-g4x don't work like this */ 636 if (new_plane_state->uapi.visible) 637 new_crtc_state->active_planes |= BIT(plane->id); 638 639 if (new_plane_state->uapi.visible && 640 intel_plane_is_scaled(new_plane_state)) 641 new_crtc_state->scaled_planes |= BIT(plane->id); 642 643 if (new_plane_state->uapi.visible && 644 intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) 645 new_crtc_state->nv12_planes |= BIT(plane->id); 646 647 if (new_plane_state->uapi.visible && 648 fb->format->format == DRM_FORMAT_C8) 649 new_crtc_state->c8_planes |= BIT(plane->id); 650 651 if (new_plane_state->uapi.visible || old_plane_state->uapi.visible) 652 new_crtc_state->update_planes |= BIT(plane->id); 653 654 if (new_plane_state->uapi.visible && 655 intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { 656 new_crtc_state->data_rate_y[plane->id] = 657 intel_plane_data_rate(new_crtc_state, new_plane_state, 0); 658 new_crtc_state->data_rate[plane->id] = 659 intel_plane_data_rate(new_crtc_state, new_plane_state, 1); 660 661 new_crtc_state->rel_data_rate_y[plane->id] = 662 
intel_plane_relative_data_rate(new_crtc_state, 663 new_plane_state, 0); 664 new_crtc_state->rel_data_rate[plane->id] = 665 intel_plane_relative_data_rate(new_crtc_state, 666 new_plane_state, 1); 667 } else if (new_plane_state->uapi.visible) { 668 new_crtc_state->data_rate[plane->id] = 669 intel_plane_data_rate(new_crtc_state, new_plane_state, 0); 670 671 new_crtc_state->rel_data_rate[plane->id] = 672 intel_plane_relative_data_rate(new_crtc_state, 673 new_plane_state, 0); 674 } 675 676 return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state, 677 old_plane_state, new_plane_state); 678 } 679 680 static struct intel_plane * 681 intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id) 682 { 683 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 684 struct intel_plane *plane; 685 686 for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { 687 if (plane->id == plane_id) 688 return plane; 689 } 690 691 return NULL; 692 } 693 694 int intel_plane_atomic_check(struct intel_atomic_state *state, 695 struct intel_plane *plane) 696 { 697 struct intel_display *display = to_intel_display(state); 698 struct intel_plane_state *new_plane_state = 699 intel_atomic_get_new_plane_state(state, plane); 700 const struct intel_plane_state *old_plane_state = 701 intel_atomic_get_old_plane_state(state, plane); 702 const struct intel_plane_state *new_primary_crtc_plane_state; 703 struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe); 704 const struct intel_crtc_state *old_crtc_state = 705 intel_atomic_get_old_crtc_state(state, crtc); 706 struct intel_crtc_state *new_crtc_state = 707 intel_atomic_get_new_crtc_state(state, crtc); 708 709 if (new_crtc_state && intel_crtc_is_joiner_secondary(new_crtc_state)) { 710 struct intel_crtc *primary_crtc = 711 intel_primary_crtc(new_crtc_state); 712 struct intel_plane *primary_crtc_plane = 713 intel_crtc_get_plane(primary_crtc, plane->id); 714 715 new_primary_crtc_plane_state = 716 
intel_atomic_get_new_plane_state(state, primary_crtc_plane); 717 } else { 718 new_primary_crtc_plane_state = new_plane_state; 719 } 720 721 intel_plane_copy_uapi_to_hw_state(new_plane_state, 722 new_primary_crtc_plane_state, 723 crtc); 724 725 new_plane_state->uapi.visible = false; 726 if (!new_crtc_state) 727 return 0; 728 729 return intel_plane_atomic_check_with_state(old_crtc_state, 730 new_crtc_state, 731 old_plane_state, 732 new_plane_state); 733 } 734 735 static struct intel_plane * 736 skl_next_plane_to_commit(struct intel_atomic_state *state, 737 struct intel_crtc *crtc, 738 struct skl_ddb_entry ddb[I915_MAX_PLANES], 739 struct skl_ddb_entry ddb_y[I915_MAX_PLANES], 740 unsigned int *update_mask) 741 { 742 struct intel_crtc_state *crtc_state = 743 intel_atomic_get_new_crtc_state(state, crtc); 744 struct intel_plane_state __maybe_unused *plane_state; 745 struct intel_plane *plane; 746 int i; 747 748 if (*update_mask == 0) 749 return NULL; 750 751 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 752 enum plane_id plane_id = plane->id; 753 754 if (crtc->pipe != plane->pipe || 755 !(*update_mask & BIT(plane_id))) 756 continue; 757 758 if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id], 759 ddb, I915_MAX_PLANES, plane_id) || 760 skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id], 761 ddb_y, I915_MAX_PLANES, plane_id)) 762 continue; 763 764 *update_mask &= ~BIT(plane_id); 765 ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id]; 766 ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id]; 767 768 return plane; 769 } 770 771 /* should never happen */ 772 drm_WARN_ON(state->base.dev, 1); 773 774 return NULL; 775 } 776 777 void intel_plane_update_noarm(struct intel_dsb *dsb, 778 struct intel_plane *plane, 779 const struct intel_crtc_state *crtc_state, 780 const struct intel_plane_state *plane_state) 781 { 782 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 783 784 
trace_intel_plane_update_noarm(plane, crtc); 785 786 if (plane->update_noarm) 787 plane->update_noarm(dsb, plane, crtc_state, plane_state); 788 } 789 790 void intel_plane_async_flip(struct intel_dsb *dsb, 791 struct intel_plane *plane, 792 const struct intel_crtc_state *crtc_state, 793 const struct intel_plane_state *plane_state, 794 bool async_flip) 795 { 796 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 797 798 trace_intel_plane_async_flip(plane, crtc, async_flip); 799 plane->async_flip(dsb, plane, crtc_state, plane_state, async_flip); 800 } 801 802 void intel_plane_update_arm(struct intel_dsb *dsb, 803 struct intel_plane *plane, 804 const struct intel_crtc_state *crtc_state, 805 const struct intel_plane_state *plane_state) 806 { 807 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 808 809 if (crtc_state->do_async_flip && plane->async_flip) { 810 intel_plane_async_flip(dsb, plane, crtc_state, plane_state, true); 811 return; 812 } 813 814 trace_intel_plane_update_arm(plane, crtc); 815 plane->update_arm(dsb, plane, crtc_state, plane_state); 816 } 817 818 void intel_plane_disable_arm(struct intel_dsb *dsb, 819 struct intel_plane *plane, 820 const struct intel_crtc_state *crtc_state) 821 { 822 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 823 824 trace_intel_plane_disable_arm(plane, crtc); 825 plane->disable_arm(dsb, plane, crtc_state); 826 } 827 828 void intel_crtc_planes_update_noarm(struct intel_dsb *dsb, 829 struct intel_atomic_state *state, 830 struct intel_crtc *crtc) 831 { 832 struct intel_crtc_state *new_crtc_state = 833 intel_atomic_get_new_crtc_state(state, crtc); 834 u32 update_mask = new_crtc_state->update_planes; 835 struct intel_plane_state *new_plane_state; 836 struct intel_plane *plane; 837 int i; 838 839 if (new_crtc_state->do_async_flip) 840 return; 841 842 /* 843 * Since we only write non-arming registers here, 844 * the order does not matter even for skl+. 
845 */ 846 for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { 847 if (crtc->pipe != plane->pipe || 848 !(update_mask & BIT(plane->id))) 849 continue; 850 851 /* TODO: for mailbox updates this should be skipped */ 852 if (new_plane_state->uapi.visible || 853 new_plane_state->planar_slave) 854 intel_plane_update_noarm(dsb, plane, 855 new_crtc_state, new_plane_state); 856 } 857 } 858 859 static void skl_crtc_planes_update_arm(struct intel_dsb *dsb, 860 struct intel_atomic_state *state, 861 struct intel_crtc *crtc) 862 { 863 struct intel_crtc_state *old_crtc_state = 864 intel_atomic_get_old_crtc_state(state, crtc); 865 struct intel_crtc_state *new_crtc_state = 866 intel_atomic_get_new_crtc_state(state, crtc); 867 struct skl_ddb_entry ddb[I915_MAX_PLANES]; 868 struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; 869 u32 update_mask = new_crtc_state->update_planes; 870 struct intel_plane *plane; 871 872 memcpy(ddb, old_crtc_state->wm.skl.plane_ddb, 873 sizeof(old_crtc_state->wm.skl.plane_ddb)); 874 memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y, 875 sizeof(old_crtc_state->wm.skl.plane_ddb_y)); 876 877 while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) { 878 struct intel_plane_state *new_plane_state = 879 intel_atomic_get_new_plane_state(state, plane); 880 881 /* 882 * TODO: for mailbox updates intel_plane_update_noarm() 883 * would have to be called here as well. 
	 */
		if (new_plane_state->uapi.visible ||
		    new_plane_state->planar_slave)
			intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
		else
			intel_plane_disable_arm(dsb, plane, new_crtc_state);
	}
}

/*
 * Arm the plane updates for a single CRTC on pre-skl platforms:
 * every plane in @state that belongs to @crtc and is flagged in
 * the crtc state's update_planes mask is either armed or disabled
 * based on its visibility.
 */
static void i9xx_crtc_planes_update_arm(struct intel_dsb *dsb,
					struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u32 update_mask = new_crtc_state->update_planes;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		/* skip planes on other pipes or not marked for update */
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		/*
		 * TODO: for mailbox updates intel_plane_update_noarm()
		 * would have to be called here as well.
		 */
		if (new_plane_state->uapi.visible)
			intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
		else
			intel_plane_disable_arm(dsb, plane, new_crtc_state);
	}
}

/*
 * Arm the plane updates for @crtc, dispatching to the skl+ or
 * pre-skl implementation based on the display version.
 */
void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
				  struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (DISPLAY_VER(i915) >= 9)
		skl_crtc_planes_update_arm(dsb, state, crtc);
	else
		i9xx_crtc_planes_update_arm(dsb, state, crtc);
}

/*
 * Clip the plane's source/destination rectangles against the pipe
 * source area and validate the scale factors against
 * [@min_scale, @max_scale].
 *
 * On success plane_state->uapi.visible reflects whether anything is
 * left of the plane after clipping, and the dst rectangle has been
 * translated to be relative to the plane's pipe.
 *
 * Returns 0 on success, -ERANGE on invalid scaling, -EINVAL when a
 * non-positionable plane does not cover the whole pipe.
 */
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
				      struct intel_crtc_state *crtc_state,
				      int min_scale, int max_scale,
				      bool can_position)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_rect *src = &plane_state->uapi.src;
	struct drm_rect *dst = &plane_state->uapi.dst;
	const struct drm_rect *clip = &crtc_state->pipe_src;
	unsigned int rotation = plane_state->hw.rotation;
	int hscale, vscale;

	/* No fb: nothing to clip, the plane ends up invisible. */
	if (!fb) {
		plane_state->uapi.visible = false;
		return 0;
	}

	/* Work in non-rotated fb coordinates, rotate back afterwards. */
	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", src, true);
		drm_rect_debug_print("dst: ", dst, false);
		return -ERANGE;
	}

	/*
	 * FIXME: This might need further adjustment for seamless scaling
	 * with phase information, for the 2p2 and 2p1 scenarios.
	 */
	plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!can_position && plane_state->uapi.visible &&
	    !drm_rect_equals(dst, clip)) {
		drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", clip, false);
		return -EINVAL;
	}

	/* final plane coordinates will be relative to the plane's pipe */
	drm_rect_translate(dst, -clip->x1, -clip->y1);

	return 0;
}

/*
 * Validate that the plane's source coordinates and size are aligned
 * to the format/modifier subsampling requirements, after rounding
 * the 16.16 fixed-point source rect down to whole pixels.
 */
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_rect *src = &plane_state->uapi.src;
	u32 src_x, src_y, src_w, src_h, hsub, vsub;
	bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);

	/*
	 * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
	 * abuses hsub/vsub so we can't use them here. But as they
	 * are limited to 32bpp RGB formats we don't actually need
	 * to check anything.
	 */
	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
		return 0;

	/*
	 * Hardware doesn't handle subpixel coordinates.
	 * Adjust to (macro)pixel boundary, but be careful not to
	 * increase the source viewport size, because that could
	 * push the downscaling factor out of bounds.
	 */
	src_x = src->x1 >> 16;
	src_w = drm_rect_width(src) >> 16;
	src_y = src->y1 >> 16;
	src_h = drm_rect_height(src) >> 16;

	/* Truncate the 16.16 fixed point rect to whole pixels. */
	drm_rect_init(src, src_x << 16, src_y << 16,
		      src_w << 16, src_h << 16);

	if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
		hsub = 2;
		vsub = 2;
	} else if (DISPLAY_VER(i915) >= 20 &&
		   intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
		/*
		 * This allows NV12 and P0xx formats to have odd size and/or odd
		 * source coordinates on DISPLAY_VER(i915) >= 20
		 */
		hsub = 1;
		vsub = 1;

		/* Wa_16023981245 */
		if ((DISPLAY_VERx100(i915) == 2000 ||
		     DISPLAY_VERx100(i915) == 3000) &&
		    src_x % 2 != 0)
			hsub = 2;
	} else {
		hsub = fb->format->hsub;
		vsub = fb->format->vsub;
	}

	/* Rotated planes need the stricter alignment on both axes. */
	if (rotated)
		hsub = vsub = max(hsub, vsub);

	if (src_x % hsub || src_w % hsub) {
		drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
			    src_x, src_w, hsub, str_yes_no(rotated));
		return -EINVAL;
	}

	if (src_y % vsub || src_h % vsub) {
		drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
			    src_y, src_h, vsub, str_yes_no(rotated));
		return -EINVAL;
	}

	return 0;
}

/*
 * Fold the fences currently in @resv into the plane state's wait
 * fence. If both a resv singleton fence and a pre-existing plane
 * fence are present, chain them so both get waited upon; otherwise
 * whichever one exists (if any) becomes the plane fence.
 *
 * Returns 0 on success, or a negative error code on failure; on
 * failure the plane state's fence is left untouched.
 */
static int add_dma_resv_fences(struct dma_resv *resv,
			       struct drm_plane_state *new_plane_state)
{
	/* take our own reference; it is dropped on the error path */
	struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
	struct dma_fence *new;
	int ret;

	/* collapse the resv's implicit-sync fences into a single one */
	ret = dma_resv_get_singleton(resv,
				     dma_resv_usage_rw(false), &new);
	if (ret)
		goto error;

	if (new && fence) {
		struct dma_fence_chain *chain = dma_fence_chain_alloc();

		if (!chain) {
			ret = -ENOMEM;
			goto error;
		}

		/* chain consumes the references to both fences */
		dma_fence_chain_init(chain, fence, new, 1);
		fence = &chain->base;

	} else if (new) {
		fence = new;
	}

	dma_fence_put(new_plane_state->fence);
	new_plane_state->fence = fence;
	return 0;

error:
	dma_fence_put(fence);
	return ret;
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	/* bump the scheduling priority of fences we wait on for display */
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_gem_object *obj = intel_fb_bo(new_plane_state->hw.fb);
	struct drm_gem_object *old_obj = intel_fb_bo(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *new_crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
			ret = add_dma_resv_fences(old_obj->resv,
						  &new_plane_state->uapi);
			if (ret < 0)
				return ret;
		}
	}

	/* disabling the plane: nothing to pin */
	if (!obj)
		return 0;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		return ret;

	/* populates uapi.fence from the bo's implicit fences */
	ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
	if (ret < 0)
		goto unpin_fb;

	if (new_plane_state->uapi.fence) {
		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
					     &attr);

		intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
						     new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	intel_display_rps_mark_interactive(dev_priv, state, true);

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @_old_plane_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
1187 */ 1188 static void 1189 intel_cleanup_plane_fb(struct drm_plane *plane, 1190 struct drm_plane_state *_old_plane_state) 1191 { 1192 struct intel_plane_state *old_plane_state = 1193 to_intel_plane_state(_old_plane_state); 1194 struct intel_atomic_state *state = 1195 to_intel_atomic_state(old_plane_state->uapi.state); 1196 struct drm_i915_private *dev_priv = to_i915(plane->dev); 1197 struct drm_gem_object *obj = intel_fb_bo(old_plane_state->hw.fb); 1198 1199 if (!obj) 1200 return; 1201 1202 intel_display_rps_mark_interactive(dev_priv, state, false); 1203 1204 intel_plane_unpin_fb(old_plane_state); 1205 } 1206 1207 static const struct drm_plane_helper_funcs intel_plane_helper_funcs = { 1208 .prepare_fb = intel_prepare_plane_fb, 1209 .cleanup_fb = intel_cleanup_plane_fb, 1210 }; 1211 1212 void intel_plane_helper_add(struct intel_plane *plane) 1213 { 1214 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 1215 } 1216 1217 void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state, 1218 struct intel_plane_state *new_plane_state) 1219 { 1220 if (!old_plane_state->ggtt_vma || 1221 old_plane_state->ggtt_vma == new_plane_state->ggtt_vma) 1222 return; 1223 1224 drm_vblank_work_init(&old_plane_state->unpin_work, old_plane_state->uapi.crtc, 1225 intel_cursor_unpin_work); 1226 } 1227