// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

/*
 * The hardware phase 0.0 refers to the center of the pixel.
 * We want to start from the top/left edge which is phase
 * -0.5. That matches how the hardware calculates the scaling
 * factors (from top-left of the first pixel to bottom-right
 * of the last pixel, as opposed to the pixel centers).
 *
 * For 4:2:0 subsampled chroma planes we obviously have to
 * adjust that so that the chroma sample position lands in
 * the right spot.
 *
 * Note that for packed YCbCr 4:2:2 formats there is no way to
 * control chroma siting. The hardware simply replicates the
 * chroma samples for both of the luma samples, and thus we don't
 * actually get the expected MPEG2 chroma siting convention :(
 * The same behaviour is observed on pre-SKL platforms as well.
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 * s = source sample position
 * d = destination sample position
 *
 * Downscaling 4:1:
 *  -0.5
 *     | 0.0
 *     |  |        1.5 (initial phase)
 *     |  |        |
 *     v  v        v
 *     |  s  |  s  |  s  |  s  |
 *     |           d           |
 *
 * Upscaling 1:4:
 *  -0.5
 *     | -0.375 (initial phase)
 *     |  |        0.0
 *     |  |        |
 *     v  v        v
 *     |           s           |
 *     |  d  |  d  |  d  |  d  |
 */
static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
        int phase = -0x8000;
        u16 trip = 0;

        if (chroma_cosited)
                phase += (sub - 1) * 0x8000 / sub;

        phase += scale / (2 * sub);

        /*
         * Hardware initial phase limited to [-0.5:1.5].
         * Since the max hardware scale factor is 3.0, we
         * should never actually exceed 1.0 here.
         */
        WARN_ON(phase < -0x8000 || phase > 0x18000);

        if (phase < 0)
                phase = 0x10000 + phase;
        else
                trip = PS_PHASE_TRIP;

        return ((phase >> 2) & PS_PHASE_MASK) | trip;
}
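
/*
 * Worked example of the formula above (illustrative only): for a 1:1
 * luma/RGB scale (scale = 0x10000, sub = 1, not cosited) the initial
 * phase is -0x8000 + 0x10000 / 2 = 0. For the 4:1 downscale pictured
 * above (scale = 0x40000) it is -0x8000 + 0x20000 = 0x18000 (+1.5),
 * and for the 1:4 upscale (scale = 0x4000) it is -0x8000 + 0x2000 =
 * -0x6000 (-0.375), matching the "initial phase" markers in the
 * diagrams. With the maximum scale factor of 3.0 (0x30000) the phase
 * tops out at -0x8000 + 0x18000 = 0x10000 (+1.0), hence the WARN_ON
 * bounds above.
 */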

#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define TGL_MAX_SRC_W 5120
#define TGL_MAX_SRC_H 8192
#define TGL_MAX_DST_W 8192
#define TGL_MAX_DST_H 8192
#define MTL_MAX_SRC_W 4096
#define MTL_MAX_SRC_H 8192
#define MTL_MAX_DST_W 8192
#define MTL_MAX_DST_H 8192
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
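
/*
 * Illustrative example (derived only from the limits above): with the TGL
 * limits, used for DISPLAY_VER 12 and 13, a 5120x2160 source scaled to a
 * 3840x2160 destination passes the range check in skl_update_scaler(),
 * while a source wider than 5120 pixels or a destination larger than
 * 8192x8192 is rejected there.
 */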

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  const struct drm_format_info *format,
                  u64 modifier, bool need_scaler)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;
        int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
        int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
        int min_src_w, min_src_h, min_dst_w, min_dst_h;
        int max_src_w, max_src_h, max_dst_w, max_dst_h;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        if (src_w != dst_w || src_h != dst_h)
                need_scaler = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
            need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                drm_dbg_kms(display->drm,
                            "Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * If the plane is being disabled, the scaler is no longer required,
         * or detach is forced:
         *  - free the scaler bound to this plane/crtc
         *  - to do so, update scaler_state->scaler_users
         *
         * Here the scaler state in crtc_state is set free so that the
         * scaler can be assigned to another user. The actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaler) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = false;

                        drm_dbg_kms(display->drm,
                                    "scaler_user index %u.%u: "
                                    "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                    crtc->pipe, scaler_user, *scaler_id,
                                    scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                drm_dbg_kms(display->drm,
                            "Planar YUV: src dimensions not met\n");
                return -EINVAL;
        }

        min_src_w = SKL_MIN_SRC_W;
        min_src_h = SKL_MIN_SRC_H;
        min_dst_w = SKL_MIN_DST_W;
        min_dst_h = SKL_MIN_DST_H;

        if (DISPLAY_VER(display) < 11) {
                max_src_w = SKL_MAX_SRC_W;
                max_src_h = SKL_MAX_SRC_H;
                max_dst_w = SKL_MAX_DST_W;
                max_dst_h = SKL_MAX_DST_H;
        } else if (DISPLAY_VER(display) < 12) {
                max_src_w = ICL_MAX_SRC_W;
                max_src_h = ICL_MAX_SRC_H;
                max_dst_w = ICL_MAX_DST_W;
                max_dst_h = ICL_MAX_DST_H;
        } else if (DISPLAY_VER(display) < 14) {
                max_src_w = TGL_MAX_SRC_W;
                max_src_h = TGL_MAX_SRC_H;
                max_dst_w = TGL_MAX_DST_W;
                max_dst_h = TGL_MAX_DST_H;
        } else {
                max_src_w = MTL_MAX_SRC_W;
                max_src_h = MTL_MAX_SRC_H;
                max_dst_w = MTL_MAX_DST_W;
                max_dst_h = MTL_MAX_DST_H;
        }

        /* range checks */
        if (src_w < min_src_w || src_h < min_src_h ||
            dst_w < min_dst_w || dst_h < min_dst_h ||
            src_w > max_src_w || src_h > max_src_h ||
            dst_w > max_dst_w || dst_h > max_dst_h) {
                drm_dbg_kms(display->drm,
                            "scaler_user index %u.%u: src %ux%u dst %ux%u "
                            "size is out of scaler range\n",
                            crtc->pipe, scaler_user, src_w, src_h,
                            dst_w, dst_h);
                return -EINVAL;
        }

        /*
         * The pipe scaler does not use all the bits of PIPESRC, at least
         * on the earlier platforms. So even when we're scaling a plane
         * the *pipe* source size must not be too large. For simplicity
         * we assume the limits match the scaler destination size limits.
         * Might not be 100% accurate on all platforms, but good enough for
         * now.
         */
        if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
                drm_dbg_kms(display->drm,
                            "scaler_user index %u.%u: pipe src size %ux%u "
                            "is out of scaler range\n",
                            crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        drm_dbg_kms(display->drm, "scaler_user index %u.%u: "
                    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                    crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                    scaler_state->scaler_users);

        return 0;
}

int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
        const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
        int width, height;

        if (crtc_state->pch_pfit.enabled) {
                width = drm_rect_width(&crtc_state->pch_pfit.dst);
                height = drm_rect_height(&crtc_state->pch_pfit.dst);
        } else {
                width = pipe_mode->crtc_hdisplay;
                height = pipe_mode->crtc_vdisplay;
        }
        return skl_update_scaler(crtc_state, !crtc_state->hw.active,
                                 SKL_CRTC_INDEX,
                                 &crtc_state->scaler_state.scaler_id,
                                 drm_rect_width(&crtc_state->pipe_src),
                                 drm_rect_height(&crtc_state->pipe_src),
                                 width, height, NULL, 0,
                                 crtc_state->pch_pfit.enabled);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state containing the scaler state to update
 * @plane_state: atomic plane state to update
 *
 * Return:
 *     0 - scaler_users updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
                            struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        bool force_detach = !fb || !plane_state->uapi.visible;
        bool need_scaler = false;

        /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
        if (!icl_is_hdr_plane(dev_priv, plane->id) &&
            fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
                need_scaler = true;

        return skl_update_scaler(crtc_state, force_detach,
                                 drm_plane_index(&plane->base),
                                 &plane_state->scaler_id,
                                 drm_rect_width(&plane_state->uapi.src) >> 16,
                                 drm_rect_height(&plane_state->uapi.src) >> 16,
                                 drm_rect_width(&plane_state->uapi.dst),
                                 drm_rect_height(&plane_state->uapi.dst),
                                 fb ? fb->format : NULL,
                                 fb ? fb->modifier : 0,
                                 need_scaler);
}
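
/*
 * Note on units: plane_state->uapi.src is in 16.16 fixed point (hence the
 * >> 16 above), while the destination rectangle and the pipe source size
 * passed by skl_update_scaler_crtc() are in whole pixels.
 */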

static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
                                 struct intel_crtc *crtc)
{
        int i;

        for (i = 0; i < crtc->num_scalers; i++) {
                if (scaler_state->scalers[i].in_use)
                        continue;

                scaler_state->scalers[i].in_use = true;

                return i;
        }

        return -1;
}

static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
                                     int num_scalers_need, struct intel_crtc *crtc,
                                     const char *name, int idx,
                                     struct intel_plane_state *plane_state,
                                     int *scaler_id)
{
        struct intel_display *display = to_intel_display(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 mode;

        if (*scaler_id < 0)
                *scaler_id = intel_allocate_scaler(scaler_state, crtc);

        if (drm_WARN(display->drm, *scaler_id < 0,
                     "Cannot find scaler for %s:%d\n", name, idx))
                return -EINVAL;

        /* set scaler mode */
        if (plane_state && plane_state->hw.fb &&
            plane_state->hw.fb->format->is_yuv &&
            plane_state->hw.fb->format->num_planes > 1) {
                struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

                if (DISPLAY_VER(display) == 9) {
                        mode = SKL_PS_SCALER_MODE_NV12;
                } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
                        /*
                         * On gen11+'s HDR planes we only use the scaler for
                         * scaling. They have a dedicated chroma upsampler, so
                         * we don't need the scaler to upsample the UV plane.
                         */
                        mode = PS_SCALER_MODE_NORMAL;
                } else {
                        struct intel_plane *linked =
                                plane_state->planar_linked_plane;

                        mode = PS_SCALER_MODE_PLANAR;

                        if (linked)
                                mode |= PS_BINDING_Y_PLANE(linked->id);
                }
        } else if (DISPLAY_VER(display) >= 10) {
                mode = PS_SCALER_MODE_NORMAL;
        } else if (num_scalers_need == 1 && crtc->num_scalers > 1) {
                /*
                 * When only one scaler is in use on a pipe with two scalers,
                 * scaler 0 operates in high quality (HQ) mode.
                 * In this case use scaler 0 to take advantage of HQ mode.
                 */
                scaler_state->scalers[*scaler_id].in_use = false;
                *scaler_id = 0;
                scaler_state->scalers[0].in_use = true;
                mode = SKL_PS_SCALER_MODE_HQ;
        } else {
                mode = SKL_PS_SCALER_MODE_DYN;
        }

        /*
         * FIXME: we should also check the scaler factors for pfit, so
         * this shouldn't be tied directly to planes.
         */
        if (plane_state && plane_state->hw.fb) {
                const struct drm_framebuffer *fb = plane_state->hw.fb;
                const struct drm_rect *src = &plane_state->uapi.src;
                const struct drm_rect *dst = &plane_state->uapi.dst;
                int hscale, vscale, max_vscale, max_hscale;

                /*
                 * FIXME: When two scalers are needed, but only one of
                 * them needs to downscale, we should make sure that
                 * the one that needs downscaling support is assigned
                 * as the first scaler, so we don't reject downscaling
                 * unnecessarily.
                 */

                if (DISPLAY_VER(display) >= 14) {
                        /*
                         * On display version 14 and up only the first scaler
                         * supports a vertical scaling factor above 1.0 (up to
                         * 3.0), while a horizontal scaling factor of up to
                         * 3.0 is supported on all scalers.
                         */
                        max_hscale = 0x30000 - 1;
                        if (*scaler_id == 0)
                                max_vscale = 0x30000 - 1;
                        else
                                max_vscale = 0x10000;

                } else if (DISPLAY_VER(display) >= 10 ||
                           !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
                        max_hscale = 0x30000 - 1;
                        max_vscale = 0x30000 - 1;
                } else {
                        max_hscale = 0x20000 - 1;
                        max_vscale = 0x20000 - 1;
                }

                /*
                 * FIXME: We should change the if-else block above to
                 * support HQ vs dynamic scaler properly.
                 */

                /* Check if required scaling is within limits */
                hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
                vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);

                if (hscale < 0 || vscale < 0) {
                        drm_dbg_kms(display->drm,
                                    "Scaler %d doesn't support required plane scaling\n",
                                    *scaler_id);
                        drm_rect_debug_print("src: ", src, true);
                        drm_rect_debug_print("dst: ", dst, false);

                        return -EINVAL;
                }
        }

        drm_dbg_kms(display->drm, "Attached scaler id %u.%u to %s:%d\n",
                    crtc->pipe, *scaler_id, name, idx);
        scaler_state->scalers[*scaler_id].mode = mode;

        return 0;
}
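
/*
 * The max_hscale/max_vscale limits above are 16.16 fixed point downscale
 * ratios (src/dst) as computed by drm_rect_calc_hscale()/_vscale():
 * 0x10000 is exactly 1.0, 0x20000 - 1 is just under 2.0 and 0x30000 - 1 is
 * just under 3.0. Illustrative example: downscaling a 3840 pixel wide
 * source to a 1920 pixel wide destination gives hscale == 0x20000 (2.0),
 * which is accepted against a 0x30000 - 1 limit but rejected (negative
 * return value) against 0x20000 - 1.
 */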

static int setup_crtc_scaler(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        return intel_atomic_setup_scaler(scaler_state,
                                         hweight32(scaler_state->scaler_users),
                                         crtc, "CRTC", crtc->base.base.id,
                                         NULL, &scaler_state->scaler_id);
}

static int setup_plane_scaler(struct intel_atomic_state *state,
                              struct intel_crtc *crtc,
                              struct intel_plane *plane)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_plane_state *plane_state;

        /* plane on different crtc cannot be a scaler user of this crtc */
        if (drm_WARN_ON(display->drm, plane->pipe != crtc->pipe))
                return 0;

        plane_state = intel_atomic_get_new_plane_state(state, plane);

        /*
         * GLK+ scalers don't have a HQ mode so it
         * isn't necessary to change between HQ and dyn mode
         * on those platforms.
         */
        if (!plane_state && DISPLAY_VER(display) >= 10)
                return 0;

        plane_state = intel_atomic_get_plane_state(state, plane);
        if (IS_ERR(plane_state))
                return PTR_ERR(plane_state);

        return intel_atomic_setup_scaler(scaler_state,
                                         hweight32(scaler_state->scaler_users),
                                         crtc, "PLANE", plane->base.base.id,
                                         plane_state, &plane_state->scaler_id);
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @state: atomic state
 * @crtc: crtc
 *
 * This function sets up scalers based on the staged scaling requests for
 * a @crtc and its planes. It is called from the crtc level check path. If
 * the request can be supported, it attaches scalers to the requested planes
 * and crtc.
 *
 * This function takes into account the current scaler(s) in use by any
 * planes that are not part of this atomic state.
 *
 * Returns:
 *   0 - scalers were setup successfully
 *   error code - otherwise
 */
int intel_atomic_setup_scalers(struct intel_atomic_state *state,
                               struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(crtc);
        struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        int num_scalers_need;
        int i;

        num_scalers_need = hweight32(scaler_state->scaler_users);

        /*
         * High level flow:
         * - staged scaler requests are already in scaler_state->scaler_users
         * - check whether staged scaling requests can be supported
         * - add planes using scalers that aren't in current transaction
         * - assign scalers to requested users
         * - as part of plane commit, scalers will be committed
         *   (i.e., either attached or detached) to respective planes in hw
         * - as part of crtc_commit, scaler will be either attached or detached
         *   to crtc in hw
         */

        /* fail if required scalers > available scalers */
        if (num_scalers_need > crtc->num_scalers) {
                drm_dbg_kms(display->drm,
                            "Too many scaling requests %d > %d\n",
                            num_scalers_need, crtc->num_scalers);
                return -EINVAL;
        }

        /* walk through scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
                int ret;

                /* skip if scaler not required */
                if (!(scaler_state->scaler_users & (1 << i)))
                        continue;

                if (i == SKL_CRTC_INDEX) {
                        ret = setup_crtc_scaler(state, crtc);
                        if (ret)
                                return ret;
                } else {
                        struct intel_plane *plane =
                                to_intel_plane(drm_plane_from_index(display->drm, i));

                        ret = setup_plane_scaler(state, crtc, plane);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
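
/*
 * For illustration: the pipe scaler request occupies bit SKL_CRTC_INDEX of
 * scaler_users (staged by skl_update_scaler_crtc()), while each plane
 * request occupies the bit at drm_plane_index() (staged by
 * skl_update_scaler_plane()). With one of each staged, num_scalers_need is
 * 2 and the loop above calls setup_crtc_scaler() once and
 * setup_plane_scaler() once.
 */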

static int glk_coef_tap(int i)
{
        return i % 7;
}

static u16 glk_nearest_filter_coef(int t)
{
        return t == 3 ? 0x0800 : 0x3000;
}

/*
 * Theory behind setting nearest-neighbor integer scaling:
 *
 * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
 * The letter represents the filter tap (D is the center tap) and the number
 * represents the coefficient set for a phase (0-16).
 *
 *	+------------+--------------------------+--------------------------+
 *	|Index value | Data value coefficient 1 | Data value coefficient 2 |
 *	+------------+--------------------------+--------------------------+
 *	|    00h     |            B0            |            A0            |
 *	+------------+--------------------------+--------------------------+
 *	|    01h     |            D0            |            C0            |
 *	+------------+--------------------------+--------------------------+
 *	|    02h     |            F0            |            E0            |
 *	+------------+--------------------------+--------------------------+
 *	|    03h     |            A1            |            G0            |
 *	+------------+--------------------------+--------------------------+
 *	|    04h     |            C1            |            B1            |
 *	+------------+--------------------------+--------------------------+
 *	|    ...     |            ...           |            ...           |
 *	+------------+--------------------------+--------------------------+
 *	|    38h     |            B16           |            A16           |
 *	+------------+--------------------------+--------------------------+
 *	|    39h     |            D16           |            C16           |
 *	+------------+--------------------------+--------------------------+
 *	|    3Ah     |            F16           |            E16           |
 *	+------------+--------------------------+--------------------------+
 *	|    3Bh     |         Reserved         |            G16           |
 *	+------------+--------------------------+--------------------------+
 *
 * To enable nearest-neighbor scaling, program the scaler coefficients with
 * the center tap (Dxx) values set to 1 and all other values set to 0, as
 * per SCALER_COEFFICIENT_FORMAT.
 */

static void glk_program_nearest_filter_coefs(struct intel_display *display,
                                             enum pipe pipe, int id, int set)
{
        int i;

        intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set),
                          PS_COEF_INDEX_AUTO_INC);

        for (i = 0; i < 17 * 7; i += 2) {
                u32 tmp;
                int t;

                t = glk_coef_tap(i);
                tmp = glk_nearest_filter_coef(t);

                t = glk_coef_tap(i + 1);
                tmp |= glk_nearest_filter_coef(t) << 16;

                intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(pipe, id, set),
                                  tmp);
        }

        intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
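
/*
 * The loop above issues 60 data writes of two 16-bit coefficients each,
 * covering all 119 coefficients; the unused half of the final dword
 * corresponds to the Reserved slot of index 3Bh in the table above. With
 * PS_COEF_INDEX_AUTO_INC set the hardware advances the index after every
 * data write, so the index register only needs to be programmed once
 * before the loop and is then cleared afterwards. The values 0x0800 and
 * 0x3000 are presumably the SCALER_COEFFICIENT_FORMAT encodings of 1.0
 * (center tap D) and 0.0 (all other taps) called for in the comment above.
 */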

static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
{
        if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
                return (PS_FILTER_PROGRAMMED |
                        PS_Y_VERT_FILTER_SELECT(set) |
                        PS_Y_HORZ_FILTER_SELECT(set) |
                        PS_UV_VERT_FILTER_SELECT(set) |
                        PS_UV_HORZ_FILTER_SELECT(set));
        }

        return PS_FILTER_MEDIUM;
}

static void skl_scaler_setup_filter(struct intel_display *display, enum pipe pipe,
                                    int id, int set, enum drm_scaling_filter filter)
{
        switch (filter) {
        case DRM_SCALING_FILTER_DEFAULT:
                break;
        case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
                glk_program_nearest_filter_coefs(display, pipe, id, set);
                break;
        default:
                MISSING_CASE(filter);
        }
}

void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
        u16 uv_rgb_hphase, uv_rgb_vphase;
        enum pipe pipe = crtc->pipe;
        int width = drm_rect_width(dst);
        int height = drm_rect_height(dst);
        int x = dst->x1;
        int y = dst->y1;
        int hscale, vscale;
        struct drm_rect src;
        int id;
        u32 ps_ctrl;

        if (!crtc_state->pch_pfit.enabled)
                return;

        if (drm_WARN_ON(display->drm,
                        crtc_state->scaler_state.scaler_id < 0))
                return;

        drm_rect_init(&src, 0, 0,
                      drm_rect_width(&crtc_state->pipe_src) << 16,
                      drm_rect_height(&crtc_state->pipe_src) << 16);

        hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
        vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

        uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
        uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

        id = scaler_state->scaler_id;

        ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
                skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);

        skl_scaler_setup_filter(display, pipe, id, 0,
                                crtc_state->hw.scaling_filter);

        intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);

        intel_de_write_fw(display, SKL_PS_VPHASE(pipe, id),
                          PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
        intel_de_write_fw(display, SKL_PS_HPHASE(pipe, id),
                          PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
        intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
                          PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
        intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
                          PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
}
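
/*
 * The pipe scaler above scales the blended pipe output rather than an
 * individual plane, so no chroma subsampling adjustment is made: both
 * phases are computed with sub == 1 and chroma_cosited == false, and the
 * dedicated Y phase field is simply programmed to 0.
 */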

void
skl_program_plane_scaler(struct intel_plane *plane,
                         const struct intel_crtc_state *crtc_state,
                         const struct intel_plane_state *plane_state)
{
        struct intel_display *display = to_intel_display(plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        enum pipe pipe = plane->pipe;
        int scaler_id = plane_state->scaler_id;
        const struct intel_scaler *scaler =
                &crtc_state->scaler_state.scalers[scaler_id];
        int crtc_x = plane_state->uapi.dst.x1;
        int crtc_y = plane_state->uapi.dst.y1;
        u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
        u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
        u16 y_hphase, uv_rgb_hphase;
        u16 y_vphase, uv_rgb_vphase;
        int hscale, vscale;
        u32 ps_ctrl;

        hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
                                      &plane_state->uapi.dst,
                                      0, INT_MAX);
        vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
                                      &plane_state->uapi.dst,
                                      0, INT_MAX);

        /* TODO: handle sub-pixel coordinates */
        if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
            !icl_is_hdr_plane(dev_priv, plane->id)) {
                y_hphase = skl_scaler_calc_phase(1, hscale, false);
                y_vphase = skl_scaler_calc_phase(1, vscale, false);

                /* MPEG2 chroma siting convention */
                uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
                uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
        } else {
                /* not used */
                y_hphase = 0;
                y_vphase = 0;

                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
        }

        ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
                skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);

        skl_scaler_setup_filter(display, pipe, scaler_id, 0,
                                plane_state->hw.scaling_filter);

        intel_de_write_fw(display, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
        intel_de_write_fw(display, SKL_PS_VPHASE(pipe, scaler_id),
                          PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
        intel_de_write_fw(display, SKL_PS_HPHASE(pipe, scaler_id),
                          PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
        intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, scaler_id),
                          PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
        intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, scaler_id),
                          PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
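
/*
 * Worked example for the planar YUV path above (illustrative): at a 1:1
 * plane scale (hscale == vscale == 0x10000) the luma phases evaluate to 0,
 * uv_rgb_hphase = skl_scaler_calc_phase(2, 0x10000, true) =
 * -0x8000 + 0x4000 + 0x4000 = 0 and uv_rgb_vphase =
 * skl_scaler_calc_phase(2, 0x10000, false) = -0x8000 + 0x4000 = -0x4000,
 * i.e. chroma is treated as horizontally co-sited with luma but vertically
 * sited between luma rows, per the MPEG2 4:2:0 siting convention noted
 * above.
 */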

static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
        struct intel_display *display = to_intel_display(crtc);

        intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
        intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
        intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}

/*
 * This function detaches (i.e. unbinds) unused scalers in hardware.
 */
void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        int i;

        /* loop through and disable scalers that aren't in use */
        for (i = 0; i < crtc->num_scalers; i++) {
                if (!scaler_state->scalers[i].in_use)
                        skl_detach_scaler(crtc, i);
        }
}
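
/*
 * This is effectively where the "staged freeing" in skl_update_scaler()
 * takes effect in hardware: a scaler whose in_use flag was cleared during
 * the atomic check gets its registers cleared here during the commit.
 */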

void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        int i;

        for (i = 0; i < crtc->num_scalers; i++)
                skl_detach_scaler(crtc, i);
}

void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
        int id = -1;
        int i;

        /* find scaler attached to this pipe */
        for (i = 0; i < crtc->num_scalers; i++) {
                u32 ctl, pos, size;

                ctl = intel_de_read(display, SKL_PS_CTRL(crtc->pipe, i));
                if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE))
                        continue;

                id = i;
                crtc_state->pch_pfit.enabled = true;

                pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
                size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));

                drm_rect_init(&crtc_state->pch_pfit.dst,
                              REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
                              REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
                              REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
                              REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));

                scaler_state->scalers[i].in_use = true;
                break;
        }

        scaler_state->scaler_id = id;
        if (id >= 0)
                scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
        else
                scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}