// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_print.h>

#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

/*
 * The hardware phase 0.0 refers to the center of the pixel.
 * We want to start from the top/left edge which is phase
 * -0.5. That matches how the hardware calculates the scaling
 * factors (from top-left of the first pixel to bottom-right
 * of the last pixel, as opposed to the pixel centers).
 *
 * For 4:2:0 subsampled chroma planes we obviously have to
 * adjust that so that the chroma sample position lands in
 * the right spot.
 *
 * Note that for packed YCbCr 4:2:2 formats there is no way to
 * control chroma siting. The hardware simply replicates the
 * chroma samples for both of the luma samples, and thus we don't
 * actually get the expected MPEG2 chroma siting convention :(
 * The same behaviour is observed on pre-SKL platforms as well.
 *
 * Theory behind the formula (note that we ignore sub-pixel
 * source coordinates):
 * s = source sample position
 * d = destination sample position
 *
 * Downscaling 4:1:
 * -0.5
 *  | 0.0
 *  | |     1.5 (initial phase)
 *  | |      |
 *  v v      v
 *  | s | s | s | s |
 *  |       d       |
 *
 * Upscaling 1:4:
 * -0.5
 *  | -0.375 (initial phase)
 *  |  |     0.0
 *  |  |      |
 *  v  v      v
 *  |       s       |
 *  | d | d | d | d |
 */

/*
 * Compute the initial phase register value for one scaler direction.
 *
 * @sub: chroma subsampling factor of the plane being phased (1 for
 *       luma/RGB, 2 for 4:2:0 chroma)
 * @scale: scale factor in .16 fixed point (src/dst)
 * @chroma_cosited: chroma siting is cosited rather than interstitial
 *
 * Returns the value for the PS_*PHASE register field, including the
 * PS_PHASE_TRIP bit when the phase is non-negative.
 */
static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
{
	int phase = -0x8000; /* start at -0.5 in .16 fixed point */
	u16 trip = 0;

	if (chroma_cosited)
		phase += (sub - 1) * 0x8000 / sub;

	/* advance by half a destination sample step */
	phase += scale / (2 * sub);

	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
	WARN_ON(phase < -0x8000 || phase > 0x18000);

	/* negative phases are encoded as two's-complement-style offsets */
	if (phase < 0)
		phase = 0x10000 + phase;
	else
		trip = PS_PHASE_TRIP;

	return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

/*
 * Minimum scaler source size; YUV semiplanar (e.g. NV12) sources need a
 * larger minimum than other formats.
 */
static void skl_scaler_min_src_size(const struct drm_format_info *format,
				    u64 modifier, int *min_w, int *min_h)
{
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier)) {
		*min_w = 16;
		*min_h = 16;
	} else {
		*min_w = 8;
		*min_h = 8;
	}
}

/* Maximum scaler source size, per display hardware generation. */
static void skl_scaler_max_src_size(struct intel_display *display,
				    int *max_w, int *max_h)
{
	if (DISPLAY_VER(display) >= 14) {
		*max_w = 4096;
		*max_h = 8192;
	} else if (DISPLAY_VER(display) >= 12) {
		*max_w = 5120;
		*max_h = 8192;
	} else if (DISPLAY_VER(display) == 11) {
		*max_w = 5120;
		*max_h = 4096;
	} else {
		*max_w = 4096;
		*max_h = 4096;
	}
}

/* Minimum scaler destination size (same for all generations). */
static void skl_scaler_min_dst_size(int *min_w, int *min_h)
{
	*min_w = 8;
	*min_h = 8;
}

/* Maximum scaler destination size, per display hardware generation. */
static void skl_scaler_max_dst_size(struct intel_crtc *crtc,
				    int *max_w, int *max_h)
{
	struct intel_display *display = to_intel_display(crtc);

	if (DISPLAY_VER(display) >= 12) {
		*max_w = 8192;
		*max_h = 8192;
	} else if (DISPLAY_VER(display) == 11) {
		*max_w = 5120;
		*max_h = 4096;
	} else {
		*max_w = 4096;
		*max_h = 4096;
	}
}

/*
 * Reject modes that cannot produce YCbCr 4:2:0 output on a single pipe,
 * presumably because single-pipe 4:2:0 output depends on the pipe scaler
 * and thus on its source size limits — confirm against bspec.
 */
enum drm_mode_status
skl_scaler_mode_valid(struct intel_display *display,
		      const struct drm_display_mode *mode,
		      enum intel_output_format output_format,
		      int num_joined_pipes)
{
	int max_h, max_w;

	if (num_joined_pipes < 2 && output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		skl_scaler_max_src_size(display, &max_w, &max_h);
		/*
		 * NOTE(review): hdisplay is compared against max_h rather
		 * than max_w here, which looks suspicious (they differ on
		 * several generations) — confirm whether max_w was intended.
		 */
		if (mode->hdisplay > max_h)
			return MODE_NO_420;
	}

	return MODE_OK;
}

/*
 * Stage allocation or release of a scaler for one user (a plane or the
 * crtc/pipe) in the crtc_state. Only software state is updated here; the
 * actual hardware programming happens later during plane/pfit commit.
 *
 * Returns 0 on success, -EINVAL if the request is out of scaler limits.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
	int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
	int min_src_w, min_src_h, min_dst_w, min_dst_h;
	int max_src_w, max_src_h, max_dst_w, max_dst_h;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] scaling not supported with IF-ID mode\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = false;

			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    crtc->base.base.id, crtc->base.name,
				    crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
	skl_scaler_max_src_size(display, &max_src_w, &max_src_h);

	skl_scaler_min_dst_size(&min_dst_w, &min_dst_h);
	skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h);

	/* range checks */
	if (src_w < min_src_w || src_h < min_src_h ||
	    dst_w < min_dst_w || dst_h < min_dst_h ||
	    src_w > max_src_w || src_h > max_src_h ||
	    dst_w > max_dst_w || dst_h > max_dst_h) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/*
	 * The pipe scaler does not use all the bits of PIPESRC, at least
	 * on the earlier platforms. So even when we're scaling a plane
	 * the *pipe* source size must not be too large. For simplicity
	 * we assume the limits match the scaler destination size limits.
	 * Might not be 100% accurate on all platforms, but good enough for
	 * now.
	 */
	if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] scaler_user index %u.%u: pipe src size %ux%u "
			    "is out of scaler range\n",
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(display->drm, "[CRTC:%d:%s] scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}

/*
 * Stage a scaler update for the crtc itself (panel fitting). Uses the
 * pch_pfit destination when pfit is enabled, otherwise the pipe mode size,
 * and detaches the scaler when the crtc is not active.
 */
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int width, height;

	if (crtc_state->pch_pfit.enabled) {
		width = drm_rect_width(&crtc_state->pch_pfit.dst);
		height = drm_rect_height(&crtc_state->pch_pfit.dst);
	} else {
		width = pipe_mode->crtc_hdisplay;
		height = pipe_mode->crtc_vdisplay;
	}
	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
				 SKL_CRTC_INDEX,
				 &crtc_state->scaler_state.scaler_id,
				 drm_rect_width(&crtc_state->pipe_src),
				 drm_rect_height(&crtc_state->pipe_src),
				 width, height, NULL, 0,
				 crtc_state->pch_pfit.enabled);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
			    struct intel_plane_state *plane_state)
{
	struct intel_display *display = to_intel_display(plane_state);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(display, plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	/* src coordinates are .16 fixed point, dst are integer pixels */
	return skl_update_scaler(crtc_state, force_detach,
				 drm_plane_index(&plane->base),
				 &plane_state->scaler_id,
				 drm_rect_width(&plane_state->uapi.src) >> 16,
				 drm_rect_height(&plane_state->uapi.src) >> 16,
				 drm_rect_width(&plane_state->uapi.dst),
				 drm_rect_height(&plane_state->uapi.dst),
				 fb ? fb->format : NULL,
				 fb ? fb->modifier : 0,
				 need_scaler);
}

/*
 * Claim the first free scaler on the crtc and return its index, or -1 if
 * all scalers are already in use.
 */
static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
				 struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++) {
		if (scaler_state->scalers[i].in_use)
			continue;

		scaler_state->scalers[i].in_use = true;

		return i;
	}

	return -1;
}

/*
 * Compute the maximum downscale factors (.16 fixed point; "0x30000 - 1"
 * is just under 3.0) supported by @scaler_id for the given plane format
 * and hardware generation.
 */
static void
calculate_max_scale(struct intel_crtc *crtc,
		    bool is_yuv_semiplanar,
		    int scaler_id,
		    int *max_hscale, int *max_vscale)
{
	struct intel_display *display = to_intel_display(crtc);

	/*
	 * FIXME: When two scalers are needed, but only one of
	 * them needs to downscale, we should make sure that
	 * the one that needs downscaling support is assigned
	 * as the first scaler, so we don't reject downscaling
	 * unnecessarily.
	 */

	if (DISPLAY_VER(display) >= 14) {
		/*
		 * On versions 14 and up, only the first
		 * scaler supports a vertical scaling factor
		 * of more than 1.0, while a horizontal
		 * scaling factor of 3.0 is supported.
		 */
		*max_hscale = 0x30000 - 1;

		if (scaler_id == 0)
			*max_vscale = 0x30000 - 1;
		else
			*max_vscale = 0x10000;
	} else if (DISPLAY_VER(display) >= 10 || !is_yuv_semiplanar) {
		*max_hscale = 0x30000 - 1;
		*max_vscale = 0x30000 - 1;
	} else {
		/* gen9 YUV semiplanar: limited to just under 2.0 */
		*max_hscale = 0x20000 - 1;
		*max_vscale = 0x20000 - 1;
	}
}

/*
 * Bind a hardware scaler to one staged user (plane or crtc): allocate a
 * scaler if none is attached yet, select the scaler mode for the user's
 * format/generation, and validate the requested scale factors against the
 * hardware limits. @name/@idx are only used for debug messages.
 */
static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
				     int num_scalers_need, struct intel_crtc *crtc,
				     const char *name, int idx,
				     struct intel_plane_state *plane_state,
				     int *scaler_id)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	u32 mode;
	int hscale = 0;
	int vscale = 0;

	if (*scaler_id < 0)
		*scaler_id = intel_allocate_scaler(scaler_state, crtc);

	if (drm_WARN(display->drm, *scaler_id < 0,
		     "Cannot find scaler for %s:%d\n", name, idx))
		return -EINVAL;

	/* set scaler mode */
	if (plane_state && plane_state->hw.fb &&
	    plane_state->hw.fb->format->is_yuv &&
	    plane_state->hw.fb->format->num_planes > 1) {
		struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

		if (DISPLAY_VER(display) == 9) {
			mode = SKL_PS_SCALER_MODE_NV12;
		} else if (icl_is_hdr_plane(display, plane->id)) {
			/*
			 * On gen11+'s HDR planes we only use the scaler for
			 * scaling. They have a dedicated chroma upsampler, so
			 * we don't need the scaler to upsample the UV plane.
			 */
			mode = PS_SCALER_MODE_NORMAL;
		} else {
			struct intel_plane *linked =
				plane_state->planar_linked_plane;

			mode = PS_SCALER_MODE_PLANAR;

			/* bind the linked Y plane to the same scaler */
			if (linked)
				mode |= PS_BINDING_Y_PLANE(linked->id);
		}
	} else if (DISPLAY_VER(display) >= 10) {
		mode = PS_SCALER_MODE_NORMAL;
	} else if (num_scalers_need == 1 && crtc->num_scalers > 1) {
		/*
		 * when only 1 scaler is in use on a pipe with 2 scalers
		 * scaler 0 operates in high quality (HQ) mode.
		 * In this case use scaler 0 to take advantage of HQ mode
		 */
		scaler_state->scalers[*scaler_id].in_use = false;
		*scaler_id = 0;
		scaler_state->scalers[0].in_use = true;
		mode = SKL_PS_SCALER_MODE_HQ;
	} else {
		mode = SKL_PS_SCALER_MODE_DYN;
	}

	/* plane scaling: validate against the per-scaler limits */
	if (plane_state && plane_state->hw.fb) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		const struct drm_rect *src = &plane_state->uapi.src;
		const struct drm_rect *dst = &plane_state->uapi.dst;
		int max_hscale, max_vscale;

		calculate_max_scale(crtc,
				    intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier),
				    *scaler_id, &max_hscale, &max_vscale);

		/*
		 * FIXME: We should change the if-else block above to
		 * support HQ vs dynamic scaler properly.
		 */

		/* Check if required scaling is within limits */
		hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
		vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);

		if (hscale < 0 || vscale < 0) {
			drm_dbg_kms(display->drm,
				    "[CRTC:%d:%s] scaler %d doesn't support required plane scaling\n",
				    crtc->base.base.id, crtc->base.name, *scaler_id);
			drm_rect_debug_print("src: ", src, true);
			drm_rect_debug_print("dst: ", dst, false);

			return -EINVAL;
		}
	}

	/* pipe scaling (panel fitter): validate pipe_src -> pfit dst */
	if (crtc_state->pch_pfit.enabled) {
		struct drm_rect src;
		int max_hscale, max_vscale;

		drm_rect_init(&src, 0, 0,
			      drm_rect_width(&crtc_state->pipe_src) << 16,
			      drm_rect_height(&crtc_state->pipe_src) << 16);

		calculate_max_scale(crtc, 0, *scaler_id,
				    &max_hscale, &max_vscale);

		/*
		 * When configured for Pipe YUV 420 encoding for port output,
		 * limit downscaling to less than 1.5 (source/destination) in
		 * the horizontal direction and 1.0 in the vertical direction.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
			max_hscale = 0x18000 - 1;
			max_vscale = 0x10000;
		}

		hscale = drm_rect_calc_hscale(&src, &crtc_state->pch_pfit.dst,
					      0, max_hscale);
		vscale = drm_rect_calc_vscale(&src, &crtc_state->pch_pfit.dst,
					      0, max_vscale);

		if (hscale < 0 || vscale < 0) {
			drm_dbg_kms(display->drm,
				    "Scaler %d doesn't support required pipe scaling\n",
				    *scaler_id);
			drm_rect_debug_print("src: ", &src, true);
			drm_rect_debug_print("dst: ", &crtc_state->pch_pfit.dst, false);

			return -EINVAL;
		}
	}

	scaler_state->scalers[*scaler_id].hscale = hscale;
	scaler_state->scalers[*scaler_id].vscale = vscale;

	drm_dbg_kms(display->drm, "[CRTC:%d:%s] attached scaler id %u.%u to %s:%d\n",
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe, *scaler_id, name, idx);
	scaler_state->scalers[*scaler_id].mode = mode;

	return 0;
}

/* Assign a scaler to the crtc's own (panel fitting) scaling request. */
static int setup_crtc_scaler(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	return intel_atomic_setup_scaler(crtc_state,
					 hweight32(scaler_state->scaler_users),
					 crtc, "CRTC", crtc->base.base.id,
					 NULL, &scaler_state->scaler_id);
}

/*
 * Assign a scaler to a plane's staged scaling request, pulling the plane
 * into the atomic state if necessary (pre-GLK only, for HQ/dyn mode
 * reassignment).
 */
static int setup_plane_scaler(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_plane *plane)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_plane_state *plane_state;

	/* plane on different crtc cannot be a scaler user of this crtc */
	if (drm_WARN_ON(display->drm, plane->pipe != crtc->pipe))
		return 0;

	plane_state = intel_atomic_get_new_plane_state(state, plane);

	/*
	 * GLK+ scalers don't have a HQ mode so it
	 * isn't necessary to change between HQ and dyn mode
	 * on those platforms.
	 */
	if (!plane_state && DISPLAY_VER(display) >= 10)
		return 0;

	plane_state = intel_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	return intel_atomic_setup_scaler(crtc_state,
					 hweight32(scaler_state->scaler_users),
					 crtc, "PLANE", plane->base.base.id,
					 plane_state, &plane_state->scaler_id);
}

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @state: atomic state
 * @crtc: crtc
 *
 * This function sets up scalers based on staged scaling requests for
 * a @crtc and its planes. It is called from crtc level check path. If request
 * is a supportable request, it attaches scalers to requested planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * not being part of this atomic state
 *
 * Returns:
 *         0 - scalers were setup successfully
 *         error code - otherwise
 */
int intel_atomic_setup_scalers(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int num_scalers_need;
	int i;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > crtc->num_scalers) {
		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] too many scaling requests %d > %d\n",
			    crtc->base.base.id, crtc->base.name,
			    num_scalers_need, crtc->num_scalers);
		return -EINVAL;
	}

	/* walkthrough scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int ret;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			ret = setup_crtc_scaler(state, crtc);
			if (ret)
				return ret;
		} else {
			/* non-CRTC bits index planes via drm_plane_index() */
			struct intel_plane *plane =
				to_intel_plane(drm_plane_from_index(display->drm, i));

			ret = setup_plane_scaler(state, crtc, plane);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Map a linear coefficient index to its filter tap (0..6, 3 = center). */
static int glk_coef_tap(int i)
{
	return i % 7;
}

/*
 * Nearest-neighbor coefficient for tap @t: the center tap gets the
 * encoding for 1.0 (0x0800), all other taps the encoding for 0 (0x3000),
 * per the SCALER_COEFFICIENT_FORMAT described below.
 */
static u16 glk_nearest_filter_coef(int t)
{
	return t == 3 ? 0x0800 : 0x3000;
}

/*
 * Theory behind setting nearest-neighbor integer scaling:
 *
 * 17 phase of 7 taps requires 119 coefficients in 60 dwords per set.
 * The letter represents the filter tap (D is the center tap) and the number
 * represents the coefficient set for a phase (0-16).
 *
 * +------------+--------------------------+--------------------------+
 * |Index value | Data value coefficient 1 | Data value coefficient 2 |
 * +------------+--------------------------+--------------------------+
 * |   00h      |          B0              |          A0              |
 * +------------+--------------------------+--------------------------+
 * |   01h      |          D0              |          C0              |
 * +------------+--------------------------+--------------------------+
 * |   02h      |          F0              |          E0              |
 * +------------+--------------------------+--------------------------+
 * |   03h      |          A1              |          G0              |
 * +------------+--------------------------+--------------------------+
 * |   04h      |          C1              |          B1              |
 * +------------+--------------------------+--------------------------+
 * |   ...      |          ...             |          ...             |
 * +------------+--------------------------+--------------------------+
 * |   38h      |          B16             |          A16             |
 * +------------+--------------------------+--------------------------+
 * |   39h      |          D16             |          C16             |
 * +------------+--------------------------+--------------------------+
 * |   3Ah      |          F16             |          E16             |
 * +------------+--------------------------+--------------------------+
 * |   3Bh      |          Reserved        |          G16             |
 * +------------+--------------------------+--------------------------+
 *
 * To enable nearest-neighbor scaling: program scaler coefficients with
 * the center tap (Dxx) values set to 1 and all other values set to 0 as per
 * SCALER_COEFFICIENT_FORMAT
 *
 */

/*
 * Program a full nearest-neighbor coefficient set: enable index
 * auto-increment, write the 119 coefficients two per dword, then reset
 * the index register.
 */
static void glk_program_nearest_filter_coefs(struct intel_display *display,
					     struct intel_dsb *dsb,
					     enum pipe pipe, int id, int set)
{
	int i;

	intel_de_write_dsb(display, dsb,
			   GLK_PS_COEF_INDEX_SET(pipe, id, set),
			   PS_COEF_INDEX_AUTO_INC);

	for (i = 0; i < 17 * 7; i += 2) {
		u32 tmp;
		int t;

		/* pack two consecutive coefficients into one dword */
		t = glk_coef_tap(i);
		tmp = glk_nearest_filter_coef(t);

		t = glk_coef_tap(i + 1);
		tmp |= glk_nearest_filter_coef(t) << 16;

		intel_de_write_dsb(display, dsb,
				   GLK_PS_COEF_DATA_SET(pipe, id, set), tmp);
	}

	intel_de_write_dsb(display, dsb,
			   GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}

/*
 * Translate the uapi scaling filter into PS_CTRL filter-select bits;
 * programmed coefficients (set 0) for nearest-neighbor, the medium
 * hardware filter otherwise.
 */
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter)
{
	if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR)
		return (PS_FILTER_PROGRAMMED |
			PS_Y_VERT_FILTER_SELECT(0) |
			PS_Y_HORZ_FILTER_SELECT(0) |
			PS_UV_VERT_FILTER_SELECT(0) |
			PS_UV_HORZ_FILTER_SELECT(0));

	return PS_FILTER_MEDIUM;
}

/*
 * Program scaler coefficients if the selected filter needs them; the
 * default hardware filter requires no coefficient upload.
 */
static void skl_scaler_setup_filter(struct intel_display *display,
				    struct intel_dsb *dsb, enum pipe pipe,
				    int id, int set, enum drm_scaling_filter filter)
{
	switch (filter) {
	case DRM_SCALING_FILTER_DEFAULT:
		break;
	case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
		glk_program_nearest_filter_coefs(display, dsb, pipe, id, set);
		break;
	default:
		MISSING_CASE(filter);
	}
}

/*
 * Arm the pipe scaler (panel fitter) registers for the crtc's pch_pfit
 * configuration: filter, phases, window position and size.
 */
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	struct drm_rect src;
	int id;
	u32 ps_ctrl;

	if (!crtc_state->pch_pfit.enabled)
		return;

	if (drm_WARN_ON(display->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	/* Wa_14011503117: mask ECC errors around pfit enable */
	if (intel_display_wa(display, 14011503117))
		adl_scaler_ecc_mask(crtc_state);

	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
		skl_scaler_get_filter_select(crtc_state->hw.scaling_filter);

	trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);

	skl_scaler_setup_filter(display, NULL, pipe, id, 0,
				crtc_state->hw.scaling_filter);

	intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);

	intel_de_write_fw(display, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(display, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
			  PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
	intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
			  PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
}

/*
 * Arm the plane scaler registers (via DSB when provided) for a plane's
 * staged scaling: filter, Y/UV phases, window position and size.
 */
void
skl_program_plane_scaler(struct intel_dsb *dsb,
			 struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state,
			 const struct intel_plane_state *plane_state)
{
	struct intel_display *display = to_intel_display(plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int scaler_id = plane_state->scaler_id;
	const struct intel_scaler *scaler =
		&crtc_state->scaler_state.scalers[scaler_id];
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
	u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
	u16 y_hphase, uv_rgb_hphase;
	u16 y_vphase, uv_rgb_vphase;
	int hscale, vscale;
	u32 ps_ctrl;

	hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
				      &plane_state->uapi.dst,
				      0, INT_MAX);
	vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
				      &plane_state->uapi.dst,
				      0, INT_MAX);

	/* TODO: handle sub-pixel coordinates */
	if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
	    !icl_is_hdr_plane(display, plane->id)) {
		y_hphase = skl_scaler_calc_phase(1, hscale, false);
		y_vphase = skl_scaler_calc_phase(1, vscale, false);

		/* MPEG2 chroma siting convention */
		uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
		uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
	} else {
		/* not used */
		y_hphase = 0;
		y_vphase = 0;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
	}

	ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
		skl_scaler_get_filter_select(plane_state->hw.scaling_filter);

	trace_intel_plane_scaler_update_arm(plane, scaler_id,
					    crtc_x, crtc_y, crtc_w, crtc_h);

	skl_scaler_setup_filter(display, dsb, pipe, scaler_id, 0,
				plane_state->hw.scaling_filter);

	intel_de_write_dsb(display, dsb, SKL_PS_CTRL(pipe, scaler_id),
			   ps_ctrl);
	intel_de_write_dsb(display, dsb, SKL_PS_VPHASE(pipe, scaler_id),
			   PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_dsb(display, dsb, SKL_PS_HPHASE(pipe, scaler_id),
			   PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_dsb(display, dsb, SKL_PS_WIN_POS(pipe, scaler_id),
			   PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
	intel_de_write_dsb(display, dsb, SKL_PS_WIN_SZ(pipe, scaler_id),
			   PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}

/* Disable one scaler in hardware by zeroing its control/window registers. */
static void skl_detach_scaler(struct intel_dsb *dsb,
			      struct intel_crtc *crtc, int id)
{
	struct intel_display *display = to_intel_display(crtc);

	trace_intel_scaler_disable_arm(crtc, id);

	intel_de_write_dsb(display, dsb, SKL_PS_CTRL(crtc->pipe, id), 0);
	intel_de_write_dsb(display, dsb, SKL_PS_WIN_POS(crtc->pipe, id), 0);
	intel_de_write_dsb(display, dsb, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
void skl_detach_scalers(struct intel_dsb *dsb,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(dsb, crtc, i);
	}
}

/* Disable all scalers on the crtc, e.g. on full crtc disable. */
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(NULL, crtc, i);
}

/*
 * Hardware state readout: find a scaler bound to the pipe (panel fitter)
 * and reconstruct the pch_pfit destination rect and scaler bookkeeping in
 * @crtc_state.
 */
void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(display, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE))
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));

		drm_rect_init(&crtc_state->pch_pfit.dst,
			      REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
			      REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
			      REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
			      REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));

		scaler_state->scalers[i].in_use = true;
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}

/*
 * Mask fatal display errors while the pipe scaler is enabled
 * (Wa_14011503117); only relevant when pfit is in use.
 */
void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

/*
 * Re-enable fatal display error reporting after the workaround window.
 * NOTE(review): the write of 1 to SKL_PS_ECC_STAT presumably clears a
 * sticky ECC status bit before unmasking — confirm against bspec.
 */
void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (scaler_state->scaler_id < 0)
		return;

	intel_de_write_fw(display,
			  SKL_PS_ECC_STAT(crtc->pipe, scaler_state->scaler_id),
			  1);
	intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0);
}