/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 plane module
 *
 * Each DRM plane is a layer of pixels being scanned out by the HVS.
 *
 * At atomic modeset check time, we compute the HVS display element
 * state that would be necessary for displaying the plane (giving us a
 * chance to figure out if a plane configuration is invalid), then at
 * atomic flush time the CRTC will ask us to write our element state
 * into the region of the HVS that it has allocated for us.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct hvs_format {
	u32 drm; /* DRM_FORMAT_* */
	u32 hvs; /* HVS_PIXEL_FORMAT_* */
	u32 pixel_order;
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV21,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV61,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
};

static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		if (hvs_formats[i].drm == drm_format)
			return &hvs_formats[i];
	}

	return NULL;
}

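/* Illustrative examples for vc4_get_scaling_mode() below (not taken
 * from the spec): a 640-pixel-wide source shown at 1280 pixels
 * (dst > src) takes the PPF path, a 1920-pixel source shown at 1280
 * pixels takes the TPZ path, and equal source/destination sizes need
 * no scaling at all.
 */
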
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
	if (dst > src)
		return VC4_SCALING_PPF;
	else if (dst < src)
		return VC4_SCALING_TPZ;
	else
		return VC4_SCALING_NONE;
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->fb && state->crtc;
}

static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	if (WARN_ON(!plane->state))
		return NULL;

	vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return NULL;

	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));

	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

	if (vc4_state->dlist) {
		vc4_state->dlist = kmemdup(vc4_state->dlist,
					   vc4_state->dlist_count * 4,
					   GFP_KERNEL);
		if (!vc4_state->dlist) {
			kfree(vc4_state);
			return NULL;
		}
		vc4_state->dlist_size = vc4_state->dlist_count;
	}

	return &vc4_state->base;
}

static void vc4_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	if (vc4_state->lbm.allocated) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}

	kfree(vc4_state->dlist);
	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
	kfree(state);
}

/* Called during init to allocate the plane's atomic state. */
static void vc4_plane_reset(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	WARN_ON(plane->state);

	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return;

	plane->state = &vc4_state->base;
	plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
	vc4_state->base.plane = plane;
}

static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
	if (vc4_state->dlist_count == vc4_state->dlist_size) {
		u32 new_size = max(4u, vc4_state->dlist_count * 2);
		u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);

		if (!new_dlist)
			return;
		memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);

		kfree(vc4_state->dlist);
		vc4_state->dlist = new_dlist;
		vc4_state->dlist_size = new_size;
	}

	vc4_state->dlist[vc4_state->dlist_count++] = val;
}

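/* Illustrative sizing note (derived from this file, not from the HVS
 * spec): the dlist array grows by doubling (4, 8, 16, ... words) as
 * vc4_dlist_write() is called.  A simple unity-scaled RGB plane ends
 * up emitting seven words in vc4_plane_mode_set() below: control,
 * position 0, position 2, the position context word, one pointer,
 * one pointer context word and pitch 0.
 */
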
/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_PPF_V_PPF;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_PPF_V_NONE;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_NONE_V_PPF;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
	default:
	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY.
		 */
		return 0;
	}
}

static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	u32 subpixel_src_mask = (1 << 16) - 1;
	u32 format = fb->format->format;
	int num_planes = fb->format->num_planes;
	u32 h_subsample = 1;
	u32 v_subsample = 1;
	int i;

	for (i = 0; i < num_planes; i++)
		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];

	/* We don't support subpixel source positioning for scaling. */
	if ((state->src_x & subpixel_src_mask) ||
	    (state->src_y & subpixel_src_mask) ||
	    (state->src_w & subpixel_src_mask) ||
	    (state->src_h & subpixel_src_mask)) {
		return -EINVAL;
	}

	vc4_state->src_x = state->src_x >> 16;
	vc4_state->src_y = state->src_y >> 16;
	vc4_state->src_w[0] = state->src_w >> 16;
	vc4_state->src_h[0] = state->src_h >> 16;

	vc4_state->crtc_x = state->crtc_x;
	vc4_state->crtc_y = state->crtc_y;
	vc4_state->crtc_w = state->crtc_w;
	vc4_state->crtc_h = state->crtc_h;

	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
						       vc4_state->crtc_w);
	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
						       vc4_state->crtc_h);

	if (num_planes > 1) {
		vc4_state->is_yuv = true;

		h_subsample = drm_format_horz_chroma_subsampling(format);
		v_subsample = drm_format_vert_chroma_subsampling(format);
		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;

		vc4_state->x_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_w[1],
					     vc4_state->crtc_w);
		vc4_state->y_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_h[1],
					     vc4_state->crtc_h);

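		/* Worked example (illustrative): for NV12 both
		 * subsampling factors are 2, so a 1920x1080 luma source
		 * pairs with a 960x540 chroma source, and the chroma
		 * channel gets its own scaling mode against the same
		 * crtc_w/crtc_h.
		 */
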
		/* YUV conversion requires that scaling be enabled,
		 * even on a plane that's otherwise 1:1. Choose TPZ
		 * for simplicity.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
	} else {
		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
	}

	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);

	/* No configuring scaling on the cursor plane, since it gets
	 * non-vblank-synced updates, and scaling requires LBM changes
	 * which have to be vblank-synced.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
		return -EINVAL;

	/* Clamp the on-screen start x/y to 0. The hardware doesn't
	 * support negative y, and negative x wastes bandwidth.
	 */
	if (vc4_state->crtc_x < 0) {
		for (i = 0; i < num_planes; i++) {
			u32 cpp = fb->format->cpp[i];
			u32 subs = ((i == 0) ? 1 : h_subsample);

			vc4_state->offsets[i] += (cpp *
						  (-vc4_state->crtc_x) / subs);
		}
		vc4_state->src_w[0] += vc4_state->crtc_x;
		vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
		vc4_state->crtc_x = 0;
	}

	if (vc4_state->crtc_y < 0) {
		for (i = 0; i < num_planes; i++) {
			u32 subs = ((i == 0) ? 1 : v_subsample);

			vc4_state->offsets[i] += (fb->pitches[i] *
						  (-vc4_state->crtc_y) / subs);
		}
		vc4_state->src_h[0] += vc4_state->crtc_y;
		vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
		vc4_state->crtc_y = 0;
	}

	return 0;
}

static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale, recip;

	scale = (1 << 16) * src / dst;

	/* The specs note that while the reciprocal would be defined
	 * as (1<<32)/scale, ~0 is close enough.
	 */
	recip = ~0 / scale;

	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}

static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale = (1 << 16) * src / dst;

	vc4_dlist_write(vc4_state,
			SCALER_PPF_AGC |
			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}

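/* Illustrative arithmetic for the two helpers above (an example, not
 * taken from the spec): downscaling a 1920-pixel source line to 1280
 * output pixels gives scale = (1 << 16) * 1920 / 1280 = 0x18000,
 * i.e. 1.5 in 16.16 fixed point, and a TPZ reciprocal of
 * ~0 / 0x18000 = 0xaaaa.
 */
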
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	/* This is the worst case number. One of the two sizes will
	 * be used depending on the scaling configuration.
	 */
	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
	u32 lbm;

	if (!vc4_state->is_yuv) {
		if (vc4_state->is_unity)
			return 0;
		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
			lbm = pix_per_line * 8;
		else {
			/* In special cases, this multiplier might be 12. */
			lbm = pix_per_line * 16;
		}
	} else {
		/* There are cases for this going down to a multiplier
		 * of 2, but according to the firmware source, the
		 * table in the docs is somewhat wrong.
		 */
		lbm = pix_per_line * 16;
	}

	lbm = roundup(lbm, 32);

	return lbm;
}

static void vc4_write_scaling_parameters(struct drm_plane_state *state,
					 int channel)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	/* Ch0 H-PPF Word 0: Scaling Parameters */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}

	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}
}

/* Writes out a full display list for an active plane to the plane's
 * private dlist state.
 */
static int vc4_plane_mode_set(struct drm_plane *plane,
			      struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	u32 ctl0_offset = vc4_state->dlist_count;
	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
	int num_planes = drm_format_num_planes(format->drm);
	bool mix_plane_alpha;
	bool covers_screen;
	u32 scl0, scl1, pitch0;
	u32 lbm_size, tiling;
	unsigned long irqflags;
	int ret, i;

	ret = vc4_plane_setup_clipping_and_scaling(state);
	if (ret)
		return ret;

	/* Allocate the LBM memory that the HVS will use for temporary
	 * storage due to our scaling/format conversion.
	 */
	lbm_size = vc4_lbm_size(state);
	if (lbm_size) {
		if (!vc4_state->lbm.allocated) {
			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
							 &vc4_state->lbm,
							 lbm_size, 32, 0, 0);
			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
		} else {
			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
		}
	}

	if (ret)
		return ret;

	/* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
	 * and 4:4:4, scl1 should be set to scl0 so both channels of
	 * the scaler do the same thing. For YUV, the Y plane needs
	 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
	 * the scl fields here.
	 */
	if (num_planes == 1) {
		scl0 = vc4_get_scl_field(state, 0);
		scl1 = scl0;
	} else {
		scl0 = vc4_get_scl_field(state, 1);
		scl1 = vc4_get_scl_field(state, 0);
	}

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		tiling = SCALER_CTL0_TILING_LINEAR;
		pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
		break;

	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
		/* For T-tiled, the FB pitch is "how many bytes from
		 * one row to the next, such that pitch * tile_h ==
		 * tile_size * tiles_per_row."
		 */
		u32 tile_size_shift = 12; /* T tiles are 4kb */
		u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
		u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);

		tiling = SCALER_CTL0_TILING_256B_OR_T;

		pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
			  VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
			  VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
		break;
	}

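	/* Worked numbers for the T-tiled case above (illustrative, not
	 * from the spec): a 1920-pixel-wide 32bpp framebuffer has a
	 * pitch of 7680 bytes, so tiles_w = 7680 >> (12 - 5) = 60,
	 * i.e. 60 4KiB tiles of 32x32 pixels per tile row.
	 */
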
	default:
		DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
			      (long long)fb->modifier);
		return -EINVAL;
	}

	/* Control word */
	vc4_dlist_write(vc4_state,
			SCALER_CTL0_VALID |
			(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
			(format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
			VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
			(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
			VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
			VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));

	/* Position Word 0: Image Positions and Alpha Value */
	vc4_state->pos0_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
			VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
			VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));

	/* Position Word 1: Scaled Image Dimensions. */
	if (!vc4_state->is_unity) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(vc4_state->crtc_w,
					      SCALER_POS1_SCL_WIDTH) |
				VC4_SET_FIELD(vc4_state->crtc_h,
					      SCALER_POS1_SCL_HEIGHT));
	}

	/* Don't waste cycles mixing with plane alpha if the set alpha
	 * is opaque or there is no per-pixel alpha information.
	 * In any case we use the alpha property value as the fixed alpha.
	 */
	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
			  fb->format->has_alpha;

	/* Position Word 2: Source Image Size, Alpha */
	vc4_state->pos2_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(fb->format->has_alpha ?
				      SCALER_POS2_ALPHA_MODE_PIPELINE :
				      SCALER_POS2_ALPHA_MODE_FIXED,
				      SCALER_POS2_ALPHA_MODE) |
			(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
			(fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
			VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
			VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));

	/* Position Word 3: Context. Written by the HVS. */
	vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
	 *
	 * The pointers may be any byte address.
	 */
	vc4_state->ptr0_offset = vc4_state->dlist_count;
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, vc4_state->offsets[i]);

	/* Pointer Context Word 0/1/2: Written by the HVS */
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pitch word 0 */
	vc4_dlist_write(vc4_state, pitch0);

	/* Pitch word 1/2 */
	for (i = 1; i < num_planes; i++) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
	}

	/* Colorspace conversion words */
	if (vc4_state->is_yuv) {
		vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
	}

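	/* Running total so far (illustrative count, derived from the
	 * writes above): a non-unity RGB plane has now emitted eight
	 * words (control, pos0, pos1, pos2, the context word, one
	 * pointer, one pointer context and pitch0); the optional LBM
	 * address, scaling parameters and PPF kernel pointers follow
	 * below.
	 */
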
	if (!vc4_state->is_unity) {
		/* LBM Base Address. */
		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
			vc4_dlist_write(vc4_state, vc4_state->lbm.start);
		}

		if (num_planes > 1) {
			/* Emit Cb/Cr as channel 0 and Y as channel
			 * 1. This matches how we set up scl0/scl1
			 * above.
			 */
			vc4_write_scaling_parameters(state, 1);
		}
		vc4_write_scaling_parameters(state, 0);

		/* If any PPF setup was done, then all the kernel
		 * pointers get uploaded.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
			u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
						   SCALER_PPF_KERNEL_OFFSET);

			/* HPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* HPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
		}
	}

	vc4_state->dlist[ctl0_offset] |=
		VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);

	/* crtc_* are already clipped coordinates. */
	covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
			vc4_state->crtc_w == state->crtc->mode.hdisplay &&
			vc4_state->crtc_h == state->crtc->mode.vdisplay;
	/* Background fill might be necessary when the plane could blend
	 * from the background: i.e. when it has per-pixel alpha content,
	 * a non-opaque plane alpha, or does not cover the entire screen.
	 */
	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;

	return 0;
}

/* If a modeset involves changing the setup of a plane, the atomic
 * infrastructure will call this to validate a proposed plane setup.
 * However, if a plane isn't getting updated, this (and the
 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
 * compute the dlist here and have all active plane dlists get updated
 * in the CRTC's flush.
 */
static int vc4_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	vc4_state->dlist_count = 0;

	if (plane_enabled(state))
		return vc4_plane_mode_set(plane, state);
	else
		return 0;
}

static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* No contents here. Since we don't know where in the CRTC's
	 * dlist we should be stored, our dlist is uploaded to the
	 * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
	 * time.
	 */
}

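/* Illustrative sketch (hedged; see the CRTC flush code in vc4_crtc.c
 * for the real consumer) of how a CRTC-side caller is expected to use
 * the two helpers below:
 *
 *	drm_atomic_crtc_for_each_plane(plane, crtc)
 *		dlist_next += vc4_plane_write_dlist(plane, dlist_next);
 *
 * Each plane appends dlist_count words at the CRTC's current write
 * pointer and reports how far that pointer advanced.
 */
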
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	int i;

	vc4_state->hw_dlist = dlist;

	/* Can't memcpy_toio() because it needs to be 32-bit writes. */
	for (i = 0; i < vc4_state->dlist_count; i++)
		writel(vc4_state->dlist[i], &dlist[i]);

	return vc4_state->dlist_count;
}

u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
	const struct vc4_plane_state *vc4_state =
		container_of(state, typeof(*vc4_state), base);

	return vc4_state->dlist_count;
}

/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	uint32_t addr;

	/* We're skipping the address adjustment for negative origin,
	 * because this is only called on the primary plane.
	 */
	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
	addr = bo->paddr + fb->offsets[0];

	/* Write the new address into the hardware immediately. The
	 * scanout will start from this address as soon as the FIFO
	 * needs to refill with pixels.
	 */
	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	/* Also update the CPU-side dlist copy, so that any later
	 * atomic updates that don't do a new modeset on our plane
	 * also use our updated address.
	 */
	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}

static void vc4_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);

	if (plane->state->fb != state->fb) {
		vc4_plane_async_set_fb(plane, state->fb);
		drm_atomic_set_fb_for_plane(plane->state, state->fb);
	}

	/* Set the cursor's position on the screen. This is the
	 * expected change from the drm_mode_cursor_universal()
	 * helper.
	 */
	plane->state->crtc_x = state->crtc_x;
	plane->state->crtc_y = state->crtc_y;

	/* Allow changing the start position within the cursor BO, if
	 * that matters.
	 */
	plane->state->src_x = state->src_x;
	plane->state->src_y = state->src_y;

	/* Update the display list based on the new crtc_x/y. */
	vc4_plane_atomic_check(plane, plane->state);

	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
}

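/* Illustrative flow for the async (cursor) path above, not a spec
 * description: drm_mode_cursor_universal() moves the cursor by
 * changing only crtc_x/y (and possibly src_x/y).  Because the dlist
 * layout and word count are unchanged, rewriting pos0, pos2 and ptr0
 * in place is sufficient, and no new LBM or dlist allocation happens.
 */
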
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	/* No configuring new scaling in the fast path. */
	if (plane->state->crtc_w != state->crtc_w ||
	    plane->state->crtc_h != state->crtc_h ||
	    plane->state->src_w != state->src_w ||
	    plane->state->src_h != state->src_h)
		return -EINVAL;

	return 0;
}

static int vc4_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct vc4_bo *bo;
	struct dma_fence *fence;
	int ret;

	if ((plane->state->fb == state->fb) || !state->fb)
		return 0;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);

	ret = vc4_bo_inc_usecnt(bo);
	if (ret)
		return ret;

	fence = reservation_object_get_excl_rcu(bo->resv);
	drm_atomic_set_fence_for_plane(state, fence);

	return 0;
}

static void vc4_cleanup_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vc4_bo *bo;

	if (plane->state->fb == state->fb || !state->fb)
		return;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
	vc4_bo_dec_usecnt(bo);
}

static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
	.cleanup_fb = vc4_cleanup_fb,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};

static void vc4_plane_destroy(struct drm_plane *plane)
{
	drm_plane_helper_disable(plane);
	drm_plane_cleanup(plane);
}

static bool vc4_format_mod_supported(struct drm_plane *plane,
				     uint32_t format,
				     uint64_t modifier)
{
	/* Support T_TILING for RGB formats only. */
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_XRGB1555:
		return true;
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	default:
		return (modifier == DRM_FORMAT_MOD_LINEAR);
	}
}

static const struct drm_plane_funcs vc4_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vc4_plane_destroy,
	.set_property = NULL,
	.reset = vc4_plane_reset,
	.atomic_duplicate_state = vc4_plane_duplicate_state,
	.atomic_destroy_state = vc4_plane_destroy_state,
	.format_mod_supported = vc4_format_mod_supported,
};

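/* Behaviour of vc4_format_mod_supported() above, spelled out with a
 * few example queries (illustrative): (XRGB8888, VC4_T_TILED) and
 * (XRGB8888, LINEAR) are accepted, (NV12, VC4_T_TILED) is rejected,
 * and (NV12, LINEAR) is accepted.
 */
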
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct vc4_plane *vc4_plane;
	u32 formats[ARRAY_SIZE(hvs_formats)];
	u32 num_formats = 0;
	int ret = 0;
	unsigned i;
	static const uint64_t modifiers[] = {
		DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID
	};

	vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
				 GFP_KERNEL);
	if (!vc4_plane)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		/* Don't allow YUV in cursor planes, since that means
		 * turning on the scaler, which we don't allow for the
		 * cursor.
		 */
		if (type != DRM_PLANE_TYPE_CURSOR ||
		    hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
			formats[num_formats++] = hvs_formats[i].drm;
		}
	}
	plane = &vc4_plane->base;
	ret = drm_universal_plane_init(dev, plane, 0,
				       &vc4_plane_funcs,
				       formats, num_formats,
				       modifiers, type, NULL);
	if (ret)
		return ERR_PTR(ret);

	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

	drm_plane_create_alpha_property(plane);

	return plane;
}