/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop.h"

#define VOP_REG(off, _mask, s) \
		{.offset = off, \
		 .mask = _mask, \
		 .shift = s,}

#define __REG_SET_RELAXED(x, off, mask, shift, v) \
		vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
#define __REG_SET_NORMAL(x, off, mask, shift, v) \
		vop_mask_write(x, off, (mask) << shift, (v) << shift)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

struct vop_win_state {
	struct list_head head;
	struct drm_framebuffer *fb;
	dma_addr_t yrgb_mst;
	struct drm_pending_vblank_event *event;
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;

	struct list_head pending;
	struct vop_win_state *active;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	int connector_type;
	int connector_out_mode;

	/* protects the vsync work state below */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* length of the mapped vop register region */
	uint32_t len;

	/* only one task may configure the registers at a time */
	spinlock_t reg_lock;
	/* protects the vop irq control registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory bus clk */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	int pipe;

	struct vop_win win[];
};

enum vop_data_format {
	VOP_FMT_ARGB8888 = 0,
	VOP_FMT_RGB888,
	VOP_FMT_RGB565,
	VOP_FMT_YUV420SP = 4,
	VOP_FMT_YUV422SP,
	VOP_FMT_YUV444SP,
};

struct vop_reg_data {
	uint32_t offset;
	uint32_t value;
};

struct vop_reg {
	uint32_t offset;
	uint32_t shift;
	uint32_t mask;
};

struct vop_ctrl {
	struct vop_reg standby;
	struct vop_reg data_blank;
	struct vop_reg gate_en;
	struct vop_reg mmu_en;
	struct vop_reg rgb_en;
	struct vop_reg edp_en;
	struct vop_reg hdmi_en;
	struct vop_reg mipi_en;
	struct vop_reg out_mode;
	struct vop_reg dither_down;
	struct vop_reg dither_up;
	struct vop_reg pin_pol;

	struct vop_reg htotal_pw;
	struct vop_reg hact_st_end;
	struct vop_reg vtotal_pw;
	struct vop_reg vact_st_end;
	struct vop_reg hpost_st_end;
	struct vop_reg vpost_st_end;
};

struct vop_win_phy {
	const uint32_t *data_formats;
	uint32_t nformats;

	struct vop_reg enable;
	struct vop_reg format;
	struct vop_reg rb_swap;
	struct vop_reg act_info;
	struct vop_reg dsp_info;
	struct vop_reg dsp_st;
	struct vop_reg yrgb_mst;
	struct vop_reg uv_mst;
	struct vop_reg yrgb_vir;
	struct vop_reg uv_vir;

	struct vop_reg dst_alpha_ctl;
	struct vop_reg src_alpha_ctl;
};

struct vop_win_data {
	uint32_t base;
	const struct vop_win_phy *phy;
	enum drm_plane_type type;
};

struct vop_data {
	const struct vop_reg_data *init_table;
	unsigned int table_size;
	const struct vop_ctrl *ctrl;
	const struct vop_win_data *win;
	unsigned int win_size;
};

static const uint32_t formats_01[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV16,
	DRM_FORMAT_NV24,
};

static const uint32_t formats_234[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
};

static const struct vop_win_phy win01_data = {
	.data_formats = formats_01,
	.nformats = ARRAY_SIZE(formats_01),
	.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
	.format = VOP_REG(WIN0_CTRL0, 0x7, 1),
	.rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
	.act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
	.dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
	.dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
	.yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
	.uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
	.uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
	.src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
	.dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
};

static const struct vop_win_phy win23_data = {
	.data_formats = formats_234,
	.nformats = ARRAY_SIZE(formats_234),
	.enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
	.format = VOP_REG(WIN2_CTRL0, 0x7, 1),
	.rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
	.dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
	.dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
	.yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
	.yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
	.src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
	.dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
};

static const struct vop_ctrl ctrl_data = {
	.standby = VOP_REG(SYS_CTRL, 0x1, 22),
	.gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
	.mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
	.rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
	.hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
	.edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
	.mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
	.dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
	.dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
	.data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
	.out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
	.pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
	.htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
	.hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
	.vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
	.vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
	.hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
	.vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
};

static const struct vop_reg_data vop_init_reg_table[] = {
	{SYS_CTRL, 0x00c00000},
	{DSP_CTRL0, 0x00000000},
	{WIN0_CTRL0, 0x00000080},
	{WIN1_CTRL0, 0x00000080},
};

/*
 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
 * special support to get alpha blending working. For now, just use overlay
 * window 3 for the drm cursor.
 */
static const struct vop_win_data rk3288_vop_win_data[] = {
	{ .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
	{ .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
	{ .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
	{ .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
};

static const struct vop_data rk3288_vop = {
	.init_table = vop_init_reg_table,
	.table_size = ARRAY_SIZE(vop_init_reg_table),
	.ctrl = &ctrl_data,
	.win = rk3288_vop_win_data,
	.win_size = ARRAY_SIZE(rk3288_vop_win_data),
};

static const struct of_device_id vop_driver_dt_match[] = {
	{ .compatible = "rockchip,rk3288-vop",
	  .data = &rk3288_vop },
	{},
};

static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

static inline void vop_cfg_done(struct vop *vop)
{
	writel(0x01, vop->regs + REG_CFG_DONE);
}

static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t v)
{
	if (mask) {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		cached_val = (cached_val & ~mask) | v;
		writel(cached_val, vop->regs + offset);
		vop->regsbak[offset >> 2] = cached_val;
	}
}

static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
					  uint32_t mask, uint32_t v)
{
	if (mask) {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		cached_val = (cached_val & ~mask) | v;
		writel_relaxed(cached_val, vop->regs + offset);
		vop->regsbak[offset >> 2] = cached_val;
	}
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
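	/* Map DRM fourcc pixel formats onto the VOP hardware format field. */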
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
		       DSP_HOLD_VALID_INTR_EN(1));

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
		       DSP_HOLD_VALID_INTR_EN(0));

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	if (vop->is_enabled)
		return;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		return;
	}

	ret = clk_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
		return;
	}

	ret = clk_enable(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
		goto err_disable_hclk;
	}

	ret = clk_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
		goto err_disable_dclk;
	}

	/*
	 * Slave iommu shares power, irq and clock with vop. It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	/*
	 * At this point the vop clocks and iommu are enabled, so it is safe
	 * to read and write the vop registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_vblank_on(vop->drm_dev, vop->pipe);

	return;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
}

static void vop_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);

	if (!vop->is_enabled)
		return;

	drm_vblank_off(crtc->dev, vop->pipe);

	/*
	 * VOP standby takes effect at the end of the current frame;
	 * the dsp hold valid irq signals that standby has completed.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * VOP standby has completed, so it is now safe to detach the iommu.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);
}

/*
 * Caller must hold vsync_mutex.
 */
static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
{
	struct vop_win_state *last;
	struct vop_win_state *active = vop_win->active;

	if (list_empty(&vop_win->pending))
		return active ? active->fb : NULL;

	last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
	return last ? last->fb : NULL;
}

/*
 * Caller must hold vsync_mutex.
 */
static int vop_win_queue_fb(struct vop_win *vop_win,
			    struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
			    struct drm_pending_vblank_event *event)
{
	struct vop_win_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->fb = fb;
	state->yrgb_mst = yrgb_mst;
	state->event = event;

	list_add_tail(&state->head, &vop_win->pending);

	return 0;
}

static int vop_update_plane_event(struct drm_plane *plane,
				  struct drm_crtc *crtc,
				  struct drm_framebuffer *fb, int crtc_x,
				  int crtc_y, unsigned int crtc_w,
				  unsigned int crtc_h, uint32_t src_x,
				  uint32_t src_y, uint32_t src_w,
				  uint32_t src_h,
				  struct drm_pending_vblank_event *event)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(crtc);
	struct drm_gem_object *obj;
	struct rockchip_gem_object *rk_obj;
	unsigned long offset;
	unsigned int actual_w;
	unsigned int actual_h;
	unsigned int dsp_stx;
	unsigned int dsp_sty;
	unsigned int y_vir_stride;
	dma_addr_t yrgb_mst;
	enum vop_data_format format;
	uint32_t val;
	bool is_alpha;
	bool rb_swap;
	bool visible;
	int ret;
	struct drm_rect dest = {
		.x1 = crtc_x,
		.y1 = crtc_y,
		.x2 = crtc_x + crtc_w,
		.y2 = crtc_y + crtc_h,
	};
	struct drm_rect src = {
		/* 16.16 fixed point */
		.x1 = src_x,
		.y1 = src_y,
		.x2 = src_x + src_w,
		.y2 = src_y + src_h,
	};
	const struct drm_rect clip = {
		.x2 = crtc->mode.hdisplay,
		.y2 = crtc->mode.vdisplay,
	};
	bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    &src, &dest, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    can_position, false, &visible);
	if (ret)
		return ret;

	if (!visible)
		return 0;

	is_alpha = is_alpha_support(fb->pixel_format);
	rb_swap = has_rb_swapped(fb->pixel_format);
	format = vop_convert_format(fb->pixel_format);
	if (format < 0)
		return format;

	obj = rockchip_fb_get_gem_obj(fb, 0);
	if (!obj) {
		DRM_ERROR("failed to get rockchip gem object from framebuffer\n");
		return -EINVAL;
	}

	rk_obj = to_rockchip_obj(obj);

	actual_w = (src.x2 - src.x1) >> 16;
	actual_h = (src.y2 - src.y1) >> 16;
	crtc_x = max(0, crtc_x);
	crtc_y = max(0, crtc_y);

	dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;

	offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
	offset += (src.y1 >> 16) * fb->pitches[0];
	yrgb_mst = rk_obj->dma_addr + offset;

	y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);

	/*
	 * If this plane update changes the plane's framebuffer, (or more
	 * precisely, if this update has a different framebuffer than the last
	 * update), enqueue it so we can track when it completes.
	 *
	 * Only when we discover that this update has completed, can we
	 * unreference any previous framebuffers.
	 */
	mutex_lock(&vop->vsync_mutex);
	if (fb != vop_win_last_pending_fb(vop_win)) {
		ret = drm_vblank_get(plane->dev, vop->pipe);
		if (ret) {
			DRM_ERROR("failed to get vblank, %d\n", ret);
			mutex_unlock(&vop->vsync_mutex);
			return ret;
		}

		drm_framebuffer_reference(fb);

		ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
		if (ret) {
			drm_vblank_put(plane->dev, vop->pipe);
			mutex_unlock(&vop->vsync_mutex);
			return ret;
		}

		vop->vsync_work_pending = true;
	}
	mutex_unlock(&vop->vsync_mutex);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
	VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
	val = (actual_h - 1) << 16;
	val |= (actual_w - 1) & 0xffff;
	VOP_WIN_SET(vop, win, act_info, val);
	VOP_WIN_SET(vop, win, dsp_info, val);
	val = (dsp_sty - 1) << 16;
	val |= (dsp_stx - 1) & 0xffff;
	VOP_WIN_SET(vop, win, dsp_st, val);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);

	vop_cfg_done(vop);
	spin_unlock(&vop->reg_lock);

	return 0;
}

static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
			    struct drm_framebuffer *fb, int crtc_x, int crtc_y,
			    unsigned int crtc_w, unsigned int crtc_h,
			    uint32_t src_x, uint32_t src_y, uint32_t src_w,
			    uint32_t src_h)
{
	return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
				      crtc_h, src_x, src_y, src_w, src_h,
				      NULL);
}

static int vop_update_primary_plane(struct drm_crtc *crtc,
				    struct drm_pending_vblank_event *event)
{
	unsigned int crtc_w, crtc_h;

	crtc_w = crtc->primary->fb->width - crtc->x;
	crtc_h = crtc->primary->fb->height - crtc->y;

	return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
				      0, 0, crtc_w, crtc_h, crtc->x << 16,
				      crtc->y << 16, crtc_w << 16,
				      crtc_h << 16, event);
}

static int vop_disable_plane(struct drm_plane *plane)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop;
	int ret;

	if (!plane->crtc)
		return 0;
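
	/*
	 * Queue a NULL framebuffer for this window so that, once the disable
	 * has latched at the next frame start, the vsync worker can drop the
	 * reference on the framebuffer that is currently being scanned out.
	 */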
	vop = to_vop(plane->crtc);

	ret = drm_vblank_get(plane->dev, vop->pipe);
	if (ret) {
		DRM_ERROR("failed to get vblank, %d\n", ret);
		return ret;
	}

	mutex_lock(&vop->vsync_mutex);

	ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
	if (ret) {
		drm_vblank_put(plane->dev, vop->pipe);
		mutex_unlock(&vop->vsync_mutex);
		return ret;
	}

	vop->vsync_work_pending = true;
	mutex_unlock(&vop->vsync_mutex);

	spin_lock(&vop->reg_lock);
	VOP_WIN_SET(vop, win, enable, 0);
	vop_cfg_done(vop);
	spin_unlock(&vop->reg_lock);

	return 0;
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	vop_disable_plane(plane);
	drm_plane_cleanup(plane);
}

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = vop_update_plane,
	.disable_plane = vop_disable_plane,
	.destroy = vop_plane_destroy,
};

int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
				  int connector_type,
				  int out_mode)
{
	struct vop *vop = to_vop(crtc);

	vop->connector_type = connector_type;
	vop->connector_out_mode = out_mode;

	return 0;
}
EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (!vop->is_enabled)
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (!vop->is_enabled)
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);
	vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
};

static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		vop_enable(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		vop_disable(crtc);
		break;
	default:
		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
		break;
	}
}

static void vop_crtc_prepare(struct drm_crtc *crtc)
{
	vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
		return false;

	return true;
}

static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	int ret;

	crtc->x = x;
	crtc->y = y;

	ret = vop_update_primary_plane(crtc, NULL);
	if (ret < 0) {
		DRM_ERROR("fail to update plane\n");
		return ret;
	}

	return 0;
}

static int vop_crtc_mode_set(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     int x, int y, struct drm_framebuffer *fb)
{
	struct vop *vop = to_vop(crtc);
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	int ret, ret_clk;
	uint32_t val;

	/*
	 * Disable dclk to stop the frame scan, so that the mode can be
	 * configured and the iommu enabled safely.
	 */
	clk_disable(vop->dclk);

	switch (vop->connector_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	default:
		DRM_ERROR("unsupported connector_type[%d]\n",
			  vop->connector_type);
		ret = -EINVAL;
		goto out;
	}
	VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);

	val = 0x8;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, val);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	ret = vop_crtc_mode_set_base(crtc, x, y, fb);
	if (ret)
		goto out;

	/*
	 * Reset dclk so that the whole mode configuration takes effect and
	 * the clock starts running on a clean frame.
	 */
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
out:
	ret_clk = clk_enable(vop->dclk);
	if (ret_clk < 0) {
		dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk);
		return ret_clk;
	}

	return ret;
}

static void vop_crtc_commit(struct drm_crtc *crtc)
{
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.dpms = vop_crtc_dpms,
	.prepare = vop_crtc_prepare,
	.mode_fixup = vop_crtc_mode_fixup,
	.mode_set = vop_crtc_mode_set,
	.mode_set_base = vop_crtc_mode_set_base,
	.commit = vop_crtc_commit,
};

static int vop_crtc_page_flip(struct drm_crtc *crtc,
			      struct drm_framebuffer *fb,
			      struct drm_pending_vblank_event *event,
			      uint32_t page_flip_flags)
{
	struct vop *vop = to_vop(crtc);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	int ret;

	/* when the page flip is requested, crtc should be on */
	if (!vop->is_enabled) {
		DRM_DEBUG("page flip request rejected because crtc is off.\n");
		return 0;
	}

	crtc->primary->fb = fb;

	ret = vop_update_primary_plane(crtc, event);
	if (ret)
		crtc->primary->fb = old_fb;

	return ret;
}

static void vop_win_state_complete(struct vop_win *vop_win,
				   struct vop_win_state *state)
{
	struct vop *vop = vop_win->vop;
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm = crtc->dev;
	unsigned long flags;

	if (state->event) {
		spin_lock_irqsave(&drm->event_lock, flags);
		drm_send_vblank_event(drm, -1, state->event);
		spin_unlock_irqrestore(&drm->event_lock, flags);
	}

	list_del(&state->head);
	drm_vblank_put(crtc->dev, vop->pipe);
}

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.page_flip = vop_crtc_page_flip,
	.destroy = vop_crtc_destroy,
};

static bool vop_win_state_is_active(struct vop_win *vop_win,
				    struct vop_win_state *state)
{
	bool active = false;

	if (state->fb) {
		dma_addr_t yrgb_mst;

		/* check yrgb_mst to tell if pending_fb is now front */
		yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);

		active = (yrgb_mst == state->yrgb_mst);
	} else {
		bool enabled;

		/* if enable bit is clear, plane is now disabled */
		enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);

		active = (enabled == 0);
	}

	return active;
}

static void vop_win_state_destroy(struct vop_win_state *state)
{
	struct drm_framebuffer *fb = state->fb;

	if (fb)
		drm_framebuffer_unreference(fb);

	kfree(state);
}

static void vop_win_update_state(struct vop_win *vop_win)
{
	struct vop_win_state *state, *n, *new_active = NULL;

	/* Check if any pending states are now active */
	list_for_each_entry(state, &vop_win->pending, head)
		if (vop_win_state_is_active(vop_win, state)) {
			new_active = state;
			break;
		}

	if (!new_active)
		return;

	/*
	 * Destroy any 'skipped' pending states - states that were queued
	 * before the newly active state.
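	 * Their vblank events are still sent and their vblank references
	 * dropped, so nothing waits on an update that was superseded.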
	 */
	list_for_each_entry_safe(state, n, &vop_win->pending, head) {
		if (state == new_active)
			break;
		vop_win_state_complete(vop_win, state);
		vop_win_state_destroy(state);
	}

	vop_win_state_complete(vop_win, new_active);

	if (vop_win->active)
		vop_win_state_destroy(vop_win->active);
	vop_win->active = new_active;
}

static bool vop_win_has_pending_state(struct vop_win *vop_win)
{
	return !list_empty(&vop_win->pending);
}

static irqreturn_t vop_isr_thread(int irq, void *data)
{
	struct vop *vop = data;
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	mutex_lock(&vop->vsync_mutex);

	if (!vop->vsync_work_pending)
		goto done;

	vop->vsync_work_pending = false;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];

		vop_win_update_state(vop_win);
		if (vop_win_has_pending_state(vop_win))
			vop->vsync_work_pending = true;
	}

done:
	mutex_unlock(&vop->vsync_mutex);

	return IRQ_HANDLED;
}

static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	uint32_t intr0_reg, active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * INTR_CTRL0 register has interrupt status, enable and clear bits, we
	 * must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);
	intr0_reg = vop_readl(vop, INTR_CTRL0);
	active_irqs = intr0_reg & INTR_MASK;
	/* Clear all active interrupt sources */
	if (active_irqs)
		vop_writel(vop, INTR_CTRL0,
			   intr0_reg | (active_irqs << INTR_CLR_SHIFT));
	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_handle_vblank(vop->drm_dev, vop->pipe);
		active_irqs &= ~FS_INTR;
		ret = (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);

	return ret;
}

static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type);
		if (ret) {
			DRM_ERROR("failed to initialize plane\n");
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs);
	if (ret)
		return ret;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type);
		if (ret) {
			DRM_ERROR("failed to initialize overlay plane\n");
			goto err_cleanup_crtc;
		}
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_ERROR("no port node found in %s\n",
			  dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	init_completion(&vop->dsp_hold_completion);
	crtc->port = port;
	vop->pipe = drm_crtc_index(crtc);
	rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;

	rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
}

static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = clk_prepare(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare hclk\n");
		return ret;
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		goto err_unprepare_hclk;
	}

	ret = clk_prepare(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare aclk\n");
		goto err_unprepare_dclk;
	}

	/*
	 * Enable hclk, so that we can configure the vop registers.
	 */
	ret = clk_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable hclk\n");
		goto err_unprepare_aclk;
	}
	/*
	 * Do an ahb (hclk) reset to reset all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_hclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * Do a dclk reset so that all of the above configuration takes effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_hclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);

	vop->is_enabled = false;

	return 0;

err_disable_hclk:
	clk_disable(vop->hclk);
err_unprepare_aclk:
	clk_unprepare(vop->aclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
err_unprepare_hclk:
	clk_unprepare(vop->hclk);
	return ret;
}

/*
 * Initialize the vop->win array elements.
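 * Each window gets a back-pointer to the vop and an empty list of
 * pending states.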
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
		INIT_LIST_HEAD(&vop_win->pending);
	}
}

static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct of_device_id *of_id;
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	of_id = of_match_device(vop_driver_dt_match, dev);
	vop_data = of_id->data;
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
					IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	return 0;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

static const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};

static int vop_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (!dev->of_node) {
		dev_err(dev, "can't find vop devices\n");
		return -ENODEV;
	}

	return component_add(dev, &vop_component_ops);
}

static int vop_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vop_component_ops);

	return 0;
}

struct platform_driver vop_platform_driver = {
	.probe = vop_probe,
	.remove = vop_remove,
	.driver = {
		.name = "rockchip-vop",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(vop_driver_dt_match),
	},
};

module_platform_driver(vop_platform_driver);

MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
MODULE_LICENSE("GPL v2");