1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2012 Samsung Electronics Co.Ltd 4 * Authors: Joonyoung Shim <jy0922.shim@samsung.com> 5 */ 6 7 #include <linux/refcount.h> 8 #include <linux/clk.h> 9 #include <linux/component.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/err.h> 13 #include <linux/interrupt.h> 14 #include <linux/io.h> 15 #include <linux/kernel.h> 16 #include <linux/of.h> 17 #include <linux/platform_device.h> 18 #include <linux/pm_runtime.h> 19 #include <linux/slab.h> 20 #include <linux/uaccess.h> 21 #include <linux/workqueue.h> 22 23 #include <drm/drm_file.h> 24 #include <drm/exynos_drm.h> 25 26 #include "exynos_drm_drv.h" 27 #include "exynos_drm_g2d.h" 28 #include "exynos_drm_gem.h" 29 30 #define G2D_HW_MAJOR_VER 4 31 #define G2D_HW_MINOR_VER 1 32 33 /* vaild register range set from user: 0x0104 ~ 0x0880 */ 34 #define G2D_VALID_START 0x0104 35 #define G2D_VALID_END 0x0880 36 37 /* general registers */ 38 #define G2D_SOFT_RESET 0x0000 39 #define G2D_INTEN 0x0004 40 #define G2D_INTC_PEND 0x000C 41 #define G2D_DMA_SFR_BASE_ADDR 0x0080 42 #define G2D_DMA_COMMAND 0x0084 43 #define G2D_DMA_STATUS 0x008C 44 #define G2D_DMA_HOLD_CMD 0x0090 45 46 /* command registers */ 47 #define G2D_BITBLT_START 0x0100 48 49 /* registers for base address */ 50 #define G2D_SRC_BASE_ADDR 0x0304 51 #define G2D_SRC_STRIDE 0x0308 52 #define G2D_SRC_COLOR_MODE 0x030C 53 #define G2D_SRC_LEFT_TOP 0x0310 54 #define G2D_SRC_RIGHT_BOTTOM 0x0314 55 #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 56 #define G2D_DST_BASE_ADDR 0x0404 57 #define G2D_DST_STRIDE 0x0408 58 #define G2D_DST_COLOR_MODE 0x040C 59 #define G2D_DST_LEFT_TOP 0x0410 60 #define G2D_DST_RIGHT_BOTTOM 0x0414 61 #define G2D_DST_PLANE2_BASE_ADDR 0x0418 62 #define G2D_PAT_BASE_ADDR 0x0500 63 #define G2D_MSK_BASE_ADDR 0x0520 64 65 /* G2D_SOFT_RESET */ 66 #define G2D_SFRCLEAR (1 << 1) 67 #define G2D_R (1 << 0) 68 69 /* G2D_INTEN */ 70 #define G2D_INTEN_ACF (1 << 3) 71 #define G2D_INTEN_UCF (1 << 2) 72 #define G2D_INTEN_GCF (1 << 1) 73 #define G2D_INTEN_SCF (1 << 0) 74 75 /* G2D_INTC_PEND */ 76 #define G2D_INTP_ACMD_FIN (1 << 3) 77 #define G2D_INTP_UCMD_FIN (1 << 2) 78 #define G2D_INTP_GCMD_FIN (1 << 1) 79 #define G2D_INTP_SCMD_FIN (1 << 0) 80 81 /* G2D_DMA_COMMAND */ 82 #define G2D_DMA_HALT (1 << 2) 83 #define G2D_DMA_CONTINUE (1 << 1) 84 #define G2D_DMA_START (1 << 0) 85 86 /* G2D_DMA_STATUS */ 87 #define G2D_DMA_LIST_DONE_COUNT (0xFF << 17) 88 #define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1) 89 #define G2D_DMA_DONE (1 << 0) 90 #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 91 92 /* G2D_DMA_HOLD_CMD */ 93 #define G2D_USER_HOLD (1 << 2) 94 #define G2D_LIST_HOLD (1 << 1) 95 #define G2D_BITBLT_HOLD (1 << 0) 96 97 /* G2D_BITBLT_START */ 98 #define G2D_START_CASESEL (1 << 2) 99 #define G2D_START_NHOLT (1 << 1) 100 #define G2D_START_BITBLT (1 << 0) 101 102 /* buffer color format */ 103 #define G2D_FMT_XRGB8888 0 104 #define G2D_FMT_ARGB8888 1 105 #define G2D_FMT_RGB565 2 106 #define G2D_FMT_XRGB1555 3 107 #define G2D_FMT_ARGB1555 4 108 #define G2D_FMT_XRGB4444 5 109 #define G2D_FMT_ARGB4444 6 110 #define G2D_FMT_PACKED_RGB888 7 111 #define G2D_FMT_A8 11 112 #define G2D_FMT_L8 12 113 114 /* buffer valid length */ 115 #define G2D_LEN_MIN 1 116 #define G2D_LEN_MAX 8000 117 118 #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) 119 #define G2D_CMDLIST_NUM 64 120 #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 121 #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 122 123 /* maximum buffer pool size of userptr is 64MB as default */ 124 #define MAX_POOL (64 * 1024 * 1024) 125 126 enum { 127 BUF_TYPE_GEM = 1, 128 BUF_TYPE_USERPTR, 129 }; 130 131 enum g2d_reg_type { 132 REG_TYPE_NONE = -1, 133 REG_TYPE_SRC, 134 REG_TYPE_SRC_PLANE2, 135 REG_TYPE_DST, 136 REG_TYPE_DST_PLANE2, 137 REG_TYPE_PAT, 138 REG_TYPE_MSK, 139 MAX_REG_TYPE_NR 140 }; 141 142 enum g2d_flag_bits { 143 /* 144 * If set, suspends the runqueue worker after the currently 145 * processed node is finished. 146 */ 147 G2D_BIT_SUSPEND_RUNQUEUE, 148 /* 149 * If set, indicates that the engine is currently busy. 150 */ 151 G2D_BIT_ENGINE_BUSY, 152 }; 153 154 /* cmdlist data structure */ 155 struct g2d_cmdlist { 156 u32 head; 157 unsigned long data[G2D_CMDLIST_DATA_NUM]; 158 u32 last; /* last data offset */ 159 }; 160 161 /* 162 * A structure of buffer description 163 * 164 * @format: color format 165 * @stride: buffer stride/pitch in bytes 166 * @left_x: the x coordinates of left top corner 167 * @top_y: the y coordinates of left top corner 168 * @right_x: the x coordinates of right bottom corner 169 * @bottom_y: the y coordinates of right bottom corner 170 * 171 */ 172 struct g2d_buf_desc { 173 unsigned int format; 174 unsigned int stride; 175 unsigned int left_x; 176 unsigned int top_y; 177 unsigned int right_x; 178 unsigned int bottom_y; 179 }; 180 181 /* 182 * A structure of buffer information 183 * 184 * @map_nr: manages the number of mapped buffers 185 * @reg_types: stores regitster type in the order of requested command 186 * @handles: stores buffer handle in its reg_type position 187 * @types: stores buffer type in its reg_type position 188 * @descs: stores buffer description in its reg_type position 189 * 190 */ 191 struct g2d_buf_info { 192 unsigned int map_nr; 193 enum g2d_reg_type reg_types[MAX_REG_TYPE_NR]; 194 void *obj[MAX_REG_TYPE_NR]; 195 unsigned int types[MAX_REG_TYPE_NR]; 196 struct g2d_buf_desc descs[MAX_REG_TYPE_NR]; 197 }; 198 199 struct drm_exynos_pending_g2d_event { 200 struct drm_pending_event base; 201 struct drm_exynos_g2d_event event; 202 }; 203 204 struct g2d_cmdlist_userptr { 205 struct list_head list; 206 dma_addr_t dma_addr; 207 unsigned long userptr; 208 unsigned long size; 209 struct page **pages; 210 unsigned int npages; 211 struct sg_table *sgt; 212 refcount_t refcount; 213 bool in_pool; 214 bool out_of_list; 215 }; 216 struct g2d_cmdlist_node { 217 struct list_head list; 218 struct g2d_cmdlist *cmdlist; 219 dma_addr_t dma_addr; 220 struct g2d_buf_info buf_info; 221 222 struct drm_exynos_pending_g2d_event *event; 223 }; 224 225 struct g2d_runqueue_node { 226 struct list_head list; 227 struct list_head run_cmdlist; 228 struct list_head event_list; 229 struct drm_file *filp; 230 pid_t pid; 231 struct completion complete; 232 int async; 233 }; 234 235 struct g2d_data { 236 struct device *dev; 237 void *dma_priv; 238 struct clk *gate_clk; 239 void __iomem *regs; 240 int irq; 241 struct workqueue_struct *g2d_workq; 242 struct work_struct runqueue_work; 243 struct drm_device *drm_dev; 244 unsigned long flags; 245 246 /* cmdlist */ 247 struct g2d_cmdlist_node *cmdlist_node; 248 struct list_head free_cmdlist; 249 struct mutex cmdlist_mutex; 250 dma_addr_t cmdlist_pool; 251 void *cmdlist_pool_virt; 252 unsigned long cmdlist_dma_attrs; 253 254 /* runqueue*/ 255 struct g2d_runqueue_node *runqueue_node; 256 struct list_head runqueue; 257 struct mutex runqueue_mutex; 258 struct kmem_cache *runqueue_slab; 259 260 unsigned long current_pool; 261 unsigned long max_pool; 262 }; 263 264 static inline void g2d_hw_reset(struct g2d_data *g2d) 265 { 266 writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET); 267 clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 268 } 269 270 static int g2d_init_cmdlist(struct g2d_data *g2d) 271 { 272 struct device *dev = g2d->dev; 273 struct g2d_cmdlist_node *node; 274 int nr; 275 int ret; 276 struct g2d_buf_info *buf_info; 277 278 g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE; 279 280 g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev), 281 G2D_CMDLIST_POOL_SIZE, 282 &g2d->cmdlist_pool, GFP_KERNEL, 283 g2d->cmdlist_dma_attrs); 284 if (!g2d->cmdlist_pool_virt) { 285 dev_err(dev, "failed to allocate dma memory\n"); 286 return -ENOMEM; 287 } 288 289 node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); 290 if (!node) { 291 ret = -ENOMEM; 292 goto err; 293 } 294 295 for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { 296 unsigned int i; 297 298 node[nr].cmdlist = 299 g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; 300 node[nr].dma_addr = 301 g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; 302 303 buf_info = &node[nr].buf_info; 304 for (i = 0; i < MAX_REG_TYPE_NR; i++) 305 buf_info->reg_types[i] = REG_TYPE_NONE; 306 307 list_add_tail(&node[nr].list, &g2d->free_cmdlist); 308 } 309 310 return 0; 311 312 err: 313 dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE, 314 g2d->cmdlist_pool_virt, 315 g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); 316 return ret; 317 } 318 319 static void g2d_fini_cmdlist(struct g2d_data *g2d) 320 { 321 kfree(g2d->cmdlist_node); 322 323 if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) { 324 dma_free_attrs(to_dma_dev(g2d->drm_dev), 325 G2D_CMDLIST_POOL_SIZE, 326 g2d->cmdlist_pool_virt, 327 g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); 328 } 329 } 330 331 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) 332 { 333 struct device *dev = g2d->dev; 334 struct g2d_cmdlist_node *node; 335 336 mutex_lock(&g2d->cmdlist_mutex); 337 if (list_empty(&g2d->free_cmdlist)) { 338 dev_err(dev, "there is no free cmdlist\n"); 339 mutex_unlock(&g2d->cmdlist_mutex); 340 return NULL; 341 } 342 343 node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node, 344 list); 345 list_del_init(&node->list); 346 mutex_unlock(&g2d->cmdlist_mutex); 347 348 return node; 349 } 350 351 static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node) 352 { 353 mutex_lock(&g2d->cmdlist_mutex); 354 list_move_tail(&node->list, &g2d->free_cmdlist); 355 mutex_unlock(&g2d->cmdlist_mutex); 356 } 357 358 static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv, 359 struct g2d_cmdlist_node *node) 360 { 361 struct g2d_cmdlist_node *lnode; 362 363 if (list_empty(&file_priv->inuse_cmdlist)) 364 goto add_to_list; 365 366 /* this links to base address of new cmdlist */ 367 lnode = list_entry(file_priv->inuse_cmdlist.prev, 368 struct g2d_cmdlist_node, list); 369 lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr; 370 371 add_to_list: 372 list_add_tail(&node->list, &file_priv->inuse_cmdlist); 373 374 if (node->event) 375 list_add_tail(&node->event->base.link, &file_priv->event_list); 376 } 377 378 static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, 379 void *obj, 380 bool force) 381 { 382 struct g2d_cmdlist_userptr *g2d_userptr = obj; 383 384 if (!obj) 385 return; 386 387 if (force) 388 goto out; 389 390 refcount_dec(&g2d_userptr->refcount); 391 392 if (refcount_read(&g2d_userptr->refcount) > 0) 393 return; 394 395 if (g2d_userptr->in_pool) 396 return; 397 398 out: 399 dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt, 400 DMA_BIDIRECTIONAL, 0); 401 402 unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages, 403 true); 404 kvfree(g2d_userptr->pages); 405 406 if (!g2d_userptr->out_of_list) 407 list_del_init(&g2d_userptr->list); 408 409 sg_free_table(g2d_userptr->sgt); 410 kfree(g2d_userptr->sgt); 411 kfree(g2d_userptr); 412 } 413 414 static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, 415 unsigned long userptr, 416 unsigned long size, 417 struct drm_file *filp, 418 void **obj) 419 { 420 struct drm_exynos_file_private *file_priv = filp->driver_priv; 421 struct g2d_cmdlist_userptr *g2d_userptr; 422 struct sg_table *sgt; 423 unsigned long start, end; 424 unsigned int npages, offset; 425 int ret; 426 427 if (!size) { 428 DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n"); 429 return ERR_PTR(-EINVAL); 430 } 431 432 /* check if userptr already exists in userptr_list. */ 433 list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) { 434 if (g2d_userptr->userptr == userptr) { 435 /* 436 * also check size because there could be same address 437 * and different size. 438 */ 439 if (g2d_userptr->size == size) { 440 refcount_inc(&g2d_userptr->refcount); 441 *obj = g2d_userptr; 442 443 return &g2d_userptr->dma_addr; 444 } 445 446 /* 447 * at this moment, maybe g2d dma is accessing this 448 * g2d_userptr memory region so just remove this 449 * g2d_userptr object from userptr_list not to be 450 * referred again and also except it the userptr 451 * pool to be released after the dma access completion. 452 */ 453 g2d_userptr->out_of_list = true; 454 g2d_userptr->in_pool = false; 455 list_del_init(&g2d_userptr->list); 456 457 break; 458 } 459 } 460 461 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); 462 if (!g2d_userptr) 463 return ERR_PTR(-ENOMEM); 464 465 refcount_set(&g2d_userptr->refcount, 1); 466 g2d_userptr->size = size; 467 468 start = userptr & PAGE_MASK; 469 offset = userptr & ~PAGE_MASK; 470 end = PAGE_ALIGN(userptr + size); 471 npages = (end - start) >> PAGE_SHIFT; 472 g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages), 473 GFP_KERNEL); 474 if (!g2d_userptr->pages) { 475 ret = -ENOMEM; 476 goto err_free; 477 } 478 479 ret = pin_user_pages_fast(start, npages, 480 FOLL_WRITE | FOLL_LONGTERM, 481 g2d_userptr->pages); 482 if (ret != npages) { 483 DRM_DEV_ERROR(g2d->dev, 484 "failed to get user pages from userptr.\n"); 485 if (ret < 0) 486 goto err_destroy_pages; 487 npages = ret; 488 ret = -EFAULT; 489 goto err_unpin_pages; 490 } 491 g2d_userptr->npages = npages; 492 493 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 494 if (!sgt) { 495 ret = -ENOMEM; 496 goto err_unpin_pages; 497 } 498 499 ret = sg_alloc_table_from_pages(sgt, 500 g2d_userptr->pages, 501 npages, offset, size, GFP_KERNEL); 502 if (ret < 0) { 503 DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n"); 504 goto err_free_sgt; 505 } 506 507 g2d_userptr->sgt = sgt; 508 509 ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt, 510 DMA_BIDIRECTIONAL, 0); 511 if (ret) { 512 DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n"); 513 goto err_sg_free_table; 514 } 515 516 g2d_userptr->dma_addr = sgt->sgl[0].dma_address; 517 g2d_userptr->userptr = userptr; 518 519 list_add_tail(&g2d_userptr->list, &file_priv->userptr_list); 520 521 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { 522 g2d->current_pool += npages << PAGE_SHIFT; 523 g2d_userptr->in_pool = true; 524 } 525 526 *obj = g2d_userptr; 527 528 return &g2d_userptr->dma_addr; 529 530 err_sg_free_table: 531 sg_free_table(sgt); 532 533 err_free_sgt: 534 kfree(sgt); 535 536 err_unpin_pages: 537 unpin_user_pages(g2d_userptr->pages, npages); 538 539 err_destroy_pages: 540 kvfree(g2d_userptr->pages); 541 542 err_free: 543 kfree(g2d_userptr); 544 545 return ERR_PTR(ret); 546 } 547 548 static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp) 549 { 550 struct drm_exynos_file_private *file_priv = filp->driver_priv; 551 struct g2d_cmdlist_userptr *g2d_userptr, *n; 552 553 list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list) 554 if (g2d_userptr->in_pool) 555 g2d_userptr_put_dma_addr(g2d, g2d_userptr, true); 556 557 g2d->current_pool = 0; 558 } 559 560 static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset) 561 { 562 enum g2d_reg_type reg_type; 563 564 switch (reg_offset) { 565 case G2D_SRC_BASE_ADDR: 566 case G2D_SRC_STRIDE: 567 case G2D_SRC_COLOR_MODE: 568 case G2D_SRC_LEFT_TOP: 569 case G2D_SRC_RIGHT_BOTTOM: 570 reg_type = REG_TYPE_SRC; 571 break; 572 case G2D_SRC_PLANE2_BASE_ADDR: 573 reg_type = REG_TYPE_SRC_PLANE2; 574 break; 575 case G2D_DST_BASE_ADDR: 576 case G2D_DST_STRIDE: 577 case G2D_DST_COLOR_MODE: 578 case G2D_DST_LEFT_TOP: 579 case G2D_DST_RIGHT_BOTTOM: 580 reg_type = REG_TYPE_DST; 581 break; 582 case G2D_DST_PLANE2_BASE_ADDR: 583 reg_type = REG_TYPE_DST_PLANE2; 584 break; 585 case G2D_PAT_BASE_ADDR: 586 reg_type = REG_TYPE_PAT; 587 break; 588 case G2D_MSK_BASE_ADDR: 589 reg_type = REG_TYPE_MSK; 590 break; 591 default: 592 reg_type = REG_TYPE_NONE; 593 DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n", 594 reg_offset); 595 break; 596 } 597 598 return reg_type; 599 } 600 601 static unsigned long g2d_get_buf_bpp(unsigned int format) 602 { 603 unsigned long bpp; 604 605 switch (format) { 606 case G2D_FMT_XRGB8888: 607 case G2D_FMT_ARGB8888: 608 bpp = 4; 609 break; 610 case G2D_FMT_RGB565: 611 case G2D_FMT_XRGB1555: 612 case G2D_FMT_ARGB1555: 613 case G2D_FMT_XRGB4444: 614 case G2D_FMT_ARGB4444: 615 bpp = 2; 616 break; 617 case G2D_FMT_PACKED_RGB888: 618 bpp = 3; 619 break; 620 default: 621 bpp = 1; 622 break; 623 } 624 625 return bpp; 626 } 627 628 static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d, 629 struct g2d_buf_desc *buf_desc, 630 enum g2d_reg_type reg_type, 631 unsigned long size) 632 { 633 int width, height; 634 unsigned long bpp, last_pos; 635 636 /* 637 * check source and destination buffers only. 638 * so the others are always valid. 639 */ 640 if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST) 641 return true; 642 643 /* This check also makes sure that right_x > left_x. */ 644 width = (int)buf_desc->right_x - (int)buf_desc->left_x; 645 if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) { 646 DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width); 647 return false; 648 } 649 650 /* This check also makes sure that bottom_y > top_y. */ 651 height = (int)buf_desc->bottom_y - (int)buf_desc->top_y; 652 if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) { 653 DRM_DEV_ERROR(g2d->dev, 654 "height[%d] is out of range!\n", height); 655 return false; 656 } 657 658 bpp = g2d_get_buf_bpp(buf_desc->format); 659 660 /* Compute the position of the last byte that the engine accesses. */ 661 last_pos = ((unsigned long)buf_desc->bottom_y - 1) * 662 (unsigned long)buf_desc->stride + 663 (unsigned long)buf_desc->right_x * bpp - 1; 664 665 /* 666 * Since right_x > left_x and bottom_y > top_y we already know 667 * that the first_pos < last_pos (first_pos being the position 668 * of the first byte the engine accesses), it just remains to 669 * check if last_pos is smaller then the buffer size. 670 */ 671 672 if (last_pos >= size) { 673 DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] " 674 "is out of range [%lu]!\n", last_pos, size); 675 return false; 676 } 677 678 return true; 679 } 680 681 static int g2d_map_cmdlist_gem(struct g2d_data *g2d, 682 struct g2d_cmdlist_node *node, 683 struct drm_device *drm_dev, 684 struct drm_file *file) 685 { 686 struct g2d_cmdlist *cmdlist = node->cmdlist; 687 struct g2d_buf_info *buf_info = &node->buf_info; 688 int offset; 689 int ret; 690 int i; 691 692 for (i = 0; i < buf_info->map_nr; i++) { 693 struct g2d_buf_desc *buf_desc; 694 enum g2d_reg_type reg_type; 695 int reg_pos; 696 unsigned long handle; 697 dma_addr_t *addr; 698 699 reg_pos = cmdlist->last - 2 * (i + 1); 700 701 offset = cmdlist->data[reg_pos]; 702 handle = cmdlist->data[reg_pos + 1]; 703 704 reg_type = g2d_get_reg_type(g2d, offset); 705 if (reg_type == REG_TYPE_NONE) { 706 ret = -EFAULT; 707 goto err; 708 } 709 710 buf_desc = &buf_info->descs[reg_type]; 711 712 if (buf_info->types[reg_type] == BUF_TYPE_GEM) { 713 struct exynos_drm_gem *exynos_gem; 714 715 exynos_gem = exynos_drm_gem_get(file, handle); 716 if (!exynos_gem) { 717 ret = -EFAULT; 718 goto err; 719 } 720 721 if (!g2d_check_buf_desc_is_valid(g2d, buf_desc, 722 reg_type, exynos_gem->size)) { 723 exynos_drm_gem_put(exynos_gem); 724 ret = -EFAULT; 725 goto err; 726 } 727 728 addr = &exynos_gem->dma_addr; 729 buf_info->obj[reg_type] = exynos_gem; 730 } else { 731 struct drm_exynos_g2d_userptr g2d_userptr; 732 733 if (copy_from_user(&g2d_userptr, (void __user *)handle, 734 sizeof(struct drm_exynos_g2d_userptr))) { 735 ret = -EFAULT; 736 goto err; 737 } 738 739 if (!g2d_check_buf_desc_is_valid(g2d, buf_desc, 740 reg_type, 741 g2d_userptr.size)) { 742 ret = -EFAULT; 743 goto err; 744 } 745 746 addr = g2d_userptr_get_dma_addr(g2d, 747 g2d_userptr.userptr, 748 g2d_userptr.size, 749 file, 750 &buf_info->obj[reg_type]); 751 if (IS_ERR(addr)) { 752 ret = -EFAULT; 753 goto err; 754 } 755 } 756 757 cmdlist->data[reg_pos + 1] = *addr; 758 buf_info->reg_types[i] = reg_type; 759 } 760 761 return 0; 762 763 err: 764 buf_info->map_nr = i; 765 return ret; 766 } 767 768 static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, 769 struct g2d_cmdlist_node *node, 770 struct drm_file *filp) 771 { 772 struct g2d_buf_info *buf_info = &node->buf_info; 773 int i; 774 775 for (i = 0; i < buf_info->map_nr; i++) { 776 struct g2d_buf_desc *buf_desc; 777 enum g2d_reg_type reg_type; 778 void *obj; 779 780 reg_type = buf_info->reg_types[i]; 781 782 buf_desc = &buf_info->descs[reg_type]; 783 obj = buf_info->obj[reg_type]; 784 785 if (buf_info->types[reg_type] == BUF_TYPE_GEM) 786 exynos_drm_gem_put(obj); 787 else 788 g2d_userptr_put_dma_addr(g2d, obj, false); 789 790 buf_info->reg_types[i] = REG_TYPE_NONE; 791 buf_info->obj[reg_type] = NULL; 792 buf_info->types[reg_type] = 0; 793 memset(buf_desc, 0x00, sizeof(*buf_desc)); 794 } 795 796 buf_info->map_nr = 0; 797 } 798 799 static void g2d_dma_start(struct g2d_data *g2d, 800 struct g2d_runqueue_node *runqueue_node) 801 { 802 struct g2d_cmdlist_node *node = 803 list_first_entry(&runqueue_node->run_cmdlist, 804 struct g2d_cmdlist_node, list); 805 806 set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 807 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 808 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 809 } 810 811 static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d) 812 { 813 struct g2d_runqueue_node *runqueue_node; 814 815 if (list_empty(&g2d->runqueue)) 816 return NULL; 817 818 runqueue_node = list_first_entry(&g2d->runqueue, 819 struct g2d_runqueue_node, list); 820 list_del_init(&runqueue_node->list); 821 return runqueue_node; 822 } 823 824 static void g2d_free_runqueue_node(struct g2d_data *g2d, 825 struct g2d_runqueue_node *runqueue_node) 826 { 827 struct g2d_cmdlist_node *node; 828 829 mutex_lock(&g2d->cmdlist_mutex); 830 /* 831 * commands in run_cmdlist have been completed so unmap all gem 832 * objects in each command node so that they are unreferenced. 833 */ 834 list_for_each_entry(node, &runqueue_node->run_cmdlist, list) 835 g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp); 836 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); 837 mutex_unlock(&g2d->cmdlist_mutex); 838 839 kmem_cache_free(g2d->runqueue_slab, runqueue_node); 840 } 841 842 /** 843 * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes 844 * @g2d: G2D state object 845 * @file: if not zero, only remove items with this DRM file 846 * 847 * Has to be called under runqueue lock. 848 */ 849 static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file) 850 { 851 struct g2d_runqueue_node *node, *n; 852 853 if (list_empty(&g2d->runqueue)) 854 return; 855 856 list_for_each_entry_safe(node, n, &g2d->runqueue, list) { 857 if (file && node->filp != file) 858 continue; 859 860 list_del_init(&node->list); 861 g2d_free_runqueue_node(g2d, node); 862 } 863 } 864 865 static void g2d_runqueue_worker(struct work_struct *work) 866 { 867 struct g2d_data *g2d = container_of(work, struct g2d_data, 868 runqueue_work); 869 struct g2d_runqueue_node *runqueue_node; 870 871 /* 872 * The engine is busy and the completion of the current node is going 873 * to poke the runqueue worker, so nothing to do here. 874 */ 875 if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags)) 876 return; 877 878 mutex_lock(&g2d->runqueue_mutex); 879 880 runqueue_node = g2d->runqueue_node; 881 g2d->runqueue_node = NULL; 882 883 if (runqueue_node) { 884 pm_runtime_mark_last_busy(g2d->dev); 885 pm_runtime_put_autosuspend(g2d->dev); 886 887 complete(&runqueue_node->complete); 888 if (runqueue_node->async) 889 g2d_free_runqueue_node(g2d, runqueue_node); 890 } 891 892 if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) { 893 g2d->runqueue_node = g2d_get_runqueue_node(g2d); 894 895 if (g2d->runqueue_node) { 896 int ret; 897 898 ret = pm_runtime_resume_and_get(g2d->dev); 899 if (ret < 0) { 900 dev_err(g2d->dev, "failed to enable G2D device.\n"); 901 goto out; 902 } 903 904 g2d_dma_start(g2d, g2d->runqueue_node); 905 } 906 } 907 908 out: 909 mutex_unlock(&g2d->runqueue_mutex); 910 } 911 912 static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) 913 { 914 struct drm_device *drm_dev = g2d->drm_dev; 915 struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; 916 struct drm_exynos_pending_g2d_event *e; 917 struct timespec64 now; 918 919 if (list_empty(&runqueue_node->event_list)) 920 return; 921 922 e = list_first_entry(&runqueue_node->event_list, 923 struct drm_exynos_pending_g2d_event, base.link); 924 925 ktime_get_ts64(&now); 926 e->event.tv_sec = now.tv_sec; 927 e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; 928 e->event.cmdlist_no = cmdlist_no; 929 930 drm_send_event(drm_dev, &e->base); 931 } 932 933 static irqreturn_t g2d_irq_handler(int irq, void *dev_id) 934 { 935 struct g2d_data *g2d = dev_id; 936 u32 pending; 937 938 pending = readl_relaxed(g2d->regs + G2D_INTC_PEND); 939 if (pending) 940 writel_relaxed(pending, g2d->regs + G2D_INTC_PEND); 941 942 if (pending & G2D_INTP_GCMD_FIN) { 943 u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS); 944 945 cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >> 946 G2D_DMA_LIST_DONE_COUNT_OFFSET; 947 948 g2d_finish_event(g2d, cmdlist_no); 949 950 writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD); 951 if (!(pending & G2D_INTP_ACMD_FIN)) { 952 writel_relaxed(G2D_DMA_CONTINUE, 953 g2d->regs + G2D_DMA_COMMAND); 954 } 955 } 956 957 if (pending & G2D_INTP_ACMD_FIN) { 958 clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 959 queue_work(g2d->g2d_workq, &g2d->runqueue_work); 960 } 961 962 return IRQ_HANDLED; 963 } 964 965 /** 966 * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node 967 * @g2d: G2D state object 968 * @file: if not zero, only wait if the current runqueue node belongs 969 * to the DRM file 970 * 971 * Should the engine not become idle after a 100ms timeout, a hardware 972 * reset is issued. 973 */ 974 static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file) 975 { 976 struct device *dev = g2d->dev; 977 978 struct g2d_runqueue_node *runqueue_node = NULL; 979 unsigned int tries = 10; 980 981 mutex_lock(&g2d->runqueue_mutex); 982 983 /* If no node is currently processed, we have nothing to do. */ 984 if (!g2d->runqueue_node) 985 goto out; 986 987 runqueue_node = g2d->runqueue_node; 988 989 /* Check if the currently processed item belongs to us. */ 990 if (file && runqueue_node->filp != file) 991 goto out; 992 993 mutex_unlock(&g2d->runqueue_mutex); 994 995 /* Wait for the G2D engine to finish. */ 996 while (tries-- && (g2d->runqueue_node == runqueue_node)) 997 mdelay(10); 998 999 mutex_lock(&g2d->runqueue_mutex); 1000 1001 if (g2d->runqueue_node != runqueue_node) 1002 goto out; 1003 1004 dev_err(dev, "wait timed out, resetting engine...\n"); 1005 g2d_hw_reset(g2d); 1006 1007 /* 1008 * After the hardware reset of the engine we are going to loose 1009 * the IRQ which triggers the PM runtime put(). 1010 * So do this manually here. 1011 */ 1012 pm_runtime_mark_last_busy(dev); 1013 pm_runtime_put_autosuspend(dev); 1014 1015 complete(&runqueue_node->complete); 1016 if (runqueue_node->async) 1017 g2d_free_runqueue_node(g2d, runqueue_node); 1018 1019 out: 1020 mutex_unlock(&g2d->runqueue_mutex); 1021 } 1022 1023 static int g2d_check_reg_offset(struct g2d_data *g2d, 1024 struct g2d_cmdlist_node *node, 1025 int nr, bool for_addr) 1026 { 1027 struct g2d_cmdlist *cmdlist = node->cmdlist; 1028 int reg_offset; 1029 int index; 1030 int i; 1031 1032 for (i = 0; i < nr; i++) { 1033 struct g2d_buf_info *buf_info = &node->buf_info; 1034 struct g2d_buf_desc *buf_desc; 1035 enum g2d_reg_type reg_type; 1036 unsigned long value; 1037 1038 index = cmdlist->last - 2 * (i + 1); 1039 1040 reg_offset = cmdlist->data[index] & ~0xfffff000; 1041 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 1042 goto err; 1043 if (reg_offset % 4) 1044 goto err; 1045 1046 switch (reg_offset) { 1047 case G2D_SRC_BASE_ADDR: 1048 case G2D_SRC_PLANE2_BASE_ADDR: 1049 case G2D_DST_BASE_ADDR: 1050 case G2D_DST_PLANE2_BASE_ADDR: 1051 case G2D_PAT_BASE_ADDR: 1052 case G2D_MSK_BASE_ADDR: 1053 if (!for_addr) 1054 goto err; 1055 1056 reg_type = g2d_get_reg_type(g2d, reg_offset); 1057 1058 /* check userptr buffer type. */ 1059 if ((cmdlist->data[index] & ~0x7fffffff) >> 31) { 1060 buf_info->types[reg_type] = BUF_TYPE_USERPTR; 1061 cmdlist->data[index] &= ~G2D_BUF_USERPTR; 1062 } else 1063 buf_info->types[reg_type] = BUF_TYPE_GEM; 1064 break; 1065 case G2D_SRC_STRIDE: 1066 case G2D_DST_STRIDE: 1067 if (for_addr) 1068 goto err; 1069 1070 reg_type = g2d_get_reg_type(g2d, reg_offset); 1071 1072 buf_desc = &buf_info->descs[reg_type]; 1073 buf_desc->stride = cmdlist->data[index + 1]; 1074 break; 1075 case G2D_SRC_COLOR_MODE: 1076 case G2D_DST_COLOR_MODE: 1077 if (for_addr) 1078 goto err; 1079 1080 reg_type = g2d_get_reg_type(g2d, reg_offset); 1081 1082 buf_desc = &buf_info->descs[reg_type]; 1083 value = cmdlist->data[index + 1]; 1084 1085 buf_desc->format = value & 0xf; 1086 break; 1087 case G2D_SRC_LEFT_TOP: 1088 case G2D_DST_LEFT_TOP: 1089 if (for_addr) 1090 goto err; 1091 1092 reg_type = g2d_get_reg_type(g2d, reg_offset); 1093 1094 buf_desc = &buf_info->descs[reg_type]; 1095 value = cmdlist->data[index + 1]; 1096 1097 buf_desc->left_x = value & 0x1fff; 1098 buf_desc->top_y = (value & 0x1fff0000) >> 16; 1099 break; 1100 case G2D_SRC_RIGHT_BOTTOM: 1101 case G2D_DST_RIGHT_BOTTOM: 1102 if (for_addr) 1103 goto err; 1104 1105 reg_type = g2d_get_reg_type(g2d, reg_offset); 1106 1107 buf_desc = &buf_info->descs[reg_type]; 1108 value = cmdlist->data[index + 1]; 1109 1110 buf_desc->right_x = value & 0x1fff; 1111 buf_desc->bottom_y = (value & 0x1fff0000) >> 16; 1112 break; 1113 default: 1114 if (for_addr) 1115 goto err; 1116 break; 1117 } 1118 } 1119 1120 return 0; 1121 1122 err: 1123 dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]); 1124 return -EINVAL; 1125 } 1126 1127 /* ioctl functions */ 1128 int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, 1129 struct drm_file *file) 1130 { 1131 struct drm_exynos_g2d_get_ver *ver = data; 1132 1133 ver->major = G2D_HW_MAJOR_VER; 1134 ver->minor = G2D_HW_MINOR_VER; 1135 1136 return 0; 1137 } 1138 1139 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, 1140 struct drm_file *file) 1141 { 1142 struct drm_exynos_file_private *file_priv = file->driver_priv; 1143 struct exynos_drm_private *priv = drm_dev->dev_private; 1144 struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev); 1145 struct drm_exynos_g2d_set_cmdlist *req = data; 1146 struct drm_exynos_g2d_cmd *cmd; 1147 struct drm_exynos_pending_g2d_event *e; 1148 struct g2d_cmdlist_node *node; 1149 struct g2d_cmdlist *cmdlist; 1150 int size; 1151 int ret; 1152 1153 node = g2d_get_cmdlist(g2d); 1154 if (!node) 1155 return -ENOMEM; 1156 1157 /* 1158 * To avoid an integer overflow for the later size computations, we 1159 * enforce a maximum number of submitted commands here. This limit is 1160 * sufficient for all conceivable usage cases of the G2D. 1161 */ 1162 if (req->cmd_nr > G2D_CMDLIST_DATA_NUM || 1163 req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) { 1164 dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n"); 1165 return -EINVAL; 1166 } 1167 1168 node->event = NULL; 1169 1170 if (req->event_type != G2D_EVENT_NOT) { 1171 e = kzalloc(sizeof(*node->event), GFP_KERNEL); 1172 if (!e) { 1173 ret = -ENOMEM; 1174 goto err; 1175 } 1176 1177 e->event.base.type = DRM_EXYNOS_G2D_EVENT; 1178 e->event.base.length = sizeof(e->event); 1179 e->event.user_data = req->user_data; 1180 1181 ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base); 1182 if (ret) { 1183 kfree(e); 1184 goto err; 1185 } 1186 1187 node->event = e; 1188 } 1189 1190 cmdlist = node->cmdlist; 1191 1192 cmdlist->last = 0; 1193 1194 /* 1195 * If don't clear SFR registers, the cmdlist is affected by register 1196 * values of previous cmdlist. G2D hw executes SFR clear command and 1197 * a next command at the same time then the next command is ignored and 1198 * is executed rightly from next next command, so needs a dummy command 1199 * to next command of SFR clear command. 1200 */ 1201 cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET; 1202 cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR; 1203 cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; 1204 cmdlist->data[cmdlist->last++] = 0; 1205 1206 /* 1207 * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG 1208 * and GCF bit should be set to INTEN register if user wants 1209 * G2D interrupt event once current command list execution is 1210 * finished. 1211 * Otherwise only ACF bit should be set to INTEN register so 1212 * that one interrupt is occurred after all command lists 1213 * have been completed. 1214 */ 1215 if (node->event) { 1216 cmdlist->data[cmdlist->last++] = G2D_INTEN; 1217 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF; 1218 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; 1219 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; 1220 } else { 1221 cmdlist->data[cmdlist->last++] = G2D_INTEN; 1222 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF; 1223 } 1224 1225 /* 1226 * Check the size of cmdlist. The 2 that is added last comes from 1227 * the implicit G2D_BITBLT_START that is appended once we have 1228 * checked all the submitted commands. 1229 */ 1230 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2; 1231 if (size > G2D_CMDLIST_DATA_NUM) { 1232 dev_err(g2d->dev, "cmdlist size is too big\n"); 1233 ret = -EINVAL; 1234 goto err_free_event; 1235 } 1236 1237 cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd; 1238 1239 if (copy_from_user(cmdlist->data + cmdlist->last, 1240 (void __user *)cmd, 1241 sizeof(*cmd) * req->cmd_nr)) { 1242 ret = -EFAULT; 1243 goto err_free_event; 1244 } 1245 cmdlist->last += req->cmd_nr * 2; 1246 1247 ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false); 1248 if (ret < 0) 1249 goto err_free_event; 1250 1251 node->buf_info.map_nr = req->cmd_buf_nr; 1252 if (req->cmd_buf_nr) { 1253 struct drm_exynos_g2d_cmd *cmd_buf; 1254 1255 cmd_buf = (struct drm_exynos_g2d_cmd *) 1256 (unsigned long)req->cmd_buf; 1257 1258 if (copy_from_user(cmdlist->data + cmdlist->last, 1259 (void __user *)cmd_buf, 1260 sizeof(*cmd_buf) * req->cmd_buf_nr)) { 1261 ret = -EFAULT; 1262 goto err_free_event; 1263 } 1264 cmdlist->last += req->cmd_buf_nr * 2; 1265 1266 ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true); 1267 if (ret < 0) 1268 goto err_free_event; 1269 1270 ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file); 1271 if (ret < 0) 1272 goto err_unmap; 1273 } 1274 1275 cmdlist->data[cmdlist->last++] = G2D_BITBLT_START; 1276 cmdlist->data[cmdlist->last++] = G2D_START_BITBLT; 1277 1278 /* head */ 1279 cmdlist->head = cmdlist->last / 2; 1280 1281 /* tail */ 1282 cmdlist->data[cmdlist->last] = 0; 1283 1284 g2d_add_cmdlist_to_inuse(file_priv, node); 1285 1286 return 0; 1287 1288 err_unmap: 1289 g2d_unmap_cmdlist_gem(g2d, node, file); 1290 err_free_event: 1291 if (node->event) 1292 drm_event_cancel_free(drm_dev, &node->event->base); 1293 err: 1294 g2d_put_cmdlist(g2d, node); 1295 return ret; 1296 } 1297 1298 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, 1299 struct drm_file *file) 1300 { 1301 struct drm_exynos_file_private *file_priv = file->driver_priv; 1302 struct exynos_drm_private *priv = drm_dev->dev_private; 1303 struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev); 1304 struct drm_exynos_g2d_exec *req = data; 1305 struct g2d_runqueue_node *runqueue_node; 1306 struct list_head *run_cmdlist; 1307 struct list_head *event_list; 1308 1309 runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); 1310 if (!runqueue_node) 1311 return -ENOMEM; 1312 1313 run_cmdlist = &runqueue_node->run_cmdlist; 1314 event_list = &runqueue_node->event_list; 1315 INIT_LIST_HEAD(run_cmdlist); 1316 INIT_LIST_HEAD(event_list); 1317 init_completion(&runqueue_node->complete); 1318 runqueue_node->async = req->async; 1319 1320 list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist); 1321 list_splice_init(&file_priv->event_list, event_list); 1322 1323 if (list_empty(run_cmdlist)) { 1324 dev_err(g2d->dev, "there is no inuse cmdlist\n"); 1325 kmem_cache_free(g2d->runqueue_slab, runqueue_node); 1326 return -EPERM; 1327 } 1328 1329 mutex_lock(&g2d->runqueue_mutex); 1330 runqueue_node->pid = current->pid; 1331 runqueue_node->filp = file; 1332 list_add_tail(&runqueue_node->list, &g2d->runqueue); 1333 mutex_unlock(&g2d->runqueue_mutex); 1334 1335 /* Let the runqueue know that there is work to do. */ 1336 queue_work(g2d->g2d_workq, &g2d->runqueue_work); 1337 1338 if (req->async) 1339 goto out; 1340 1341 wait_for_completion(&runqueue_node->complete); 1342 g2d_free_runqueue_node(g2d, runqueue_node); 1343 1344 out: 1345 return 0; 1346 } 1347 1348 int g2d_open(struct drm_device *drm_dev, struct drm_file *file) 1349 { 1350 struct drm_exynos_file_private *file_priv = file->driver_priv; 1351 1352 INIT_LIST_HEAD(&file_priv->inuse_cmdlist); 1353 INIT_LIST_HEAD(&file_priv->event_list); 1354 INIT_LIST_HEAD(&file_priv->userptr_list); 1355 1356 return 0; 1357 } 1358 1359 void g2d_close(struct drm_device *drm_dev, struct drm_file *file) 1360 { 1361 struct drm_exynos_file_private *file_priv = file->driver_priv; 1362 struct exynos_drm_private *priv = drm_dev->dev_private; 1363 struct g2d_data *g2d; 1364 struct g2d_cmdlist_node *node, *n; 1365 1366 if (!priv->g2d_dev) 1367 return; 1368 1369 g2d = dev_get_drvdata(priv->g2d_dev); 1370 1371 /* Remove the runqueue nodes that belong to us. */ 1372 mutex_lock(&g2d->runqueue_mutex); 1373 g2d_remove_runqueue_nodes(g2d, file); 1374 mutex_unlock(&g2d->runqueue_mutex); 1375 1376 /* 1377 * Wait for the runqueue worker to finish its current node. 1378 * After this the engine should no longer be accessing any 1379 * memory belonging to us. 1380 */ 1381 g2d_wait_finish(g2d, file); 1382 1383 /* 1384 * Even after the engine is idle, there might still be stale cmdlists 1385 * (i.e. cmdlisst which we submitted but never executed) around, with 1386 * their corresponding GEM/userptr buffers. 1387 * Properly unmap these buffers here. 1388 */ 1389 mutex_lock(&g2d->cmdlist_mutex); 1390 list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) { 1391 g2d_unmap_cmdlist_gem(g2d, node, file); 1392 list_move_tail(&node->list, &g2d->free_cmdlist); 1393 } 1394 mutex_unlock(&g2d->cmdlist_mutex); 1395 1396 /* release all g2d_userptr in pool. */ 1397 g2d_userptr_free_all(g2d, file); 1398 } 1399 1400 static int g2d_bind(struct device *dev, struct device *master, void *data) 1401 { 1402 struct g2d_data *g2d = dev_get_drvdata(dev); 1403 struct drm_device *drm_dev = data; 1404 struct exynos_drm_private *priv = drm_dev->dev_private; 1405 int ret; 1406 1407 g2d->drm_dev = drm_dev; 1408 1409 /* allocate dma-aware cmdlist buffer. */ 1410 ret = g2d_init_cmdlist(g2d); 1411 if (ret < 0) { 1412 dev_err(dev, "cmdlist init failed\n"); 1413 return ret; 1414 } 1415 1416 ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv); 1417 if (ret < 0) { 1418 dev_err(dev, "failed to enable iommu.\n"); 1419 g2d_fini_cmdlist(g2d); 1420 return ret; 1421 } 1422 priv->g2d_dev = dev; 1423 1424 dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n", 1425 G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); 1426 return 0; 1427 } 1428 1429 static void g2d_unbind(struct device *dev, struct device *master, void *data) 1430 { 1431 struct g2d_data *g2d = dev_get_drvdata(dev); 1432 struct drm_device *drm_dev = data; 1433 struct exynos_drm_private *priv = drm_dev->dev_private; 1434 1435 /* Suspend operation and wait for engine idle. */ 1436 set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1437 g2d_wait_finish(g2d, NULL); 1438 priv->g2d_dev = NULL; 1439 1440 cancel_work_sync(&g2d->runqueue_work); 1441 exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv); 1442 } 1443 1444 static const struct component_ops g2d_component_ops = { 1445 .bind = g2d_bind, 1446 .unbind = g2d_unbind, 1447 }; 1448 1449 static int g2d_probe(struct platform_device *pdev) 1450 { 1451 struct device *dev = &pdev->dev; 1452 struct g2d_data *g2d; 1453 int ret; 1454 1455 g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); 1456 if (!g2d) 1457 return -ENOMEM; 1458 1459 g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", 1460 sizeof(struct g2d_runqueue_node), 0, 0, NULL); 1461 if (!g2d->runqueue_slab) 1462 return -ENOMEM; 1463 1464 g2d->dev = dev; 1465 1466 g2d->g2d_workq = create_singlethread_workqueue("g2d"); 1467 if (!g2d->g2d_workq) { 1468 dev_err(dev, "failed to create workqueue\n"); 1469 ret = -EINVAL; 1470 goto err_destroy_slab; 1471 } 1472 1473 INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker); 1474 INIT_LIST_HEAD(&g2d->free_cmdlist); 1475 INIT_LIST_HEAD(&g2d->runqueue); 1476 1477 mutex_init(&g2d->cmdlist_mutex); 1478 mutex_init(&g2d->runqueue_mutex); 1479 1480 g2d->gate_clk = devm_clk_get(dev, "fimg2d"); 1481 if (IS_ERR(g2d->gate_clk)) { 1482 dev_err(dev, "failed to get gate clock\n"); 1483 ret = PTR_ERR(g2d->gate_clk); 1484 goto err_destroy_workqueue; 1485 } 1486 1487 pm_runtime_use_autosuspend(dev); 1488 pm_runtime_set_autosuspend_delay(dev, 2000); 1489 pm_runtime_enable(dev); 1490 clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1491 clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 1492 1493 g2d->regs = devm_platform_ioremap_resource(pdev, 0); 1494 if (IS_ERR(g2d->regs)) { 1495 ret = PTR_ERR(g2d->regs); 1496 goto err_put_clk; 1497 } 1498 1499 g2d->irq = platform_get_irq(pdev, 0); 1500 if (g2d->irq < 0) { 1501 ret = g2d->irq; 1502 goto err_put_clk; 1503 } 1504 1505 ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0, 1506 "drm_g2d", g2d); 1507 if (ret < 0) { 1508 dev_err(dev, "irq request failed\n"); 1509 goto err_put_clk; 1510 } 1511 1512 g2d->max_pool = MAX_POOL; 1513 1514 platform_set_drvdata(pdev, g2d); 1515 1516 ret = component_add(dev, &g2d_component_ops); 1517 if (ret < 0) { 1518 dev_err(dev, "failed to register drm g2d device\n"); 1519 goto err_put_clk; 1520 } 1521 1522 return 0; 1523 1524 err_put_clk: 1525 pm_runtime_disable(dev); 1526 err_destroy_workqueue: 1527 destroy_workqueue(g2d->g2d_workq); 1528 err_destroy_slab: 1529 kmem_cache_destroy(g2d->runqueue_slab); 1530 return ret; 1531 } 1532 1533 static void g2d_remove(struct platform_device *pdev) 1534 { 1535 struct g2d_data *g2d = platform_get_drvdata(pdev); 1536 1537 component_del(&pdev->dev, &g2d_component_ops); 1538 1539 /* There should be no locking needed here. */ 1540 g2d_remove_runqueue_nodes(g2d, NULL); 1541 1542 pm_runtime_dont_use_autosuspend(&pdev->dev); 1543 pm_runtime_disable(&pdev->dev); 1544 1545 g2d_fini_cmdlist(g2d); 1546 destroy_workqueue(g2d->g2d_workq); 1547 kmem_cache_destroy(g2d->runqueue_slab); 1548 } 1549 1550 static int g2d_suspend(struct device *dev) 1551 { 1552 struct g2d_data *g2d = dev_get_drvdata(dev); 1553 1554 /* 1555 * Suspend the runqueue worker operation and wait until the G2D 1556 * engine is idle. 1557 */ 1558 set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1559 g2d_wait_finish(g2d, NULL); 1560 flush_work(&g2d->runqueue_work); 1561 1562 return 0; 1563 } 1564 1565 static int g2d_resume(struct device *dev) 1566 { 1567 struct g2d_data *g2d = dev_get_drvdata(dev); 1568 1569 clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1570 queue_work(g2d->g2d_workq, &g2d->runqueue_work); 1571 1572 return 0; 1573 } 1574 1575 static int g2d_runtime_suspend(struct device *dev) 1576 { 1577 struct g2d_data *g2d = dev_get_drvdata(dev); 1578 1579 clk_disable_unprepare(g2d->gate_clk); 1580 1581 return 0; 1582 } 1583 1584 static int g2d_runtime_resume(struct device *dev) 1585 { 1586 struct g2d_data *g2d = dev_get_drvdata(dev); 1587 int ret; 1588 1589 ret = clk_prepare_enable(g2d->gate_clk); 1590 if (ret < 0) 1591 dev_warn(dev, "failed to enable clock.\n"); 1592 1593 return ret; 1594 } 1595 1596 static const struct dev_pm_ops g2d_pm_ops = { 1597 SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) 1598 RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) 1599 }; 1600 1601 static const struct of_device_id exynos_g2d_match[] = { 1602 { .compatible = "samsung,exynos5250-g2d" }, 1603 { .compatible = "samsung,exynos4212-g2d" }, 1604 {}, 1605 }; 1606 MODULE_DEVICE_TABLE(of, exynos_g2d_match); 1607 1608 struct platform_driver g2d_driver = { 1609 .probe = g2d_probe, 1610 .remove_new = g2d_remove, 1611 .driver = { 1612 .name = "exynos-drm-g2d", 1613 .owner = THIS_MODULE, 1614 .pm = pm_ptr(&g2d_pm_ops), 1615 .of_match_table = exynos_g2d_match, 1616 }, 1617 }; 1618