/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
#endif
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
};

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_MSM_FBDEV
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

/*
 * Util/helpers:
 */

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
	return val;
}

struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};
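
/* Note: the DRM core may ask us to enable/disable vblank irqs from
 * atomic context, but the kms enable_vblank()/disable_vblank() hooks
 * can need to sleep (clocks, register access), so such requests are
 * queued as vblank_events and executed from a worker instead.
 */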
static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
					struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
					priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
					priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}

	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
		int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);

	return 0;
}

/*
 * DRM operations:
 */

static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&dev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}

static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	static const struct of_device_id match_types[] = { {
		.compatible = "qcom,mdss_mdp",
		.data = (void *)5,
	}, {
		/* end node */
	} };
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	match = of_match_node(match_types, dev->of_node);
	if (match)
		return (int)(unsigned long)match->data;
#endif
	return 4;
}

#include <linux/of_address.h>
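
/* Illustrative 'memory-region' wiring for case (2) described in
 * msm_init_vram() below (node names, labels, and addresses are
 * examples only, not taken from a real board file):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		splash: splash_region@83000000 {
 *			reg = <0x83000000 0x1000000>;
 *			no-map;
 *		};
 *	};
 *
 *	mdp: mdp@5100000 {
 *		...
 *		memory-region = <&splash>;
 *	};
 */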
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long size = 0;
	int ret = 0;

#ifdef CONFIG_OF
	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	struct device_node *node;

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		/* resource end is inclusive, so size is end - start + 1 */
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
	} else
#endif

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		DEFINE_DMA_ATTRS(attrs);
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(dev);

	platform_set_drvdata(pdev, dev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		return ret;

	ret = msm_init_vram(dev);
	if (ret)
		goto fail;

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	if (fbdev)
		priv->fbdev = msm_fbdev_init(dev);
#endif

	ret = msm_debugfs_late_init(dev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}
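
/* GPU init is deferred until first open so that e.g. GPU firmware does
 * not have to be present in the initrd; the static mutex below keeps
 * two racing open()s from loading the GPU twice.
 */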
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
#endif
}
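
/* The irq hooks below are only reachable after drm_irq_install(), which
 * msm_load() calls once the kms object exists and hw_init has
 * succeeded, hence the BUG_ON(!kms) assertions.
 */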
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return vblank_ctrl_queue_work(priv, crtc_id, true);
}

static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	vblank_ctrl_queue_work(priv, crtc_id, false);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}

static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_puts(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}

static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}

static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_puts(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_puts(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gpu", show_locked, 0, msm_gpu_show },
	{ "gem", show_locked, 0, msm_gem_show },
	{ "mm",  show_locked, 0, msm_mm_show },
	{ "fb",  show_locked, 0, msm_fb_show },
};

static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;
	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}
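
/* The rd/perf debugfs files need driver state that only exists once
 * msm_load() has run, so they are created from msm_debugfs_late_init()
 * rather than from the debugfs_init() hook (which the DRM core invokes
 * earlier, at minor registration), and torn down in
 * msm_debugfs_cleanup().
 */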
static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return 0;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
	if (!minor->dev->dev_private)
		return;
	msm_rd_debugfs_cleanup(minor);
	msm_perf_debugfs_cleanup(minor);
}
#endif

/*
 * Fences:
 */
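
/* Fences are driver-wide, monotonically increasing sequence numbers; a
 * fence has signalled once priv->completed_fence (the last seqno
 * retired by the GPU) has reached it, which is what fence_completed()
 * in msm_drv.h checks.
 */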
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
		ktime_t *timeout, bool interruptible)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		ktime_t now = ktime_get();
		unsigned long remaining_jiffies;

		if (ktime_compare(*timeout, now) < 0) {
			remaining_jiffies = 0;
		} else {
			ktime_t rem = ktime_sub(*timeout, now);
			struct timespec ts = ktime_to_timespec(rem);
			remaining_jiffies = timespec_to_jiffies(&ts);
		}

		if (interruptible)
			ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);
		else
			ret = wait_event_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

int msm_queue_fence_cb(struct drm_device *dev,
		struct msm_fence_cb *cb, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (fence > priv->completed_fence) {
		cb->fence = fence;
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	return msm_wait_fence(dev, args->fence, &timeout, true);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
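
/* Illustrative userspace use of the GEM_NEW + GEM_INFO ioctls above,
 * via libdrm's drmIoctl() (error handling omitted; 'fd' is assumed to
 * be an open /dev/dri/... file descriptor):
 *
 *	struct drm_msm_gem_new new_req = {
 *		.size = 4096,
 *		.flags = MSM_BO_WC,
 *	};
 *	struct drm_msm_gem_info info_req = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_NEW, &new_req);
 *	info_req.handle = new_req.handle;
 *	drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &info_req);
 *	// info_req.offset is now usable as the mmap() offset for the bo
 */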
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.load = msm_load,
	.unload = msm_unload,
	.open = msm_open,
	.preclose = msm_preclose,
	.lastclose = msm_lastclose,
	.set_busid = drm_platform_set_busid,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = msm_gem_prime_pin,
	.gem_prime_unpin = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap = msm_gem_prime_vmap,
	.gem_prime_vunmap = msm_gem_prime_vunmap,
	.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = DRM_MSM_NUM_IOCTLS,
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = 1,
	.minor = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Componentized driver support:
 */
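
/* Illustrative DT fragment for the component matching below
 * ('connectors' and 'gpus' are the property names the code parses;
 * node labels and addresses are examples only):
 *
 *	mdp: mdp@5100000 {
 *		...
 *		connectors = <&hdmi>;
 *		gpus = <&gpu>;
 *	};
 */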
#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
 * (or probably any other).. so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int add_components(struct device *dev, struct component_match **matchptr,
		const char *name)
{
	struct device_node *np = dev->of_node;
	unsigned i;

	for (i = 0; ; i++) {
		struct device_node *node;

		node = of_parse_phandle(np, name, i);
		if (!node)
			break;

		component_match_add(dev, matchptr, compare_of, node);
	}

	return 0;
}
#else
static int compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
#endif

static int msm_drm_bind(struct device *dev)
{
	return drm_platform_init(&msm_driver, to_platform_device(dev));
}

static void msm_drm_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
#ifdef CONFIG_OF
	add_components(&pdev->dev, &match, "connectors");
	add_components(&pdev->dev, &match, "gpus");
#else
	/* For non-DT case, it kinda sucks.  We don't actually have a way
	 * to know whether or not we are waiting for certain devices (or if
	 * they are simply not present).  But for non-DT we only need to
	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
	 */
	static const char *devnames[] = {
			"hdmi_msm.0", "kgsl-3d0.0",
	};
	int i;

	DBG("Adding components..");

	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
		struct device *dev;

		dev = bus_find_device_by_name(&platform_bus_type,
				NULL, devnames[i]);
		if (!dev) {
			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
			return -EPROBE_DEFER;
		}

		component_match_add(&pdev->dev, &match, compare_dev, dev);
	}
#endif

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp" },      /* mdp4 */
	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.name = "msm",
		.of_match_table = dt_match,
		.pm = &msm_pm_ops,
	},
	.id_table = msm_id,
};
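
/* The sub-device drivers (dsi/edp/hdmi/adreno) are registered before
 * the master platform driver so that their components already exist
 * when the master tries to bind (otherwise probing would just keep
 * deferring); msm_drm_unregister() tears them down after the master
 * driver is gone.
 */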
static int __init msm_drm_register(void)
{
	DBG("init");
	msm_dsi_register();
	msm_edp_register();
	hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");