/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = msm_atomic_commit,
};

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
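/*
 * Both parameters can be set on the kernel command line or at module
 * load time, e.g. (values illustrative):
 *
 *	msm.reglog=1 msm.vram=32m
 */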
/*
 * Util/helpers:
 */

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
	return val;
}

/*
 * DRM operations:
 */

static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}

static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	static const struct of_device_id match_types[] = { {
		.compatible = "qcom,mdss_mdp",
		.data = (void *)5,
	}, {
		/* end node */
	} };
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	match = of_match_node(match_types, dev->of_node);
	if (match)
		return (int)match->data;
#endif
	return 4;
}
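/*
 * E.g. a device tree node like the following (node name and address are
 * illustrative) selects MDP5; anything else falls back to MDP4:
 *
 *	mdp: mdp@fd900000 {
 *		compatible = "qcom,mdss_mdp";
 *		...
 *	};
 */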
static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);

	drm_mode_config_init(dev);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DEFINE_DMA_ATTRS(attrs);
		unsigned long size;
		void *p;

		DBG("using %s VRAM carveout", vram);
		size = memparse(vram, NULL);
		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			ret = -ENOMEM;
			goto fail;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	platform_set_drvdata(pdev, dev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		return ret;

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	priv->fbdev = msm_fbdev_init(dev);
#endif

	ret = msm_debugfs_late_init(dev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}
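/*
 * Note that the fail paths above all funnel through msm_unload(), which
 * therefore has to tolerate a partially-initialized device (NULL kms,
 * NULL gpu, no vram carveout, etc).
 */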
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}
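/*
 * The crtc_id passed in by the drm core is the index assigned at crtc
 * init time, so it can be used directly to index priv->crtcs when
 * dispatching to the kms backend:
 */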
static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}

static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}

static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}

static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}

static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gpu", show_locked, 0, msm_gpu_show },
	{ "gem", show_locked, 0, msm_gem_show },
	{ "mm",  show_locked, 0, msm_mm_show },
	{ "fb",  show_locked, 0, msm_fb_show },
};

static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;
	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}

static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return 0;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
	if (!minor->dev->dev_private)
		return;
	msm_rd_debugfs_cleanup(minor);
	msm_perf_debugfs_cleanup(minor);
}
#endif

/*
 * Fences:
 */
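/*
 * Fences are monotonically increasing 32-bit sequence numbers:
 * priv->gpu->submitted_fence is the most recently issued one, and
 * priv->completed_fence tracks retirement.  Waiters either block on
 * priv->fence_event, or register an msm_fence_cb which gets dispatched
 * on priv->wq once its fence is reached (see msm_update_fence()).
 */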
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
		unsigned long start_jiffies = jiffies;
		unsigned long remaining_jiffies;

		if (time_after(start_jiffies, timeout_jiffies))
			remaining_jiffies = 0;
		else
			remaining_jiffies = timeout_jiffies - start_jiffies;

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

int msm_queue_fence_cb(struct drm_device *dev,
		struct msm_fence_cb *cb, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (fence > priv->completed_fence) {
		cb->fence = fence;
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	return msm_wait_fence_interruptable(dev, args->fence,
			&TS(args->timeout));
}
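/*
 * For reference, a userspace caller would issue the wait roughly like
 * this (hypothetical libdrm-style snippet; timeout values in the
 * MONOTONIC clock domain, illustrative only):
 *
 *	struct drm_msm_wait_fence req = {
 *		.fence = fence,
 *		.timeout = { .tv_sec = sec, .tv_nsec = nsec },
 *	};
 *	ret = drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &req, sizeof(req));
 */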
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};
static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_MODESET,
	.load = msm_load,
	.unload = msm_unload,
	.open = msm_open,
	.preclose = msm_preclose,
	.lastclose = msm_lastclose,
	.set_busid = drm_platform_set_busid,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = msm_gem_prime_pin,
	.gem_prime_unpin = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap = msm_gem_prime_vmap,
	.gem_prime_vunmap = msm_gem_prime_vunmap,
	.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = DRM_MSM_NUM_IOCTLS,
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = 1,
	.minor = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Componentized driver support:
 */

#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
 * (or probably any other).. so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int add_components(struct device *dev, struct component_match **matchptr,
		const char *name)
{
	struct device_node *np = dev->of_node;
	unsigned i;

	for (i = 0; ; i++) {
		struct device_node *node;

		node = of_parse_phandle(np, name, i);
		if (!node)
			break;

		component_match_add(dev, matchptr, compare_of, node);
	}

	return 0;
}
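/*
 * i.e. the master DT node is expected to carry phandle lists like the
 * following (node and label names illustrative), consumed below by
 * msm_pdev_probe() via the "connectors" and "gpus" property names:
 *
 *	mdp: mdp@... {
 *		...
 *		connectors = <&hdmi>;
 *		gpus = <&gpu>;
 *	};
 */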
#else
static int compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
#endif

static int msm_drm_bind(struct device *dev)
{
	return drm_platform_init(&msm_driver, to_platform_device(dev));
}

static void msm_drm_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
#ifdef CONFIG_OF
	add_components(&pdev->dev, &match, "connectors");
	add_components(&pdev->dev, &match, "gpus");
#else
	/* For non-DT case, it kinda sucks.  We don't actually have a way
	 * to know whether or not we are waiting for certain devices (or if
	 * they are simply not present).  But for non-DT we only need to
	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
	 */
	static const char *devnames[] = {
			"hdmi_msm.0", "kgsl-3d0.0",
	};
	int i;

	DBG("Adding components..");

	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
		struct device *dev;

		dev = bus_find_device_by_name(&platform_bus_type,
				NULL, devnames[i]);
		if (!dev) {
			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
			return -EPROBE_DEFER;
		}

		component_match_add(&pdev->dev, &match, compare_dev, dev);
	}
#endif

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp" },      /* mdp4 */
	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.name = "msm",
		.of_match_table = dt_match,
		.pm = &msm_pm_ops,
	},
	.id_table = msm_id,
};

static int __init msm_drm_register(void)
{
	DBG("init");
	hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	adreno_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");