/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

/* size of the region carved out of the top of the IOMMU aperture */
#define CARVEOUT_SZ SZ_64M
/* hardware limit on the number of CDMA gather fetches per gather */
#define CDMA_GATHER_FETCHES_MAX_NB 16383

/*
 * Per-open-file driver state: an IDR of engine contexts opened through
 * this file, protected by @lock.
 */
struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

/*
 * Atomic check hook: run the core helper checks first, then the display
 * hub specific state validation.
 */
static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/*
 * Commit tail: SoCs with a display hub (Tegra186+) need the hub state to
 * be committed between the modeset disables and the plane updates, so the
 * standard helper sequence is open-coded here for that case. Older SoCs
 * use the stock runtime-PM-aware commit tail.
 */
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

/*
 * Driver load: allocate per-device state, optionally set up an IOMMU
 * domain (the top CARVEOUT_SZ bytes of the aperture are reserved for the
 * carveout allocator, the rest is handed to the GEM drm_mm), then bring
 * up mode setting, the host1x children and the fbdev emulation.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		/* split the aperture: GEM below, carveout on top */
		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		/* granule of the IOVA allocator = smallest IOMMU page size */
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;
138 139 drm->mode_config.normalize_zpos = true; 140 141 drm->mode_config.funcs = &tegra_drm_mode_config_funcs; 142 drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; 143 144 err = tegra_drm_fb_prepare(drm); 145 if (err < 0) 146 goto config; 147 148 drm_kms_helper_poll_init(drm); 149 150 err = host1x_device_init(device); 151 if (err < 0) 152 goto fbdev; 153 154 if (tegra->hub) { 155 err = tegra_display_hub_prepare(tegra->hub); 156 if (err < 0) 157 goto device; 158 } 159 160 /* 161 * We don't use the drm_irq_install() helpers provided by the DRM 162 * core, so we need to set this manually in order to allow the 163 * DRM_IOCTL_WAIT_VBLANK to operate correctly. 164 */ 165 drm->irq_enabled = true; 166 167 /* syncpoints are used for full 32-bit hardware VBLANK counters */ 168 drm->max_vblank_count = 0xffffffff; 169 170 err = drm_vblank_init(drm, drm->mode_config.num_crtc); 171 if (err < 0) 172 goto hub; 173 174 drm_mode_config_reset(drm); 175 176 err = tegra_drm_fb_init(drm); 177 if (err < 0) 178 goto hub; 179 180 return 0; 181 182 hub: 183 if (tegra->hub) 184 tegra_display_hub_cleanup(tegra->hub); 185 device: 186 host1x_device_exit(device); 187 fbdev: 188 drm_kms_helper_poll_fini(drm); 189 tegra_drm_fb_free(drm); 190 config: 191 drm_mode_config_cleanup(drm); 192 193 if (tegra->domain) { 194 iommu_domain_free(tegra->domain); 195 drm_mm_takedown(&tegra->mm); 196 mutex_destroy(&tegra->mm_lock); 197 put_iova_domain(&tegra->carveout.domain); 198 } 199 free: 200 kfree(tegra); 201 return err; 202 } 203 204 static void tegra_drm_unload(struct drm_device *drm) 205 { 206 struct host1x_device *device = to_host1x_device(drm->dev); 207 struct tegra_drm *tegra = drm->dev_private; 208 int err; 209 210 drm_kms_helper_poll_fini(drm); 211 tegra_drm_fb_exit(drm); 212 drm_atomic_helper_shutdown(drm); 213 drm_mode_config_cleanup(drm); 214 215 err = host1x_device_exit(device); 216 if (err < 0) 217 return; 218 219 if (tegra->domain) { 220 iommu_domain_free(tegra->domain); 221 
drm_mm_takedown(&tegra->mm); 222 mutex_destroy(&tegra->mm_lock); 223 put_iova_domain(&tegra->carveout.domain); 224 } 225 226 kfree(tegra); 227 } 228 229 static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) 230 { 231 struct tegra_drm_file *fpriv; 232 233 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 234 if (!fpriv) 235 return -ENOMEM; 236 237 idr_init(&fpriv->contexts); 238 mutex_init(&fpriv->lock); 239 filp->driver_priv = fpriv; 240 241 return 0; 242 } 243 244 static void tegra_drm_context_free(struct tegra_drm_context *context) 245 { 246 context->client->ops->close_channel(context); 247 kfree(context); 248 } 249 250 static struct host1x_bo * 251 host1x_bo_lookup(struct drm_file *file, u32 handle) 252 { 253 struct drm_gem_object *gem; 254 struct tegra_bo *bo; 255 256 gem = drm_gem_object_lookup(file, handle); 257 if (!gem) 258 return NULL; 259 260 bo = to_tegra_bo(gem); 261 return &bo->base; 262 } 263 264 static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, 265 struct drm_tegra_reloc __user *src, 266 struct drm_device *drm, 267 struct drm_file *file) 268 { 269 u32 cmdbuf, target; 270 int err; 271 272 err = get_user(cmdbuf, &src->cmdbuf.handle); 273 if (err < 0) 274 return err; 275 276 err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset); 277 if (err < 0) 278 return err; 279 280 err = get_user(target, &src->target.handle); 281 if (err < 0) 282 return err; 283 284 err = get_user(dest->target.offset, &src->target.offset); 285 if (err < 0) 286 return err; 287 288 err = get_user(dest->shift, &src->shift); 289 if (err < 0) 290 return err; 291 292 dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf); 293 if (!dest->cmdbuf.bo) 294 return -ENOENT; 295 296 dest->target.bo = host1x_bo_lookup(file, target); 297 if (!dest->target.bo) 298 return -ENOENT; 299 300 return 0; 301 } 302 303 static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest, 304 struct drm_tegra_waitchk __user *src, 305 struct drm_file *file) 306 { 307 u32 cmdbuf; 308 
	int err;

	err = get_user(cmdbuf, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}

/*
 * IOCTL backend for job submission: copy command buffers, relocations and
 * wait checks from userspace, validate all userspace-controlled offsets
 * and counts, pin the buffers and hand the job to host1x.
 *
 * Every GEM reference taken during the copy phase is recorded in refs[]
 * and dropped again at the end — note that the success path deliberately
 * falls through into the "fail" label with err == 0 for exactly that
 * reason.
 */
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_waitchk __user *user_waitchks;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_waitchks = u64_to_user_ptr(args->waitchks);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a higher
		 * value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		/* u64 arithmetic so the bounds check below can't overflow */
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		/* record the ref before validating so the fail path drops it */
		refs[num_refs++] = &obj->gem;

		/*
		 * Gather buffer base address must be 4-bytes aligned,
		 * unaligned offset is malformed and cause commands stream
		 * corruption on the buffer address relocation.
		 */
		if (offset & 3 || offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The unaligned cmdbuf offset will cause an unaligned write
		 * during of the relocations patching, corrupting the commands
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/*
	 * copy and resolve waitchks from submit
	 *
	 * NOTE(review): num_waitchks is known to be 0 here (checked above),
	 * so this loop currently never executes; kept for when waitchk
	 * support lands.
	 */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(
			wait, &user_waitchks[num_waitchks], file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The unaligned offset will cause an unaligned write during
		 * of the waitchks patching, corrupting the commands stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	/* clamp the userspace-supplied timeout to 10 seconds */
	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	/* fence value userspace can wait on for completion */
	args->fence = job->syncpt_end;

fail:
	/* intentional fall-through on success: drop all tracked BO refs */
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}


#ifdef CONFIG_DRM_TEGRA_STAGING
/* IOCTL: allocate a GEM buffer and return a handle for it. */
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

/* IOCTL: return the fake mmap offset for a GEM handle. */
static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put_unlocked(gem);

	return 0;
}

/* IOCTL: read the current (minimum) value of a syncpoint. */
static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

/* IOCTL: increment a syncpoint from the CPU. */
static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

/* IOCTL: wait for a syncpoint to reach a threshold, with timeout. */
static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

/*
 * Open a channel on @client and register the new context in the file's
 * IDR (IDs start at 1 so that 0 is never a valid context ID). Called
 * with fpriv->lock held.
 */
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

/*
 * IOCTL: open a channel to the engine identified by its host1x class.
 * Returns -ENODEV if no registered client matches.
 */
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

/* IOCTL: close a previously opened channel context. */
static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

/* IOCTL: translate an engine-relative syncpoint index to a global ID. */
static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

/*
 * IOCTL: submit a job on a context. Dispatches to the engine's submit op
 * (typically tegra_drm_submit()) with fpriv->lock held so the context
 * cannot be closed concurrently.
 */
static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

/* IOCTL: look up the wait base associated with one of a context's syncpoints. */
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

/*
 * IOCTL: set the tiling mode of a buffer. Only BLOCK mode takes a value
 * (the block height, 0-5); PITCH and TILED require value == 0.
 */
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

/* IOCTL: read back a buffer's tiling mode and value. */
static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}

/* IOCTL: replace a buffer's flags (currently only BOTTOM_UP). */
static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

/* IOCTL: read back a buffer's flags. */
static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
#endif

/* The staging IOCTLs are only exposed with CONFIG_DRM_TEGRA_STAGING. */
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

/* idr_for_each() callback: free one leftover context on file close. */
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

/* Release all contexts and per-file state when a file is closed. */
static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
/* debugfs: dump all framebuffers currently registered with the device. */
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

/* debugfs: dump the GEM IOVA allocator state (only with an IOMMU). */
static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

/* Add an engine client so that open-channel lookups can find it. */
int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

/* Remove an engine client from the lookup list. */
int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

/*
 * Allocate zeroed, device-visible memory from the carveout. With an
 * IOMMU the pages are mapped into the carveout IOVA range; without one,
 * the physical address is returned directly (GFP_DMA keeps allocations
 * below 4 GiB for units with 32-bit addressing). Free with
 * tegra_drm_free() using the same size.
 */
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

/*
 * Undo tegra_drm_alloc(): unmap and release the IOVA (if any), then free
 * the pages. @size must match the size passed at allocation time.
 */
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

/* host1x bus probe: allocate and register the DRM device. */
static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System sleep: quiesce polling/fbdev and save the atomic state. */
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		/* roll back so the device stays usable on failure */
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

/* System resume: restore the saved atomic state and re-enable polling. */
static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

/* Subdevices that, once all bound, trigger host1x_drm_probe(). */
static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");