1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2015-2018 Etnaviv Project 4 */ 5 6 #include <linux/component.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/module.h> 9 #include <linux/of.h> 10 #include <linux/of_device.h> 11 #include <linux/platform_device.h> 12 #include <linux/uaccess.h> 13 14 #include <drm/drm_debugfs.h> 15 #include <drm/drm_drv.h> 16 #include <drm/drm_file.h> 17 #include <drm/drm_ioctl.h> 18 #include <drm/drm_of.h> 19 #include <drm/drm_prime.h> 20 21 #include "etnaviv_cmdbuf.h" 22 #include "etnaviv_drv.h" 23 #include "etnaviv_gpu.h" 24 #include "etnaviv_gem.h" 25 #include "etnaviv_mmu.h" 26 #include "etnaviv_perfmon.h" 27 28 /* 29 * DRM operations: 30 */ 31 32 static struct device_node *etnaviv_of_first_available_node(void) 33 { 34 struct device_node *np; 35 36 for_each_compatible_node(np, NULL, "vivante,gc") { 37 if (of_device_is_available(np)) 38 return np; 39 } 40 41 return NULL; 42 } 43 44 static void load_gpu(struct drm_device *dev) 45 { 46 struct etnaviv_drm_private *priv = dev->dev_private; 47 unsigned int i; 48 49 for (i = 0; i < ETNA_MAX_PIPES; i++) { 50 struct etnaviv_gpu *g = priv->gpu[i]; 51 52 if (g) { 53 int ret; 54 55 ret = etnaviv_gpu_init(g); 56 if (ret) 57 priv->gpu[i] = NULL; 58 } 59 } 60 } 61 62 static int etnaviv_open(struct drm_device *dev, struct drm_file *file) 63 { 64 struct etnaviv_drm_private *priv = dev->dev_private; 65 struct etnaviv_file_private *ctx; 66 int ret, i; 67 68 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 69 if (!ctx) 70 return -ENOMEM; 71 72 ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx, 73 xa_limit_32b, &priv->next_context_id, GFP_KERNEL); 74 if (ret < 0) 75 goto out_free; 76 77 ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global, 78 priv->cmdbuf_suballoc); 79 if (!ctx->mmu) { 80 ret = -ENOMEM; 81 goto out_free; 82 } 83 84 for (i = 0; i < ETNA_MAX_PIPES; i++) { 85 struct etnaviv_gpu *gpu = priv->gpu[i]; 86 struct drm_gpu_scheduler *sched; 87 88 if (gpu) { 89 sched = &gpu->sched; 90 drm_sched_entity_init(&ctx->sched_entity[i], 91 DRM_SCHED_PRIORITY_NORMAL, &sched, 92 1, NULL); 93 } 94 } 95 96 file->driver_priv = ctx; 97 98 return 0; 99 100 out_free: 101 kfree(ctx); 102 return ret; 103 } 104 105 static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file) 106 { 107 struct etnaviv_drm_private *priv = dev->dev_private; 108 struct etnaviv_file_private *ctx = file->driver_priv; 109 unsigned int i; 110 111 for (i = 0; i < ETNA_MAX_PIPES; i++) { 112 struct etnaviv_gpu *gpu = priv->gpu[i]; 113 114 if (gpu) 115 drm_sched_entity_destroy(&ctx->sched_entity[i]); 116 } 117 118 etnaviv_iommu_context_put(ctx->mmu); 119 120 xa_erase(&priv->active_contexts, ctx->id); 121 122 kfree(ctx); 123 } 124 125 /* 126 * DRM debugfs: 127 */ 128 129 #ifdef CONFIG_DEBUG_FS 130 static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m) 131 { 132 struct etnaviv_drm_private *priv = dev->dev_private; 133 134 etnaviv_gem_describe_objects(priv, m); 135 136 return 0; 137 } 138 139 static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m) 140 { 141 struct drm_printer p = drm_seq_file_printer(m); 142 143 read_lock(&dev->vma_offset_manager->vm_lock); 144 drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p); 145 read_unlock(&dev->vma_offset_manager->vm_lock); 146 147 return 0; 148 } 149 150 static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m) 151 { 152 struct drm_printer p = drm_seq_file_printer(m); 153 struct etnaviv_iommu_context *mmu_context; 154 155 seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev)); 156 157 /* 158 * Lock the GPU to avoid a MMU context switch just now and elevate 159 * the refcount of the current context to avoid it disappearing from 160 * under our feet. 161 */ 162 mutex_lock(&gpu->lock); 163 mmu_context = gpu->mmu_context; 164 if (mmu_context) 165 etnaviv_iommu_context_get(mmu_context); 166 mutex_unlock(&gpu->lock); 167 168 if (!mmu_context) 169 return 0; 170 171 mutex_lock(&mmu_context->lock); 172 drm_mm_print(&mmu_context->mm, &p); 173 mutex_unlock(&mmu_context->lock); 174 175 etnaviv_iommu_context_put(mmu_context); 176 177 return 0; 178 } 179 180 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m) 181 { 182 struct etnaviv_cmdbuf *buf = &gpu->buffer; 183 u32 size = buf->size; 184 u32 *ptr = buf->vaddr; 185 u32 i; 186 187 seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n", 188 buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf), 189 size - buf->user_size); 190 191 for (i = 0; i < size / 4; i++) { 192 if (i && !(i % 4)) 193 seq_puts(m, "\n"); 194 if (i % 4 == 0) 195 seq_printf(m, "\t0x%p: ", ptr + i); 196 seq_printf(m, "%08x ", *(ptr + i)); 197 } 198 seq_puts(m, "\n"); 199 } 200 201 static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m) 202 { 203 seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev)); 204 205 mutex_lock(&gpu->lock); 206 etnaviv_buffer_dump(gpu, m); 207 mutex_unlock(&gpu->lock); 208 209 return 0; 210 } 211 212 static int show_unlocked(struct seq_file *m, void *arg) 213 { 214 struct drm_info_node *node = (struct drm_info_node *) m->private; 215 struct drm_device *dev = node->minor->dev; 216 int (*show)(struct drm_device *dev, struct seq_file *m) = 217 node->info_ent->data; 218 219 return show(dev, m); 220 } 221 222 static int show_each_gpu(struct seq_file *m, void *arg) 223 { 224 struct drm_info_node *node = (struct drm_info_node *) m->private; 225 struct drm_device *dev = node->minor->dev; 226 struct etnaviv_drm_private *priv = dev->dev_private; 227 struct etnaviv_gpu *gpu; 228 int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) = 229 node->info_ent->data; 230 unsigned int i; 231 int ret = 0; 232 233 for (i = 0; i < ETNA_MAX_PIPES; i++) { 234 gpu = priv->gpu[i]; 235 if (!gpu) 236 continue; 237 238 ret = show(gpu, m); 239 if (ret < 0) 240 break; 241 } 242 243 return ret; 244 } 245 246 static struct drm_info_list etnaviv_debugfs_list[] = { 247 {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs}, 248 {"gem", show_unlocked, 0, etnaviv_gem_show}, 249 { "mm", show_unlocked, 0, etnaviv_mm_show }, 250 {"mmu", show_each_gpu, 0, etnaviv_mmu_show}, 251 {"ring", show_each_gpu, 0, etnaviv_ring_show}, 252 }; 253 254 static void etnaviv_debugfs_init(struct drm_minor *minor) 255 { 256 drm_debugfs_create_files(etnaviv_debugfs_list, 257 ARRAY_SIZE(etnaviv_debugfs_list), 258 minor->debugfs_root, minor); 259 } 260 #endif 261 262 /* 263 * DRM ioctls: 264 */ 265 266 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data, 267 struct drm_file *file) 268 { 269 struct etnaviv_drm_private *priv = dev->dev_private; 270 struct drm_etnaviv_param *args = data; 271 struct etnaviv_gpu *gpu; 272 273 if (args->pipe >= ETNA_MAX_PIPES) 274 return -EINVAL; 275 276 gpu = priv->gpu[args->pipe]; 277 if (!gpu) 278 return -ENXIO; 279 280 return etnaviv_gpu_get_param(gpu, args->param, &args->value); 281 } 282 283 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data, 284 struct drm_file *file) 285 { 286 struct drm_etnaviv_gem_new *args = data; 287 288 if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED | 289 ETNA_BO_FORCE_MMU)) 290 return -EINVAL; 291 292 return etnaviv_gem_new_handle(dev, file, args->size, 293 args->flags, &args->handle); 294 } 295 296 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, 297 struct drm_file *file) 298 { 299 struct drm_etnaviv_gem_cpu_prep *args = data; 300 struct drm_gem_object *obj; 301 int ret; 302 303 if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC)) 304 return -EINVAL; 305 306 obj = drm_gem_object_lookup(file, args->handle); 307 if (!obj) 308 return -ENOENT; 309 310 ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout); 311 312 drm_gem_object_put(obj); 313 314 return ret; 315 } 316 317 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, 318 struct drm_file *file) 319 { 320 struct drm_etnaviv_gem_cpu_fini *args = data; 321 struct drm_gem_object *obj; 322 int ret; 323 324 if (args->flags) 325 return -EINVAL; 326 327 obj = drm_gem_object_lookup(file, args->handle); 328 if (!obj) 329 return -ENOENT; 330 331 ret = etnaviv_gem_cpu_fini(obj); 332 333 drm_gem_object_put(obj); 334 335 return ret; 336 } 337 338 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data, 339 struct drm_file *file) 340 { 341 struct drm_etnaviv_gem_info *args = data; 342 struct drm_gem_object *obj; 343 int ret; 344 345 if (args->pad) 346 return -EINVAL; 347 348 obj = drm_gem_object_lookup(file, args->handle); 349 if (!obj) 350 return -ENOENT; 351 352 ret = etnaviv_gem_mmap_offset(obj, &args->offset); 353 drm_gem_object_put(obj); 354 355 return ret; 356 } 357 358 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data, 359 struct drm_file *file) 360 { 361 struct drm_etnaviv_wait_fence *args = data; 362 struct etnaviv_drm_private *priv = dev->dev_private; 363 struct drm_etnaviv_timespec *timeout = &args->timeout; 364 struct etnaviv_gpu *gpu; 365 366 if (args->flags & ~(ETNA_WAIT_NONBLOCK)) 367 return -EINVAL; 368 369 if (args->pipe >= ETNA_MAX_PIPES) 370 return -EINVAL; 371 372 gpu = priv->gpu[args->pipe]; 373 if (!gpu) 374 return -ENXIO; 375 376 if (args->flags & ETNA_WAIT_NONBLOCK) 377 timeout = NULL; 378 379 return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence, 380 timeout); 381 } 382 383 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data, 384 struct drm_file *file) 385 { 386 struct drm_etnaviv_gem_userptr *args = data; 387 388 if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) || 389 args->flags == 0) 390 return -EINVAL; 391 392 if (offset_in_page(args->user_ptr | args->user_size) || 393 (uintptr_t)args->user_ptr != args->user_ptr || 394 (u32)args->user_size != args->user_size || 395 args->user_ptr & ~PAGE_MASK) 396 return -EINVAL; 397 398 if (!access_ok((void __user *)(unsigned long)args->user_ptr, 399 args->user_size)) 400 return -EFAULT; 401 402 return etnaviv_gem_new_userptr(dev, file, args->user_ptr, 403 args->user_size, args->flags, 404 &args->handle); 405 } 406 407 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data, 408 struct drm_file *file) 409 { 410 struct etnaviv_drm_private *priv = dev->dev_private; 411 struct drm_etnaviv_gem_wait *args = data; 412 struct drm_etnaviv_timespec *timeout = &args->timeout; 413 struct drm_gem_object *obj; 414 struct etnaviv_gpu *gpu; 415 int ret; 416 417 if (args->flags & ~(ETNA_WAIT_NONBLOCK)) 418 return -EINVAL; 419 420 if (args->pipe >= ETNA_MAX_PIPES) 421 return -EINVAL; 422 423 gpu = priv->gpu[args->pipe]; 424 if (!gpu) 425 return -ENXIO; 426 427 obj = drm_gem_object_lookup(file, args->handle); 428 if (!obj) 429 return -ENOENT; 430 431 if (args->flags & ETNA_WAIT_NONBLOCK) 432 timeout = NULL; 433 434 ret = etnaviv_gem_wait_bo(gpu, obj, timeout); 435 436 drm_gem_object_put(obj); 437 438 return ret; 439 } 440 441 static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data, 442 struct drm_file *file) 443 { 444 struct etnaviv_drm_private *priv = dev->dev_private; 445 struct drm_etnaviv_pm_domain *args = data; 446 struct etnaviv_gpu *gpu; 447 448 if (args->pipe >= ETNA_MAX_PIPES) 449 return -EINVAL; 450 451 gpu = priv->gpu[args->pipe]; 452 if (!gpu) 453 return -ENXIO; 454 455 return etnaviv_pm_query_dom(gpu, args); 456 } 457 458 static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data, 459 struct drm_file *file) 460 { 461 struct etnaviv_drm_private *priv = dev->dev_private; 462 struct drm_etnaviv_pm_signal *args = data; 463 struct etnaviv_gpu *gpu; 464 465 if (args->pipe >= ETNA_MAX_PIPES) 466 return -EINVAL; 467 468 gpu = priv->gpu[args->pipe]; 469 if (!gpu) 470 return -ENXIO; 471 472 return etnaviv_pm_query_sig(gpu, args); 473 } 474 475 static const struct drm_ioctl_desc etnaviv_ioctls[] = { 476 #define ETNA_IOCTL(n, func, flags) \ 477 DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags) 478 ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW), 479 ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW), 480 ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW), 481 ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW), 482 ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW), 483 ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW), 484 ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW), 485 ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW), 486 ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW), 487 ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW), 488 ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW), 489 }; 490 491 DEFINE_DRM_GEM_FOPS(fops); 492 493 static const struct drm_driver etnaviv_drm_driver = { 494 .driver_features = DRIVER_GEM | DRIVER_RENDER, 495 .open = etnaviv_open, 496 .postclose = etnaviv_postclose, 497 .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table, 498 #ifdef CONFIG_DEBUG_FS 499 .debugfs_init = etnaviv_debugfs_init, 500 #endif 501 .ioctls = etnaviv_ioctls, 502 .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS, 503 .fops = &fops, 504 .name = "etnaviv", 505 .desc = "etnaviv DRM", 506 .date = "20151214", 507 .major = 1, 508 .minor = 4, 509 }; 510 511 /* 512 * Platform driver: 513 */ 514 static int etnaviv_bind(struct device *dev) 515 { 516 struct etnaviv_drm_private *priv; 517 struct drm_device *drm; 518 int ret; 519 520 drm = drm_dev_alloc(&etnaviv_drm_driver, dev); 521 if (IS_ERR(drm)) 522 return PTR_ERR(drm); 523 524 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 525 if (!priv) { 526 dev_err(dev, "failed to allocate private data\n"); 527 ret = -ENOMEM; 528 goto out_put; 529 } 530 drm->dev_private = priv; 531 532 dma_set_max_seg_size(dev, SZ_2G); 533 534 xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC); 535 536 mutex_init(&priv->gem_lock); 537 INIT_LIST_HEAD(&priv->gem_list); 538 priv->num_gpus = 0; 539 priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 540 541 priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev); 542 if (IS_ERR(priv->cmdbuf_suballoc)) { 543 dev_err(drm->dev, "Failed to create cmdbuf suballocator\n"); 544 ret = PTR_ERR(priv->cmdbuf_suballoc); 545 goto out_free_priv; 546 } 547 548 dev_set_drvdata(dev, drm); 549 550 ret = component_bind_all(dev, drm); 551 if (ret < 0) 552 goto out_destroy_suballoc; 553 554 load_gpu(drm); 555 556 ret = drm_dev_register(drm, 0); 557 if (ret) 558 goto out_unbind; 559 560 return 0; 561 562 out_unbind: 563 component_unbind_all(dev, drm); 564 out_destroy_suballoc: 565 etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc); 566 out_free_priv: 567 kfree(priv); 568 out_put: 569 drm_dev_put(drm); 570 571 return ret; 572 } 573 574 static void etnaviv_unbind(struct device *dev) 575 { 576 struct drm_device *drm = dev_get_drvdata(dev); 577 struct etnaviv_drm_private *priv = drm->dev_private; 578 579 drm_dev_unregister(drm); 580 581 component_unbind_all(dev, drm); 582 583 etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc); 584 585 xa_destroy(&priv->active_contexts); 586 587 drm->dev_private = NULL; 588 kfree(priv); 589 590 drm_dev_put(drm); 591 } 592 593 static const struct component_master_ops etnaviv_master_ops = { 594 .bind = etnaviv_bind, 595 .unbind = etnaviv_unbind, 596 }; 597 598 static int etnaviv_pdev_probe(struct platform_device *pdev) 599 { 600 struct device *dev = &pdev->dev; 601 struct device_node *first_node = NULL; 602 struct component_match *match = NULL; 603 604 if (!dev->platform_data) { 605 struct device_node *core_node; 606 607 for_each_compatible_node(core_node, NULL, "vivante,gc") { 608 if (!of_device_is_available(core_node)) 609 continue; 610 611 drm_of_component_match_add(&pdev->dev, &match, 612 component_compare_of, core_node); 613 } 614 } else { 615 char **names = dev->platform_data; 616 unsigned i; 617 618 for (i = 0; names[i]; i++) 619 component_match_add(dev, &match, component_compare_dev_name, names[i]); 620 } 621 622 /* 623 * PTA and MTLB can have 40 bit base addresses, but 624 * unfortunately, an entry in the MTLB can only point to a 625 * 32 bit base address of a STLB. Moreover, to initialize the 626 * MMU we need a command buffer with a 32 bit address because 627 * without an MMU there is only an indentity mapping between 628 * the internal 32 bit addresses and the bus addresses. 629 * 630 * To make things easy, we set the dma_coherent_mask to 32 631 * bit to make sure we are allocating the command buffers and 632 * TLBs in the lower 4 GiB address space. 633 */ 634 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) || 635 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { 636 dev_dbg(&pdev->dev, "No suitable DMA available\n"); 637 return -ENODEV; 638 } 639 640 /* 641 * Apply the same DMA configuration to the virtual etnaviv 642 * device as the GPU we found. This assumes that all Vivante 643 * GPUs in the system share the same DMA constraints. 644 */ 645 first_node = etnaviv_of_first_available_node(); 646 if (first_node) { 647 of_dma_configure(&pdev->dev, first_node, true); 648 of_node_put(first_node); 649 } 650 651 return component_master_add_with_match(dev, &etnaviv_master_ops, match); 652 } 653 654 static int etnaviv_pdev_remove(struct platform_device *pdev) 655 { 656 component_master_del(&pdev->dev, &etnaviv_master_ops); 657 658 return 0; 659 } 660 661 static struct platform_driver etnaviv_platform_driver = { 662 .probe = etnaviv_pdev_probe, 663 .remove = etnaviv_pdev_remove, 664 .driver = { 665 .name = "etnaviv", 666 }, 667 }; 668 669 static int etnaviv_create_platform_device(const char *name, 670 struct platform_device **ppdev) 671 { 672 struct platform_device *pdev; 673 int ret; 674 675 pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE); 676 if (!pdev) 677 return -ENOMEM; 678 679 ret = platform_device_add(pdev); 680 if (ret) { 681 platform_device_put(pdev); 682 return ret; 683 } 684 685 *ppdev = pdev; 686 687 return 0; 688 } 689 690 static void etnaviv_destroy_platform_device(struct platform_device **ppdev) 691 { 692 struct platform_device *pdev = *ppdev; 693 694 if (!pdev) 695 return; 696 697 platform_device_unregister(pdev); 698 699 *ppdev = NULL; 700 } 701 702 static struct platform_device *etnaviv_drm; 703 704 static int __init etnaviv_init(void) 705 { 706 int ret; 707 struct device_node *np; 708 709 etnaviv_validate_init(); 710 711 ret = platform_driver_register(&etnaviv_gpu_driver); 712 if (ret != 0) 713 return ret; 714 715 ret = platform_driver_register(&etnaviv_platform_driver); 716 if (ret != 0) 717 goto unregister_gpu_driver; 718 719 /* 720 * If the DT contains at least one available GPU device, instantiate 721 * the DRM platform device. 722 */ 723 np = etnaviv_of_first_available_node(); 724 if (np) { 725 of_node_put(np); 726 727 ret = etnaviv_create_platform_device("etnaviv", &etnaviv_drm); 728 if (ret) 729 goto unregister_platform_driver; 730 } 731 732 return 0; 733 734 unregister_platform_driver: 735 platform_driver_unregister(&etnaviv_platform_driver); 736 unregister_gpu_driver: 737 platform_driver_unregister(&etnaviv_gpu_driver); 738 return ret; 739 } 740 module_init(etnaviv_init); 741 742 static void __exit etnaviv_exit(void) 743 { 744 etnaviv_destroy_platform_device(&etnaviv_drm); 745 platform_driver_unregister(&etnaviv_platform_driver); 746 platform_driver_unregister(&etnaviv_gpu_driver); 747 } 748 module_exit(etnaviv_exit); 749 750 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>"); 751 MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>"); 752 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>"); 753 MODULE_DESCRIPTION("etnaviv DRM Driver"); 754 MODULE_LICENSE("GPL v2"); 755 MODULE_ALIAS("platform:etnaviv"); 756