/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

struct exynos_atomic_commit {
	struct work_struct	work;
	struct drm_device	*dev;
	struct drm_atomic_state *state;
	u32			crtcs;
};

static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct exynos_drm_private *priv = dev->dev_private;
	struct drm_atomic_state *state = commit->state;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/*
	 * Exynos can't update planes with CRTCs and encoders disabled: its
	 * update routines, especially for FIMD, require the clocks to be
	 * enabled. So it is necessary to handle the modeset operations
	 * *before* the commit_planes() step, so that the update is always
	 * performed with the relevant clocks enabled.
	 */

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_put(state);

	spin_lock(&priv->lock);
	priv->pending &= ~commit->crtcs;
	spin_unlock(&priv->lock);

	wake_up_all(&priv->wait);

	kfree(commit);
}
static void exynos_drm_atomic_work(struct work_struct *work)
{
	struct exynos_atomic_commit *commit = container_of(work,
				struct exynos_atomic_commit, work);

	exynos_atomic_commit_complete(commit);
}

static struct device *exynos_drm_get_dma_device(void);

static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct exynos_drm_private *private;
	struct drm_encoder *encoder;
	unsigned int clone_mask;
	int cnt, ret;

	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;

	init_waitqueue_head(&private->wait);
	spin_lock_init(&private->lock);

	dev_set_drvdata(dev->dev, dev);
	dev->dev_private = (void *)private;

	/* the first real CRTC device is used for all dma mapping operations */
	private->dma_dev = exynos_drm_get_dma_device();
	if (!private->dma_dev) {
		DRM_ERROR("no device found for DMA mapping operations.\n");
		ret = -ENODEV;
		goto err_free_private;
	}
	DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
		 dev_name(private->dma_dev));

	/* create common IOMMU mapping for all devices attached to Exynos DRM */
	ret = drm_create_iommu_mapping(dev);
	if (ret < 0) {
		DRM_ERROR("failed to create iommu mapping.\n");
		goto err_free_private;
	}

	drm_mode_config_init(dev);

	exynos_drm_mode_config_init(dev);

	/* setup possible_clones. */
	cnt = 0;
	clone_mask = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		clone_mask |= (1 << (cnt++));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;

	platform_set_drvdata(dev->platformdev, dev);

	/* Try to bind all sub drivers. */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_mode_config_cleanup;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_unbind_all;

	/* Probe non-KMS sub drivers and the virtual display driver. */
	ret = exynos_drm_device_subdrv_probe(dev);
	if (ret)
		goto err_cleanup_vblank;

	drm_mode_config_reset(dev);

	/*
	 * Enable drm irq mode.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * Note that we do not use the DRM core's irq handler; each
	 * sub-driver installs its own, because the DRM framework
	 * supports only one irq handler.
	 */
	dev->irq_enabled = true;

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_cleanup_vblank:
	drm_vblank_cleanup(dev);
err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);
err_free_private:
	kfree(private);

	return ret;
}
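/*
 * Note (comment added for clarity, not from the original sources): the
 * ->load()/->unload() callbacks above are not called directly by this file.
 * The DRM core invokes them from drm_platform_init()/drm_put_dev(), which
 * exynos_drm_bind()/exynos_drm_unbind() below call once the component
 * master has gathered all of its sub-components.
 */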
static int exynos_drm_unload(struct drm_device *dev)
{
	exynos_drm_device_subdrv_remove(dev);

	exynos_drm_fbdev_fini(dev);
	drm_kms_helper_poll_fini(dev);

	drm_vblank_cleanup(dev);
	component_unbind_all(dev->dev, dev);
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);

	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}

static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
	bool pending;

	spin_lock(&priv->lock);
	pending = priv->pending & crtcs;
	spin_unlock(&priv->lock);

	return pending;
}

int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
			 bool nonblock)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct exynos_atomic_commit *commit;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return -ENOMEM;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(commit);
		return ret;
	}

	/* This is the point of no return */

	INIT_WORK(&commit->work, exynos_drm_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/*
	 * Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		commit->crtcs |= drm_crtc_mask(crtc);

	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));

	spin_lock(&priv->lock);
	priv->pending |= commit->crtcs;
	spin_unlock(&priv->lock);

	drm_atomic_helper_swap_state(state, true);

	drm_atomic_state_get(state);
	if (nonblock)
		schedule_work(&commit->work);
	else
		exynos_atomic_commit_complete(commit);

	return 0;
}
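/*
 * Illustration (comment added here, not part of the original code): the
 * priv->pending mask above serializes commits per CRTC.  A commit touching
 * CRTC 0 sets drm_crtc_mask(crtc0) in priv->pending; a later commit that
 * also touches CRTC 0 sleeps in wait_event() until
 * exynos_atomic_commit_complete() clears that bit and wakes priv->wait,
 * while a commit that only touches CRTC 1 sees no overlap in
 * commit_is_pending() and proceeds immediately.
 */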
int exynos_atomic_check(struct drm_device *dev,
			struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	return ret;
}

static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv;
	int ret;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	ret = exynos_drm_subdrv_open(dev, file);
	if (ret)
		goto err_file_priv_free;

	return ret;

err_file_priv_free:
	kfree(file_priv);
	file->driver_priv = NULL;
	return ret;
}

static void exynos_drm_preclose(struct drm_device *dev,
				struct drm_file *file)
{
	struct drm_crtc *crtc;

	exynos_drm_subdrv_close(dev, file);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		exynos_drm_crtc_cancel_page_flip(crtc, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
			DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= exynos_drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= drm_compat_ioctl,
#endif
	.release	= drm_release,
};
static struct drm_driver exynos_drm_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
				  | DRIVER_ATOMIC | DRIVER_RENDER,
	.load			= exynos_drm_load,
	.unload			= exynos_drm_unload,
	.open			= exynos_drm_open,
	.preclose		= exynos_drm_preclose,
	.lastclose		= exynos_drm_lastclose,
	.postclose		= exynos_drm_postclose,
	.get_vblank_counter	= drm_vblank_no_hw_counter,
	.enable_vblank		= exynos_drm_crtc_enable_vblank,
	.disable_vblank		= exynos_drm_crtc_disable_vblank,
	.gem_free_object_unlocked = exynos_drm_gem_free_object,
	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
	.dumb_create		= exynos_drm_gem_dumb_create,
	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
	.gem_prime_mmap		= exynos_drm_gem_prime_mmap,
	.ioctls			= exynos_ioctls,
	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
	.fops			= &exynos_drm_driver_fops,
	.name			= DRIVER_NAME,
	.desc			= DRIVER_DESC,
	.date			= DRIVER_DATE,
	.major			= DRIVER_MAJOR,
	.minor			= DRIVER_MINOR,
};

#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		int old_dpms = connector->dpms;

		if (connector->funcs->dpms)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

		/* Set the old mode back to the connector for resume */
		connector->dpms = old_dpms;
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}

static int exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		if (connector->funcs->dpms) {
			int dpms = connector->dpms;

			connector->dpms = DRM_MODE_DPMS_OFF;
			connector->funcs->dpms(connector, dpms);
		}
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}
#endif

static const struct dev_pm_ops exynos_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
};

/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;

struct exynos_drm_driver_info {
	struct platform_driver *driver;
	unsigned int flags;
};

#define DRM_COMPONENT_DRIVER	BIT(0)	/* supports component framework */
#define DRM_VIRTUAL_DEVICE	BIT(1)	/* create virtual platform device */
#define DRM_DMA_DEVICE		BIT(2)	/* can be used for dma allocations */

#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
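/*
 * For reference (comment added here, not in the original file): DRV_PTR()
 * keeps the exynos_drm_drivers[] table below valid for any Kconfig
 * selection.  For example, DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD)
 * evaluates to &fimd_driver when CONFIG_DRM_EXYNOS_FIMD is set to y or m,
 * and to NULL otherwise; entries with a NULL driver are simply skipped by
 * the register/match loops further down.
 */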
/*
 * Connector drivers should not be placed before their associated CRTC
 * drivers, because a connector requires the pipe number of its CRTC
 * during initialization.
 */
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
	{
		DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
		DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
	}, {
		DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
		DRM_COMPONENT_DRIVER
	}, {
		DRV_PTR(vidi_driver, CONFIG_DRM_EXYNOS_VIDI),
		DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
	}, {
		DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
	}, {
		DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
	}, {
		DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
	}, {
		DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
	}, {
		DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
		DRM_VIRTUAL_DEVICE
	}, {
		&exynos_drm_platform_driver,
		DRM_VIRTUAL_DEVICE
	}
};

static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}

static struct component_match *exynos_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *p = NULL, *d;

		if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER))
			continue;

		while ((d = bus_find_device(&platform_bus_type, p,
					    &info->driver->driver,
					    (void *)platform_bus_type.match))) {
			put_device(p);
			component_match_add(dev, &match, compare_dev, d);
			p = d;
		}
		put_device(p);
	}

	return match ?: ERR_PTR(-ENODEV);
}

static int exynos_drm_bind(struct device *dev)
{
	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
}

static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops exynos_drm_ops = {
	.bind		= exynos_drm_bind,
	.unbind		= exynos_drm_unbind,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	struct component_match *match;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);

	match = exynos_drm_match_add(&pdev->dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
					       match);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &exynos_drm_ops);
	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe	= exynos_drm_platform_probe,
	.remove	= exynos_drm_platform_remove,
	.driver	= {
		.name	= "exynos-drm",
		.pm	= &exynos_drm_pm_ops,
	},
};
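/*
 * Sketch of the expected counterpart in a sub-driver (hypothetical names,
 * added here for illustration only): every driver flagged
 * DRM_COMPONENT_DRIVER is assumed to register itself with the component
 * framework from its own probe path, roughly:
 *
 *	static const struct component_ops foo_component_ops = {
 *		.bind	= foo_bind,
 *		.unbind	= foo_unbind,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return component_add(&pdev->dev, &foo_component_ops);
 *	}
 *
 * exynos_drm_match_add() collects those platform devices into a match
 * list, and component_master_add_with_match() then binds them all from
 * exynos_drm_bind() via component_bind_all() in exynos_drm_load().
 */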
static struct device *exynos_drm_get_dma_device(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			return dev;
		}
	}
	return NULL;
}

static void exynos_drm_unregister_devices(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
		struct device *dev;

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		while ((dev = bus_find_device(&platform_bus_type, NULL,
					      &info->driver->driver,
					      (void *)platform_bus_type.match))) {
			put_device(dev);
			platform_device_unregister(to_platform_device(dev));
		}
	}
}

static int exynos_drm_register_devices(void)
{
	struct platform_device *pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE))
			continue;

		pdev = platform_device_register_simple(
					info->driver->driver.name, -1, NULL, 0);
		if (IS_ERR(pdev))
			goto fail;
	}

	return 0;
fail:
	exynos_drm_unregister_devices();
	return PTR_ERR(pdev);
}

static void exynos_drm_unregister_drivers(void)
{
	int i;

	for (i = ARRAY_SIZE(exynos_drm_drivers) - 1; i >= 0; --i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		platform_driver_unregister(info->driver);
	}
}

static int exynos_drm_register_drivers(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
		struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];

		if (!info->driver)
			continue;

		ret = platform_driver_register(info->driver);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	exynos_drm_unregister_drivers();
	return ret;
}

static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_drivers();
	if (ret)
		goto err_unregister_pdevs;

	return 0;

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}

static void exynos_drm_exit(void)
{
	exynos_drm_unregister_drivers();
	exynos_drm_unregister_devices();
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");