/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same lifetime as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	default:
		BUG();
	}
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
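 *
 * A minimal usage sketch (mirroring what drm_stub_open() below does; the
 * surrounding error handling is up to the caller):
 *
 *	minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	// ... use minor and minor->dev ...
 *	drm_minor_release(minor);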
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device.
 * This is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like
 * memory management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical struct &device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *	struct driver_device {
 *		struct drm_device drm;
 *		void *userspace_facing;
 *		struct clk *pclk;
 *	};
 *
 *	static struct drm_driver driver_drm_driver = {
 *		[...]
 *	};
 *
 *	static int driver_probe(struct platform_device *pdev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = drmm_mode_config_init(drm);
 *		if (ret)
 *			return ret;
 *
 *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *		if (!priv->userspace_facing)
 *			return -ENOMEM;
 *
 *		priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *		if (IS_ERR(priv->pclk))
 *			return PTR_ERR(priv->pclk);
 *
 *		// Further setup, display pipeline etc
 *
 *		platform_set_drvdata(pdev, drm);
 *
 *		drm_mode_config_reset(drm);
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_generic_setup(drm, 32);
 *
 *		return 0;
 *	}
 *
 *	// This function is called before the devm_ resources are released
 *	static int driver_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 *
 *	// This function is called on kernel restart and shutdown
 *	static void driver_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 *	static int __maybe_unused driver_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused driver_pm_resume(struct device *dev)
 *	{
 *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops driver_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *	};
 *
 *	static struct platform_driver driver_driver = {
 *		.driver = {
 *			[...]
 *			.pm = &driver_pm_ops,
 *		},
 *		.probe = driver_probe,
 *		.remove = driver_remove,
 *		.shutdown = driver_shutdown,
 *	};
 *	module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload)
 * should use drm_dev_unplug() instead of drm_dev_unregister(). The driver must
 * protect regions that access device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is
 * one shortcoming however: drm_dev_unplug() marks the drm_device as unplugged
 * before drm_atomic_helper_shutdown() is called. This means that if the
 * disable code paths are protected, they will not run on regular driver module
 * unload, possibly leaving the hardware enabled.
 */

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
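 *
 * A minimal sketch of the preferred explicit teardown, assuming the caller
 * holds the last driver reference to @dev:
 *
 * .. code-block:: c
 *
 *	// unpublish first so userspace can no longer reach the device
 *	drm_dev_unregister(dev);
 *	// then drop the driver's reference; the device is freed on the last put
 *	drm_dev_put(dev);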
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not
 * allow stand-alone address_space objects, so we need an underlying inode. As
 * there is no way to allocate an independent inode easily, we need a fake
 * internal VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references
 * to the inode. But each drm_fs_inode_new() call must be paired with exactly
 * one drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component
 * helper library<component>`. For consistency and better options for code
 * reuse the following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register().
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
 */

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}

/**
 * drm_dev_init - Initialise new DRM device
 * @dev: DRM device
 * @driver: DRM driver
 * @parent: Parent device object
 *
 * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register
 * it with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * It is recommended that drivers embed &struct drm_device into their own
 * device structure.
 *
 * Drivers that do not want to allocate their own device struct
 * embedding &struct drm_device can call drm_dev_alloc() instead.
 * For drivers that do embed &struct drm_device it must be placed first in the
 * overall structure, and the overall structure must be allocated using
 * kmalloc(): The drm core's release function unconditionally calls kfree() on
 * the @dev pointer when the final reference is released. To override this
 * behaviour, and so allow embedding of the drm_device inside the driver's
 * device struct at an arbitrary offset, you must supply a
 * &drm_driver.release callback and control the finalization explicitly.
 *
 * Note that drivers must call drmm_add_final_kfree() after this function has
 * completed successfully.
 *
 * RETURNS:
 * 0 on success, or error code on failure.
 */
int drm_dev_init(struct drm_device *dev,
		 struct drm_driver *driver,
		 struct device *parent)
{
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}
EXPORT_SYMBOL(drm_dev_init);

static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
	if (ret)
		devm_drm_dev_init_release(dev);

	return ret;
}

void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(), which does not
 * support subclassing through embedding the struct &drm_device in a driver
 * private structure, and which does not support automatic cleanup through
 * devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the
	 * controlD* sysfs files to figure out whether it's a modeset driver.
	 * It only does readdir, hence a symlink is sufficient (and the least
	 * confusing option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardevs have been allocated in the range 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be initialized via
 * drm_dev_init() previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	struct drm_driver *driver = dev->driver;
	int ret;

	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference.
 *
 * A special form of unregistering for hotpluggable devices is
 * drm_dev_unplug(), which can be called while there are still open users of
 * @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
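 *
 * For the hotplug case, entry-points that touch device resources can be
 * guarded with drm_dev_enter()/drm_dev_exit(). A minimal sketch, using a
 * hypothetical hardware-access helper as a placeholder:
 *
 * .. code-block:: c
 *
 *	int idx, ret;
 *
 *	if (!drm_dev_enter(drm, &idx))
 *		return -ENODEV;	// device was unplugged
 *
 *	ret = hypothetical_hw_readout(drm);	// placeholder for real HW access
 *
 *	drm_dev_exit(idx);
 *	return ret;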
 */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_legacy_rmmaps(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @name: unique name
 *
 * Sets the unique name of a DRM device using the specified string. This is
 * already done by drm_dev_init(), so drivers should only override the default
 * unique name for backwards compatibility reasons.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
	drmm_kfree(dev, dev->unique);
	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from
 * DRM core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);