/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
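
/*
 * As a worked example of the 64-wide minor-number windows handed out by
 * drm_minor_alloc() below (64 * type up to 64 * (type + 1) - 1): primary
 * minors land in 0..63 (the /dev/dri/cardN nodes), the legacy control range
 * 64..127 is only kept around for the controlD* compatibility symlinks
 * created by create_compat_control_link(), and render minors land in
 * 128..191 (/dev/dri/renderD128 and up).
 */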

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	default:
		BUG();
	}
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		goto err_debugfs;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged
 * and unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}
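
/*
 * A minimal usage sketch for the pair above (hypothetical caller; "minor_id"
 * stands in for whatever minor number the caller looked up): pair
 * drm_minor_acquire() with drm_minor_release(), and guard actual device
 * access with drm_dev_enter()/drm_dev_exit() since the device may get
 * unplugged while the minor is held.
 *
 *	struct drm_minor *minor;
 *	int idx;
 *
 *	minor = drm_minor_acquire(minor_id);
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *
 *	if (drm_dev_enter(minor->dev, &idx)) {
 *		// touch device resources here
 *		drm_dev_exit(idx);
 *	}
 *
 *	drm_minor_release(minor);
 */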

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device. This
 * is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like memory
 * management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical struct &device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows a typical structure of a DRM display driver.
 * The example focuses on the probe() function and the other functions that
 * are almost always present, and serves as a demonstration of
 * devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *	struct driver_device {
 *		struct drm_device drm;
 *		void *userspace_facing;
 *		struct clk *pclk;
 *	};
 *
 *	static const struct drm_driver driver_drm_driver = {
 *		[...]
 *	};
 *
 *	static int driver_probe(struct platform_device *pdev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = drmm_mode_config_init(drm);
 *		if (ret)
 *			return ret;
 *
 *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *		if (!priv->userspace_facing)
 *			return -ENOMEM;
 *
 *		priv->pclk = devm_clk_get(&pdev->dev, "PCLK");
 *		if (IS_ERR(priv->pclk))
 *			return PTR_ERR(priv->pclk);
 *
 *		// Further setup, display pipeline etc
 *
 *		platform_set_drvdata(pdev, drm);
 *
 *		drm_mode_config_reset(drm);
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_generic_setup(drm, 32);
 *
 *		return 0;
 *	}
 *
 *	// This function is called before the devm_ resources are released
 *	static int driver_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 *
 *	// This function is called on kernel restart and shutdown
 *	static void driver_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 *	static int __maybe_unused driver_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused driver_pm_resume(struct device *dev)
 *	{
 *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops driver_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *	};
 *
 *	static struct platform_driver driver_driver = {
 *		.driver = {
 *			[...]
 *			.pm = &driver_pm_ops,
 *		},
 *		.probe = driver_probe,
 *		.remove = driver_remove,
 *		.shutdown = driver_shutdown,
 *	};
 *	module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload) should
 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that are accessing device resources to prevent use after they're
 * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
 * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before
 * drm_atomic_helper_shutdown() is called. This means that if the disable code
 * paths are protected, they will not run on regular driver module unload,
 * possibly leaving the hardware enabled.
 */
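
/*
 * A minimal sketch of the drm_dev_enter()/drm_dev_exit() protection just
 * described, reusing the hypothetical driver_device type from the display
 * driver example above (driver_disable() is illustrative only):
 *
 *	static void driver_disable(struct driver_device *priv)
 *	{
 *		int idx;
 *
 *		if (!drm_dev_enter(&priv->drm, &idx))
 *			return;		// device is unplugged, skip hardware access
 *
 *		// program hardware registers, gate clocks, ...
 *
 *		drm_dev_exit(idx);
 *	}
 */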

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);
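
/*
 * A minimal sketch of a hot-unplug path using drm_dev_unplug() (hypothetical
 * USB driver; the names are illustrative only):
 *
 *	static void driver_usb_disconnect(struct usb_interface *interface)
 *	{
 *		struct drm_device *drm = usb_get_intfdata(interface);
 *
 *		drm_dev_unplug(drm);
 *		drm_atomic_helper_shutdown(drm);
 *	}
 *
 * Open file descriptors may still hold references to @drm afterwards; the
 * device is only released once the last drm_dev_put() has been called.
 */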

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component helper
 * library<component>`. For consistency and better options for code reuse the
 * following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register().
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
 */
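
/*
 * A minimal sketch of such a &component_master_ops.master_bind callback,
 * reusing the hypothetical driver_device/driver_drm_driver names from the
 * display driver example above (error unwinding kept to the bare minimum):
 *
 *	static int driver_master_bind(struct device *dev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		// All components get &priv->drm as the opaque pointer.
 *		ret = component_bind_all(dev, drm);
 *		if (ret)
 *			return ret;
 *
 *		// Further setup: mode config, encoders from components, ...
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			component_unbind_all(dev, drm);
 *
 *		return ret;
 *	}
 */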

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
	/*
	 * Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped.
	 */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}

static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	ret = drmm_add_action(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err;

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	ret = drm_dev_set_unique(dev, dev_name(parent));
	if (ret)
		goto err;

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}

static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     const struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	return devm_add_action_or_reset(parent,
					devm_drm_dev_init_release, dev);
}

void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(), which does not support
 * subclassing through embedding the struct &drm_device in a driver private
 * structure, and which does not support automatic cleanup through devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardevs have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be initialized via drm_dev_init()
 * previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference.
 *
 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
 * which can be called while there are still open users of @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_legacy_rmmaps(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @name: unique name
 *
 * Sets the unique name of a DRM device using the specified string. This is
 * already done by drm_dev_init(), drivers should only override the default
 * unique name for backwards compatibility reasons.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
	drmm_kfree(dev, dev->unique);
	dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);

	return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);