1 /* 2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org 3 * 4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. 5 * All Rights Reserved. 6 * 7 * Author Rickard E. (Rik) Faith <faith@valinux.com> 8 * 9 * Permission is hereby granted, free of charge, to any person obtaining a 10 * copy of this software and associated documentation files (the "Software"), 11 * to deal in the Software without restriction, including without limitation 12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 13 * and/or sell copies of the Software, and to permit persons to whom the 14 * Software is furnished to do so, subject to the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the next 17 * paragraph) shall be included in all copies or substantial portions of the 18 * Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 26 * DEALINGS IN THE SOFTWARE. 27 */ 28 29 #include <linux/debugfs.h> 30 #include <linux/fs.h> 31 #include <linux/module.h> 32 #include <linux/moduleparam.h> 33 #include <linux/mount.h> 34 #include <linux/slab.h> 35 #include <drm/drmP.h> 36 #include <drm/drm_core.h> 37 #include "drm_legacy.h" 38 39 unsigned int drm_debug = 0; /* 1 to enable debug output */ 40 EXPORT_SYMBOL(drm_debug); 41 42 unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 43 44 unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 45 46 /* 47 * Default to use monotonic timestamps for wait-for-vblank and page-flip 48 * complete events. 49 */ 50 unsigned int drm_timestamp_monotonic = 1; 51 52 MODULE_AUTHOR(CORE_AUTHOR); 53 MODULE_DESCRIPTION(CORE_DESC); 54 MODULE_LICENSE("GPL and additional rights"); 55 MODULE_PARM_DESC(debug, "Enable debug output"); 56 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 57 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 58 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 59 60 module_param_named(debug, drm_debug, int, 0600); 61 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 62 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 63 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 64 65 static DEFINE_SPINLOCK(drm_minor_lock); 66 static struct idr drm_minors_idr; 67 68 struct class *drm_class; 69 static struct dentry *drm_debugfs_root; 70 71 int drm_err(const char *func, const char *format, ...) 72 { 73 struct va_format vaf; 74 va_list args; 75 int r; 76 77 va_start(args, format); 78 79 vaf.fmt = format; 80 vaf.va = &args; 81 82 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); 83 84 va_end(args); 85 86 return r; 87 } 88 EXPORT_SYMBOL(drm_err); 89 90 void drm_ut_debug_printk(const char *function_name, const char *format, ...) 91 { 92 struct va_format vaf; 93 va_list args; 94 95 va_start(args, format); 96 vaf.fmt = format; 97 vaf.va = &args; 98 99 printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf); 100 101 va_end(args); 102 } 103 EXPORT_SYMBOL(drm_ut_debug_printk); 104 105 struct drm_master *drm_master_create(struct drm_minor *minor) 106 { 107 struct drm_master *master; 108 109 master = kzalloc(sizeof(*master), GFP_KERNEL); 110 if (!master) 111 return NULL; 112 113 kref_init(&master->refcount); 114 spin_lock_init(&master->lock.spinlock); 115 init_waitqueue_head(&master->lock.lock_queue); 116 if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) { 117 kfree(master); 118 return NULL; 119 } 120 INIT_LIST_HEAD(&master->magicfree); 121 master->minor = minor; 122 123 return master; 124 } 125 126 struct drm_master *drm_master_get(struct drm_master *master) 127 { 128 kref_get(&master->refcount); 129 return master; 130 } 131 EXPORT_SYMBOL(drm_master_get); 132 133 static void drm_master_destroy(struct kref *kref) 134 { 135 struct drm_master *master = container_of(kref, struct drm_master, refcount); 136 struct drm_magic_entry *pt, *next; 137 struct drm_device *dev = master->minor->dev; 138 struct drm_map_list *r_list, *list_temp; 139 140 mutex_lock(&dev->struct_mutex); 141 if (dev->driver->master_destroy) 142 dev->driver->master_destroy(dev, master); 143 144 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { 145 if (r_list->master == master) { 146 drm_rmmap_locked(dev, r_list->map); 147 r_list = NULL; 148 } 149 } 150 151 if (master->unique) { 152 kfree(master->unique); 153 master->unique = NULL; 154 master->unique_len = 0; 155 } 156 157 list_for_each_entry_safe(pt, next, &master->magicfree, head) { 158 list_del(&pt->head); 159 drm_ht_remove_item(&master->magiclist, &pt->hash_item); 160 kfree(pt); 161 } 162 163 drm_ht_remove(&master->magiclist); 164 165 mutex_unlock(&dev->struct_mutex); 166 kfree(master); 167 } 168 169 void drm_master_put(struct drm_master **master) 170 { 171 kref_put(&(*master)->refcount, drm_master_destroy); 172 *master = NULL; 173 } 174 EXPORT_SYMBOL(drm_master_put); 175 176 int drm_setmaster_ioctl(struct drm_device *dev, void *data, 177 struct drm_file *file_priv) 178 { 179 int ret = 0; 180 181 mutex_lock(&dev->master_mutex); 182 if (file_priv->is_master) 183 goto out_unlock; 184 185 if (file_priv->minor->master) { 186 ret = -EINVAL; 187 goto out_unlock; 188 } 189 190 if (!file_priv->master) { 191 ret = -EINVAL; 192 goto out_unlock; 193 } 194 195 file_priv->minor->master = drm_master_get(file_priv->master); 196 file_priv->is_master = 1; 197 if (dev->driver->master_set) { 198 ret = dev->driver->master_set(dev, file_priv, false); 199 if (unlikely(ret != 0)) { 200 file_priv->is_master = 0; 201 drm_master_put(&file_priv->minor->master); 202 } 203 } 204 205 out_unlock: 206 mutex_unlock(&dev->master_mutex); 207 return ret; 208 } 209 210 int drm_dropmaster_ioctl(struct drm_device *dev, void *data, 211 struct drm_file *file_priv) 212 { 213 int ret = -EINVAL; 214 215 mutex_lock(&dev->master_mutex); 216 if (!file_priv->is_master) 217 goto out_unlock; 218 219 if (!file_priv->minor->master) 220 goto out_unlock; 221 222 ret = 0; 223 if (dev->driver->master_drop) 224 dev->driver->master_drop(dev, file_priv, false); 225 drm_master_put(&file_priv->minor->master); 226 file_priv->is_master = 0; 227 228 out_unlock: 229 mutex_unlock(&dev->master_mutex); 230 return ret; 231 } 232 233 /* 234 * DRM Minors 235 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each 236 * of them is represented by a drm_minor object. Depending on the capabilities 237 * of the device-driver, different interfaces are registered. 238 * 239 * Minors can be accessed via dev->$minor_name. This pointer is either 240 * NULL or a valid drm_minor pointer and stays valid as long as the device is 241 * valid. This means, DRM minors have the same life-time as the underlying 242 * device. However, this doesn't mean that the minor is active. Minors are 243 * registered and unregistered dynamically according to device-state. 244 */ 245 246 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, 247 unsigned int type) 248 { 249 switch (type) { 250 case DRM_MINOR_LEGACY: 251 return &dev->primary; 252 case DRM_MINOR_RENDER: 253 return &dev->render; 254 case DRM_MINOR_CONTROL: 255 return &dev->control; 256 default: 257 return NULL; 258 } 259 } 260 261 static int drm_minor_alloc(struct drm_device *dev, unsigned int type) 262 { 263 struct drm_minor *minor; 264 unsigned long flags; 265 int r; 266 267 minor = kzalloc(sizeof(*minor), GFP_KERNEL); 268 if (!minor) 269 return -ENOMEM; 270 271 minor->type = type; 272 minor->dev = dev; 273 274 idr_preload(GFP_KERNEL); 275 spin_lock_irqsave(&drm_minor_lock, flags); 276 r = idr_alloc(&drm_minors_idr, 277 NULL, 278 64 * type, 279 64 * (type + 1), 280 GFP_NOWAIT); 281 spin_unlock_irqrestore(&drm_minor_lock, flags); 282 idr_preload_end(); 283 284 if (r < 0) 285 goto err_free; 286 287 minor->index = r; 288 289 minor->kdev = drm_sysfs_minor_alloc(minor); 290 if (IS_ERR(minor->kdev)) { 291 r = PTR_ERR(minor->kdev); 292 goto err_index; 293 } 294 295 *drm_minor_get_slot(dev, type) = minor; 296 return 0; 297 298 err_index: 299 spin_lock_irqsave(&drm_minor_lock, flags); 300 idr_remove(&drm_minors_idr, minor->index); 301 spin_unlock_irqrestore(&drm_minor_lock, flags); 302 err_free: 303 kfree(minor); 304 return r; 305 } 306 307 static void drm_minor_free(struct drm_device *dev, unsigned int type) 308 { 309 struct drm_minor **slot, *minor; 310 unsigned long flags; 311 312 slot = drm_minor_get_slot(dev, type); 313 minor = *slot; 314 if (!minor) 315 return; 316 317 drm_mode_group_destroy(&minor->mode_group); 318 put_device(minor->kdev); 319 320 spin_lock_irqsave(&drm_minor_lock, flags); 321 idr_remove(&drm_minors_idr, minor->index); 322 spin_unlock_irqrestore(&drm_minor_lock, flags); 323 324 kfree(minor); 325 *slot = NULL; 326 } 327 328 static int drm_minor_register(struct drm_device *dev, unsigned int type) 329 { 330 struct drm_minor *minor; 331 unsigned long flags; 332 int ret; 333 334 DRM_DEBUG("\n"); 335 336 minor = *drm_minor_get_slot(dev, type); 337 if (!minor) 338 return 0; 339 340 ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); 341 if (ret) { 342 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); 343 return ret; 344 } 345 346 ret = device_add(minor->kdev); 347 if (ret) 348 goto err_debugfs; 349 350 /* replace NULL with @minor so lookups will succeed from now on */ 351 spin_lock_irqsave(&drm_minor_lock, flags); 352 idr_replace(&drm_minors_idr, minor, minor->index); 353 spin_unlock_irqrestore(&drm_minor_lock, flags); 354 355 DRM_DEBUG("new minor registered %d\n", minor->index); 356 return 0; 357 358 err_debugfs: 359 drm_debugfs_cleanup(minor); 360 return ret; 361 } 362 363 static void drm_minor_unregister(struct drm_device *dev, unsigned int type) 364 { 365 struct drm_minor *minor; 366 unsigned long flags; 367 368 minor = *drm_minor_get_slot(dev, type); 369 if (!minor || !device_is_registered(minor->kdev)) 370 return; 371 372 /* replace @minor with NULL so lookups will fail from now on */ 373 spin_lock_irqsave(&drm_minor_lock, flags); 374 idr_replace(&drm_minors_idr, NULL, minor->index); 375 spin_unlock_irqrestore(&drm_minor_lock, flags); 376 377 device_del(minor->kdev); 378 dev_set_drvdata(minor->kdev, NULL); /* safety belt */ 379 drm_debugfs_cleanup(minor); 380 } 381 382 /** 383 * drm_minor_acquire - Acquire a DRM minor 384 * @minor_id: Minor ID of the DRM-minor 385 * 386 * Looks up the given minor-ID and returns the respective DRM-minor object. The 387 * refence-count of the underlying device is increased so you must release this 388 * object with drm_minor_release(). 389 * 390 * As long as you hold this minor, it is guaranteed that the object and the 391 * minor->dev pointer will stay valid! However, the device may get unplugged and 392 * unregistered while you hold the minor. 393 * 394 * Returns: 395 * Pointer to minor-object with increased device-refcount, or PTR_ERR on 396 * failure. 397 */ 398 struct drm_minor *drm_minor_acquire(unsigned int minor_id) 399 { 400 struct drm_minor *minor; 401 unsigned long flags; 402 403 spin_lock_irqsave(&drm_minor_lock, flags); 404 minor = idr_find(&drm_minors_idr, minor_id); 405 if (minor) 406 drm_dev_ref(minor->dev); 407 spin_unlock_irqrestore(&drm_minor_lock, flags); 408 409 if (!minor) { 410 return ERR_PTR(-ENODEV); 411 } else if (drm_device_is_unplugged(minor->dev)) { 412 drm_dev_unref(minor->dev); 413 return ERR_PTR(-ENODEV); 414 } 415 416 return minor; 417 } 418 419 /** 420 * drm_minor_release - Release DRM minor 421 * @minor: Pointer to DRM minor object 422 * 423 * Release a minor that was previously acquired via drm_minor_acquire(). 424 */ 425 void drm_minor_release(struct drm_minor *minor) 426 { 427 drm_dev_unref(minor->dev); 428 } 429 430 /** 431 * drm_put_dev - Unregister and release a DRM device 432 * @dev: DRM device 433 * 434 * Called at module unload time or when a PCI device is unplugged. 435 * 436 * Use of this function is discouraged. It will eventually go away completely. 437 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead. 438 * 439 * Cleans up all DRM device, calling drm_lastclose(). 440 */ 441 void drm_put_dev(struct drm_device *dev) 442 { 443 DRM_DEBUG("\n"); 444 445 if (!dev) { 446 DRM_ERROR("cleanup called no dev\n"); 447 return; 448 } 449 450 drm_dev_unregister(dev); 451 drm_dev_unref(dev); 452 } 453 EXPORT_SYMBOL(drm_put_dev); 454 455 void drm_unplug_dev(struct drm_device *dev) 456 { 457 /* for a USB device */ 458 drm_minor_unregister(dev, DRM_MINOR_LEGACY); 459 drm_minor_unregister(dev, DRM_MINOR_RENDER); 460 drm_minor_unregister(dev, DRM_MINOR_CONTROL); 461 462 mutex_lock(&drm_global_mutex); 463 464 drm_device_set_unplugged(dev); 465 466 if (dev->open_count == 0) { 467 drm_put_dev(dev); 468 } 469 mutex_unlock(&drm_global_mutex); 470 } 471 EXPORT_SYMBOL(drm_unplug_dev); 472 473 /* 474 * DRM internal mount 475 * We want to be able to allocate our own "struct address_space" to control 476 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow 477 * stand-alone address_space objects, so we need an underlying inode. As there 478 * is no way to allocate an independent inode easily, we need a fake internal 479 * VFS mount-point. 480 * 481 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() 482 * frees it again. You are allowed to use iget() and iput() to get references to 483 * the inode. But each drm_fs_inode_new() call must be paired with exactly one 484 * drm_fs_inode_free() call (which does not have to be the last iput()). 485 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it 486 * between multiple inode-users. You could, technically, call 487 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an 488 * iput(), but this way you'd end up with a new vfsmount for each inode. 489 */ 490 491 static int drm_fs_cnt; 492 static struct vfsmount *drm_fs_mnt; 493 494 static const struct dentry_operations drm_fs_dops = { 495 .d_dname = simple_dname, 496 }; 497 498 static const struct super_operations drm_fs_sops = { 499 .statfs = simple_statfs, 500 }; 501 502 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags, 503 const char *dev_name, void *data) 504 { 505 return mount_pseudo(fs_type, 506 "drm:", 507 &drm_fs_sops, 508 &drm_fs_dops, 509 0x010203ff); 510 } 511 512 static struct file_system_type drm_fs_type = { 513 .name = "drm", 514 .owner = THIS_MODULE, 515 .mount = drm_fs_mount, 516 .kill_sb = kill_anon_super, 517 }; 518 519 static struct inode *drm_fs_inode_new(void) 520 { 521 struct inode *inode; 522 int r; 523 524 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); 525 if (r < 0) { 526 DRM_ERROR("Cannot mount pseudo fs: %d\n", r); 527 return ERR_PTR(r); 528 } 529 530 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); 531 if (IS_ERR(inode)) 532 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); 533 534 return inode; 535 } 536 537 static void drm_fs_inode_free(struct inode *inode) 538 { 539 if (inode) { 540 iput(inode); 541 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); 542 } 543 } 544 545 /** 546 * drm_dev_alloc - Allocate new DRM device 547 * @driver: DRM driver to allocate device for 548 * @parent: Parent device object 549 * 550 * Allocate and initialize a new DRM device. No device registration is done. 551 * Call drm_dev_register() to advertice the device to user space and register it 552 * with other core subsystems. 553 * 554 * The initial ref-count of the object is 1. Use drm_dev_ref() and 555 * drm_dev_unref() to take and drop further ref-counts. 556 * 557 * RETURNS: 558 * Pointer to new DRM device, or NULL if out of memory. 559 */ 560 struct drm_device *drm_dev_alloc(struct drm_driver *driver, 561 struct device *parent) 562 { 563 struct drm_device *dev; 564 int ret; 565 566 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 567 if (!dev) 568 return NULL; 569 570 kref_init(&dev->ref); 571 dev->dev = parent; 572 dev->driver = driver; 573 574 INIT_LIST_HEAD(&dev->filelist); 575 INIT_LIST_HEAD(&dev->ctxlist); 576 INIT_LIST_HEAD(&dev->vmalist); 577 INIT_LIST_HEAD(&dev->maplist); 578 INIT_LIST_HEAD(&dev->vblank_event_list); 579 580 spin_lock_init(&dev->buf_lock); 581 spin_lock_init(&dev->event_lock); 582 mutex_init(&dev->struct_mutex); 583 mutex_init(&dev->ctxlist_mutex); 584 mutex_init(&dev->master_mutex); 585 586 dev->anon_inode = drm_fs_inode_new(); 587 if (IS_ERR(dev->anon_inode)) { 588 ret = PTR_ERR(dev->anon_inode); 589 DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); 590 goto err_free; 591 } 592 593 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 594 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); 595 if (ret) 596 goto err_minors; 597 } 598 599 if (drm_core_check_feature(dev, DRIVER_RENDER)) { 600 ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); 601 if (ret) 602 goto err_minors; 603 } 604 605 ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY); 606 if (ret) 607 goto err_minors; 608 609 if (drm_ht_create(&dev->map_hash, 12)) 610 goto err_minors; 611 612 ret = drm_legacy_ctxbitmap_init(dev); 613 if (ret) { 614 DRM_ERROR("Cannot allocate memory for context bitmap.\n"); 615 goto err_ht; 616 } 617 618 if (driver->driver_features & DRIVER_GEM) { 619 ret = drm_gem_init(dev); 620 if (ret) { 621 DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); 622 goto err_ctxbitmap; 623 } 624 } 625 626 return dev; 627 628 err_ctxbitmap: 629 drm_legacy_ctxbitmap_cleanup(dev); 630 err_ht: 631 drm_ht_remove(&dev->map_hash); 632 err_minors: 633 drm_minor_free(dev, DRM_MINOR_LEGACY); 634 drm_minor_free(dev, DRM_MINOR_RENDER); 635 drm_minor_free(dev, DRM_MINOR_CONTROL); 636 drm_fs_inode_free(dev->anon_inode); 637 err_free: 638 mutex_destroy(&dev->master_mutex); 639 kfree(dev); 640 return NULL; 641 } 642 EXPORT_SYMBOL(drm_dev_alloc); 643 644 static void drm_dev_release(struct kref *ref) 645 { 646 struct drm_device *dev = container_of(ref, struct drm_device, ref); 647 648 if (dev->driver->driver_features & DRIVER_GEM) 649 drm_gem_destroy(dev); 650 651 drm_legacy_ctxbitmap_cleanup(dev); 652 drm_ht_remove(&dev->map_hash); 653 drm_fs_inode_free(dev->anon_inode); 654 655 drm_minor_free(dev, DRM_MINOR_LEGACY); 656 drm_minor_free(dev, DRM_MINOR_RENDER); 657 drm_minor_free(dev, DRM_MINOR_CONTROL); 658 659 mutex_destroy(&dev->master_mutex); 660 kfree(dev->unique); 661 kfree(dev); 662 } 663 664 /** 665 * drm_dev_ref - Take reference of a DRM device 666 * @dev: device to take reference of or NULL 667 * 668 * This increases the ref-count of @dev by one. You *must* already own a 669 * reference when calling this. Use drm_dev_unref() to drop this reference 670 * again. 671 * 672 * This function never fails. However, this function does not provide *any* 673 * guarantee whether the device is alive or running. It only provides a 674 * reference to the object and the memory associated with it. 675 */ 676 void drm_dev_ref(struct drm_device *dev) 677 { 678 if (dev) 679 kref_get(&dev->ref); 680 } 681 EXPORT_SYMBOL(drm_dev_ref); 682 683 /** 684 * drm_dev_unref - Drop reference of a DRM device 685 * @dev: device to drop reference of or NULL 686 * 687 * This decreases the ref-count of @dev by one. The device is destroyed if the 688 * ref-count drops to zero. 689 */ 690 void drm_dev_unref(struct drm_device *dev) 691 { 692 if (dev) 693 kref_put(&dev->ref, drm_dev_release); 694 } 695 EXPORT_SYMBOL(drm_dev_unref); 696 697 /** 698 * drm_dev_register - Register DRM device 699 * @dev: Device to register 700 * @flags: Flags passed to the driver's .load() function 701 * 702 * Register the DRM device @dev with the system, advertise device to user-space 703 * and start normal device operation. @dev must be allocated via drm_dev_alloc() 704 * previously. 705 * 706 * Never call this twice on any device! 707 * 708 * RETURNS: 709 * 0 on success, negative error code on failure. 710 */ 711 int drm_dev_register(struct drm_device *dev, unsigned long flags) 712 { 713 int ret; 714 715 mutex_lock(&drm_global_mutex); 716 717 ret = drm_minor_register(dev, DRM_MINOR_CONTROL); 718 if (ret) 719 goto err_minors; 720 721 ret = drm_minor_register(dev, DRM_MINOR_RENDER); 722 if (ret) 723 goto err_minors; 724 725 ret = drm_minor_register(dev, DRM_MINOR_LEGACY); 726 if (ret) 727 goto err_minors; 728 729 if (dev->driver->load) { 730 ret = dev->driver->load(dev, flags); 731 if (ret) 732 goto err_minors; 733 } 734 735 /* setup grouping for legacy outputs */ 736 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 737 ret = drm_mode_group_init_legacy_group(dev, 738 &dev->primary->mode_group); 739 if (ret) 740 goto err_unload; 741 } 742 743 ret = 0; 744 goto out_unlock; 745 746 err_unload: 747 if (dev->driver->unload) 748 dev->driver->unload(dev); 749 err_minors: 750 drm_minor_unregister(dev, DRM_MINOR_LEGACY); 751 drm_minor_unregister(dev, DRM_MINOR_RENDER); 752 drm_minor_unregister(dev, DRM_MINOR_CONTROL); 753 out_unlock: 754 mutex_unlock(&drm_global_mutex); 755 return ret; 756 } 757 EXPORT_SYMBOL(drm_dev_register); 758 759 /** 760 * drm_dev_unregister - Unregister DRM device 761 * @dev: Device to unregister 762 * 763 * Unregister the DRM device from the system. This does the reverse of 764 * drm_dev_register() but does not deallocate the device. The caller must call 765 * drm_dev_unref() to drop their final reference. 766 */ 767 void drm_dev_unregister(struct drm_device *dev) 768 { 769 struct drm_map_list *r_list, *list_temp; 770 771 drm_lastclose(dev); 772 773 if (dev->driver->unload) 774 dev->driver->unload(dev); 775 776 if (dev->agp) 777 drm_pci_agp_destroy(dev); 778 779 drm_vblank_cleanup(dev); 780 781 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 782 drm_rmmap(dev, r_list->map); 783 784 drm_minor_unregister(dev, DRM_MINOR_LEGACY); 785 drm_minor_unregister(dev, DRM_MINOR_RENDER); 786 drm_minor_unregister(dev, DRM_MINOR_CONTROL); 787 } 788 EXPORT_SYMBOL(drm_dev_unregister); 789 790 /** 791 * drm_dev_set_unique - Set the unique name of a DRM device 792 * @dev: device of which to set the unique name 793 * @fmt: format string for unique name 794 * 795 * Sets the unique name of a DRM device using the specified format string and 796 * a variable list of arguments. Drivers can use this at driver probe time if 797 * the unique name of the devices they drive is static. 798 * 799 * Return: 0 on success or a negative error code on failure. 800 */ 801 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...) 802 { 803 va_list ap; 804 805 kfree(dev->unique); 806 807 va_start(ap, fmt); 808 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap); 809 va_end(ap); 810 811 return dev->unique ? 0 : -ENOMEM; 812 } 813 EXPORT_SYMBOL(drm_dev_set_unique); 814 815 /* 816 * DRM Core 817 * The DRM core module initializes all global DRM objects and makes them 818 * available to drivers. Once setup, drivers can probe their respective 819 * devices. 820 * Currently, core management includes: 821 * - The "DRM-Global" key/value database 822 * - Global ID management for connectors 823 * - DRM major number allocation 824 * - DRM minor management 825 * - DRM sysfs class 826 * - DRM debugfs root 827 * 828 * Furthermore, the DRM core provides dynamic char-dev lookups. For each 829 * interface registered on a DRM device, you can request minor numbers from DRM 830 * core. DRM core takes care of major-number management and char-dev 831 * registration. A stub ->open() callback forwards any open() requests to the 832 * registered minor. 833 */ 834 835 static int drm_stub_open(struct inode *inode, struct file *filp) 836 { 837 const struct file_operations *new_fops; 838 struct drm_minor *minor; 839 int err; 840 841 DRM_DEBUG("\n"); 842 843 mutex_lock(&drm_global_mutex); 844 minor = drm_minor_acquire(iminor(inode)); 845 if (IS_ERR(minor)) { 846 err = PTR_ERR(minor); 847 goto out_unlock; 848 } 849 850 new_fops = fops_get(minor->dev->driver->fops); 851 if (!new_fops) { 852 err = -ENODEV; 853 goto out_release; 854 } 855 856 replace_fops(filp, new_fops); 857 if (filp->f_op->open) 858 err = filp->f_op->open(inode, filp); 859 else 860 err = 0; 861 862 out_release: 863 drm_minor_release(minor); 864 out_unlock: 865 mutex_unlock(&drm_global_mutex); 866 return err; 867 } 868 869 static const struct file_operations drm_stub_fops = { 870 .owner = THIS_MODULE, 871 .open = drm_stub_open, 872 .llseek = noop_llseek, 873 }; 874 875 static int __init drm_core_init(void) 876 { 877 int ret = -ENOMEM; 878 879 drm_global_init(); 880 drm_connector_ida_init(); 881 idr_init(&drm_minors_idr); 882 883 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) 884 goto err_p1; 885 886 drm_class = drm_sysfs_create(THIS_MODULE, "drm"); 887 if (IS_ERR(drm_class)) { 888 printk(KERN_ERR "DRM: Error creating drm class.\n"); 889 ret = PTR_ERR(drm_class); 890 goto err_p2; 891 } 892 893 drm_debugfs_root = debugfs_create_dir("dri", NULL); 894 if (!drm_debugfs_root) { 895 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); 896 ret = -1; 897 goto err_p3; 898 } 899 900 DRM_INFO("Initialized %s %d.%d.%d %s\n", 901 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); 902 return 0; 903 err_p3: 904 drm_sysfs_destroy(); 905 err_p2: 906 unregister_chrdev(DRM_MAJOR, "drm"); 907 908 idr_destroy(&drm_minors_idr); 909 err_p1: 910 return ret; 911 } 912 913 static void __exit drm_core_exit(void) 914 { 915 debugfs_remove(drm_debugfs_root); 916 drm_sysfs_destroy(); 917 918 unregister_chrdev(DRM_MAJOR, "drm"); 919 920 drm_connector_ida_destroy(); 921 idr_destroy(&drm_minors_idr); 922 } 923 924 module_init(drm_core_init); 925 module_exit(drm_core_exit); 926