/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/limits.h>
#include <sys/sysent.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/drm_core.h>
#include <dev/drm2/drm_global.h>
#include <dev/drm2/drm_sarea.h>
#include <dev/drm2/drm_mode.h>

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
    DRM_DEBUGBITS_FAILED_IOCTL);
#else
int drm_debug_flag = 0;
#endif
int drm_notyet_flag = 0;

unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);
static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot);

static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};
DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);

static drm_ioctl_desc_t drm_ioctls[256] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

static struct cdevsw drm_cdevsw = {
	.d_version = D_VERSION,
	.d_open = drm_open,
	.d_read = drm_read,
	.d_ioctl = drm_ioctl,
	.d_poll = drm_poll,
	.d_mmap = drm_mmap,
	.d_mmap_single = drm_mmap_single,
	.d_name = "drm",
	.d_flags = D_TRACKCLOSE
};

static int drm_msi = 1;	/* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
	{0x8086, 0x2772}, /* Intel i945G */
	{0x8086, 0x27A2}, /* Intel i945GM */
	{0x8086, 0x27AE}, /* Intel i945GME */
	{0, 0}
};

static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
{
	int i = 0;

	if (dev->driver->use_msi != NULL) {
		int use_msi;

		use_msi = dev->driver->use_msi(dev, flags);

		return (!use_msi);
	}

	/* TODO: Maybe move this to a callback in i915? */
	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
		    (drm_msi_blacklist[i].device == dev->pci_device)) {
			return 1;
		}
	}

	return 0;
}

int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY
	    || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
		return ENXIO;

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (!device_get_desc(kdev)) {
			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
			device_set_desc(kdev, id_entry->name);
		}
		return 0;
	}

	return ENXIO;
}

int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int error, msicount;

	dev = device_get_softc(kdev);

	dev->device = kdev;

	dev->pci_domain = pci_get_domain(dev->device);
	dev->pci_bus = pci_get_bus(dev->device);
	dev->pci_slot = pci_get_slot(dev->device);
	dev->pci_func = pci_get_function(dev->device);

	dev->pci_vendor = pci_get_vendor(dev->device);
	dev->pci_device = pci_get_device(dev->device);
	dev->pci_subvendor = pci_get_subvendor(dev->device);
	dev->pci_subdevice = pci_get_subdevice(dev->device);

	id_entry = drm_find_description(dev->pci_vendor,
	    dev->pci_device, idlist);
	dev->id_entry = id_entry;

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		if (drm_msi &&
		    !drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
			msicount = pci_msi_count(dev->device);
			DRM_DEBUG("MSI count = %d\n", msicount);
			if (msicount > 1)
				msicount = 1;

			if (pci_alloc_msi(dev->device, &msicount) == 0) {
				DRM_INFO("MSI enabled %d message(s)\n",
				    msicount);
				dev->msi_enabled = 1;
				dev->irqrid = 1;
			}
		}

		dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
		    &dev->irqrid, RF_SHAREABLE);
		if (!dev->irqr) {
			return (ENOENT);
		}

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
	mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
	sx_init(&dev->dev_struct_lock, "drmslk");

	error = drm_load(dev);
	if (error)
		goto error;

	error = drm_create_cdevs(kdev);
	if (error)
		goto error;

	return (error);
error:
	if (dev->irqr) {
		bus_release_resource(dev->device, SYS_RES_IRQ,
		    dev->irqrid, dev->irqr);
	}
	if (dev->msi_enabled) {
		pci_release_msi(dev->device);
	}
	return (error);
}

int
drm_create_cdevs(device_t kdev)
{
	struct drm_device *dev;
	int error, unit;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
	    &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
	    DRM_DEV_MODE, "dri/card%d", unit);
	if (error == 0)
		dev->devnode->si_drv1 = dev;
	return (error);
}

int drm_detach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	drm_unload(dev);
	if (dev->irqr) {
		bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
		    dev->irqr);
		if (dev->msi_enabled) {
			pci_release_msi(dev->device);
			DRM_INFO("MSI released\n");
		}
	}
	return (0);
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return &idlist[i];
		}
	}
	return NULL;
}

static int drm_firstopen(struct drm_device *dev)
{
	drm_local_map_t *map;
	int i;

	DRM_LOCK_ASSERT(dev);

	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
	    _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	dev->buf_use = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
		i = drm_dma_setup(dev);
		if (i != 0)
			return i;
	}

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->lock.lock_queue = 0;
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	dev->buf_sigio = NULL;

	DRM_DEBUG("\n");

	return 0;
}

static int drm_lastclose(struct drm_device *dev)
{
	drm_magic_entry_t *pt, *next;
	drm_local_map_t *map, *mapsave;
	int i;

	DRM_LOCK_ASSERT(dev);

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->unique) {
		free(dev->unique, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			free(pt, DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	DRM_UNLOCK(dev);
	drm_drawable_free_all(dev);
	DRM_LOCK(dev);

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			free(entry, DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
		if (!(map->flags & _DRM_DRIVER))
			drm_rmmap(dev, map);
	}

	drm_dma_takedown(dev);
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
	}

	return 0;
}

static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	TAILQ_INIT(&dev->maplist);
	dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
	if (dev->map_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate map number allocator\n");
		return EINVAL;
	}

	drm_mem_init();
	drm_sysctl_init(dev);
	TAILQ_INIT(&dev->files);

	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	retcode = drm_ctxbitmap_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error;
	}

	dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
	if (dev->drw_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate drawable number allocator\n");
		retcode = ENOMEM;
		goto error;
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
			    "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->device))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	delete_unrhdr(dev->drw_unrhdr);
	drm_gem_destroy(dev);
error:
	drm_ctxbitmap_cleanup(dev);
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	mtx_destroy(&dev->drw_lock);
	mtx_destroy(&dev->vbl_lock);
	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->dev_lock);
	mtx_destroy(&dev->event_lock);
	sx_destroy(&dev->dev_struct_lock);

	return retcode;
}

static void drm_unload(struct drm_device *dev)
{
	int i;

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	drm_ctxbitmap_cleanup(dev);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	if (dev->agp && dev->agp->mtrr) {
		int __unused retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	drm_vblank_cleanup(dev);

	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);

	/* Clean up PCI resources allocated by drm_bufs.c. We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->device, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (dev->agp) {
		free(dev->agp, DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}

	if (dev->driver->unload != NULL) {
		DRM_LOCK(dev);
		dev->driver->unload(dev);
		DRM_UNLOCK(dev);
	}

	delete_unrhdr(dev->drw_unrhdr);
	delete_unrhdr(dev->map_unrhdr);

	drm_mem_uninit();

	if (pci_disable_busmaster(dev->device))
		DRM_ERROR("Request to disable bus-master failed.\n");

	mtx_destroy(&dev->drw_lock);
	mtx_destroy(&dev->vbl_lock);
	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->dev_lock);
	mtx_destroy(&dev->event_lock);
	sx_destroy(&dev->dev_struct_lock);
}

int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version *version = data;
	int len;

#define DRM_COPY( name, value ) \
	len = strlen( value ); \
	if ( len > name##_len ) len = name##_len; \
	name##_len = strlen( value ); \
	if ( len && name ) { \
		if ( DRM_COPY_TO_USER( name, value, len ) ) \
			return EFAULT; \
	}

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

int
drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
	struct drm_device *dev;
	int retcode;

	dev = kdev->si_drv1;
	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	retcode = drm_open_helper(kdev, flags, fmt, p, dev);

	if (retcode == 0) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		DRM_LOCK(dev);
		mtx_lock(&Giant);
		device_busy(dev->device);
		mtx_unlock(&Giant);
		if (!dev->open_count++)
			retcode = drm_firstopen(dev);
		DRM_UNLOCK(dev);
	}

	return (retcode);
}

void drm_close(void *data)
{
	struct drm_file *file_priv = data;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->device, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
		    DRM_CURRENTPID,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of
		   hardware at this point, possibly
		   processed via a callback to the X
		   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
				dev->lock.file_priv = file_priv;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);
	seldrain(&file_priv->event_poll);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	TAILQ_REMOVE(&dev->files, file_priv, link);
	free(file_priv, DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	mtx_lock(&Giant);
	device_unbusy(dev->device);
	mtx_unlock(&Giant);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}

extern drm_ioctl_desc_t drm_compat_ioctls[];

/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
    DRM_STRUCTPROC *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
	int nr = DRM_IOCTL_NR(cmd);
	int is_driver_ioctl = 0;
	struct drm_file *file_priv;

	retcode = devfs_get_cdevpriv((void **)&file_priv);
	if (retcode != 0) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, nr, (long)dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case FIOSETOWN:
		return fsetown(*(int *)data, &dev->buf_sigio);

	case FIOGETOWN:
		*(int *) data = fgetown(&dev->buf_sigio);
		return 0;
	}

	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
		return EINVAL;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Called whenever a 32-bit process running under a 64-bit
	 * kernel performs an ioctl on /dev/drm.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
		/*
		 * Assume that ioctls without an explicit compat
		 * routine will just work.  This may not always be a
		 * good assumption, but it's better than always
		 * failing.
		 */
		ioctl = &drm_compat_ioctls[nr];
	else
#endif
		ioctl = &drm_ioctls[nr];
	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr > dev->driver->max_ioctl) {
			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
			    nr, dev->driver->max_ioctl);
			return EINVAL;
		}
#ifdef COMPAT_FREEBSD32
		if (SV_CURPROC_FLAG(SV_ILP32) &&
		    nr < *dev->driver->compat_ioctls_nr &&
		    dev->driver->compat_ioctls[nr].func != NULL)
			ioctl = &dev->driver->compat_ioctls[nr];
		else
#endif
			ioctl = &dev->driver->ioctls[nr];
		is_driver_ioctl = 1;
	}
	func = ioctl->func;

	if (func == NULL) {
		DRM_DEBUG("no function\n");
		return EINVAL;
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return EACCES;

	if (is_driver_ioctl) {
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			DRM_LOCK(dev);
		/* shared code returns -errno */
		retcode = -func(dev, data, file_priv);
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			DRM_UNLOCK(dev);
	} else {
		retcode = func(dev, data, file_priv);
	}

	if (retcode != 0)
		DRM_DEBUG(" returning %d\n", retcode);
	if (retcode != 0 &&
	    (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
		printf(
		    "pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
		    file_priv->authenticated, retcode);
	}

	return retcode;
}

drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
	drm_local_map_t *map;

	DRM_LOCK_ASSERT(dev);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			return map;
	}

	return NULL;
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	snprintf(dev->busid_str, sizeof(dev->busid_str),
	    "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	    dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);

	return (0);
}

static int
drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct drm_device *dev;

	dev = drm_get_device_from_kdev(kdev);
	if (dev->drm_ttm_bdev != NULL) {
		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
		    obj_res, nprot));
	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
	} else {
		return (ENODEV);
	}
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN 0x6400
#define LINUX_IOCTL_DRM_MAX 0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN IOC_OUT
#define LINUX_IOC_OUT IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */

static int
drm_core_init(void *arg)
{

	drm_global_init();

#if DRM_LINUX
	linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
}

static void
drm_core_exit(void *arg)
{

#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);

bool
dmi_check_system(const struct dmi_system_id *sysid)
{

	/* XXXKIB */
	return (false);
}