/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/limits.h>
#include <sys/sysent.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/drm_sarea.h>
#include <dev/drm2/drm_mode.h>

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
    DRM_DEBUGBITS_FAILED_IOCTL);
#else
int drm_debug_flag = 0;
#endif
int drm_notyet_flag = 0;

unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */
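/*
 * The drm_debug_flag and drm_notyet_flag globals above are boot-time
 * tunables: drm_modevent() fetches "drm.debug" and "drm.notyet" at MOD_LOAD
 * time, and "hw.drm.msi" is fetched below.  An illustrative loader.conf(5)
 * fragment (not part of this file):
 *
 *   drm.debug="<OR of the DRM_DEBUGBITS_* values>"
 *   hw.drm.msi="0"     # fall back to legacy (INTx) interrupts
 */
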
static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

static int
drm_modevent(module_t mod, int type, void *data)
{

        switch (type) {
        case MOD_LOAD:
                TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
                TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
                break;
        }
        return (0);
}

static moduledata_t drm_mod = {
        "drmn",
        drm_modevent,
        0
};
DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);

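/*
 * Core ioctl table, indexed by DRM_IOCTL_NR(cmd).  The flags on each entry
 * are enforced in drm_ioctl() below: DRM_ROOT_ONLY requires privilege
 * (DRM_SUSER), DRM_AUTH requires an authenticated file, DRM_MASTER requires
 * DRM-master status, and DRM_UNLOCKED lets a driver-range ioctl run without
 * the per-device lock.  Entries with a NULL handler at or above
 * DRM_COMMAND_BASE fall through to the driver's own ioctl table.
 */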
static drm_ioctl_desc_t drm_ioctls[256] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

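/*
 * Character-device entry points for the /dev/dri/card%d nodes created by
 * drm_create_cdevs(); d_mmap_single routes GEM object mappings through
 * drm_gem_mmap_single().
 */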
static struct cdevsw drm_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       drm_open,
        .d_read =       drm_read,
        .d_ioctl =      drm_ioctl,
        .d_poll =       drm_poll,
        .d_mmap =       drm_mmap,
        .d_mmap_single = drm_gem_mmap_single,
        .d_name =       "drm",
        .d_flags =      D_TRACKCLOSE
};

static int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
        {0x8086, 0x2772}, /* Intel i945G */
        {0x8086, 0x27A2}, /* Intel i945GM */
        {0x8086, 0x27AE}, /* Intel i945GME */
        {0, 0}
};

static int drm_msi_is_blacklisted(int vendor, int device)
{
        int i = 0;

        for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
                if ((drm_msi_blacklist[i].vendor == vendor) &&
                    (drm_msi_blacklist[i].device == device)) {
                        return 1;
                }
        }

        return 0;
}

int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
        drm_pci_id_list_t *id_entry;
        int vendor, device;

        vendor = pci_get_vendor(kdev);
        device = pci_get_device(kdev);

        if (pci_get_class(kdev) != PCIC_DISPLAY
            || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
                return ENXIO;

        id_entry = drm_find_description(vendor, device, idlist);
        if (id_entry != NULL) {
                if (!device_get_desc(kdev)) {
                        DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
                        device_set_desc(kdev, id_entry->name);
                }
                return 0;
        }

        return ENXIO;
}

int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
        struct drm_device *dev;
        drm_pci_id_list_t *id_entry;
        int error, msicount;

        dev = device_get_softc(kdev);

        dev->device = kdev;

        dev->pci_domain = pci_get_domain(dev->device);
        dev->pci_bus = pci_get_bus(dev->device);
        dev->pci_slot = pci_get_slot(dev->device);
        dev->pci_func = pci_get_function(dev->device);

        dev->pci_vendor = pci_get_vendor(dev->device);
        dev->pci_device = pci_get_device(dev->device);

        if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
                if (drm_msi &&
                    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
                        msicount = pci_msi_count(dev->device);
                        DRM_DEBUG("MSI count = %d\n", msicount);
                        if (msicount > 1)
                                msicount = 1;

                        if (pci_alloc_msi(dev->device, &msicount) == 0) {
                                DRM_INFO("MSI enabled %d message(s)\n",
                                    msicount);
                                dev->msi_enabled = 1;
                                dev->irqrid = 1;
                        }
                }

                dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
                    &dev->irqrid, RF_SHAREABLE);
                if (!dev->irqr) {
                        return (ENOENT);
                }

                dev->irq = (int) rman_get_start(dev->irqr);
        }

        mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
        mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
        mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
        mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
        mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
        sx_init(&dev->dev_struct_lock, "drmslk");

        id_entry = drm_find_description(dev->pci_vendor,
            dev->pci_device, idlist);
        dev->id_entry = id_entry;

        error = drm_load(dev);
        if (error == 0)
                error = drm_create_cdevs(kdev);
        return (error);
}

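/*
 * Create the /dev/dri/card<unit> node for an attached device and stash the
 * softc pointer in si_drv1 so drm_open() can recover it.
 */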
int
drm_create_cdevs(device_t kdev)
{
        struct drm_device *dev;
        int error, unit;

        unit = device_get_unit(kdev);
        dev = device_get_softc(kdev);

        error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
            &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
            DRM_DEV_MODE, "dri/card%d", unit);
        if (error == 0)
                dev->devnode->si_drv1 = dev;
        return (error);
}

int drm_detach(device_t kdev)
{
        struct drm_device *dev;

        dev = device_get_softc(kdev);
        drm_unload(dev);
        if (dev->irqr) {
                bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
                    dev->irqr);
                if (dev->msi_enabled) {
                        pci_release_msi(dev->device);
                        DRM_INFO("MSI released\n");
                }
        }
        return (0);
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
        int i = 0;

        for (i = 0; idlist[i].vendor != 0; i++) {
                if ((idlist[i].vendor == vendor) &&
                    ((idlist[i].device == device) ||
                    (idlist[i].device == 0))) {
                        return &idlist[i];
                }
        }
        return NULL;
}

static int drm_firstopen(struct drm_device *dev)
{
        drm_local_map_t *map;
        int i;

        DRM_LOCK_ASSERT(dev);

        /* prebuild the SAREA */
        i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
            _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;

        if (dev->driver->firstopen)
                dev->driver->firstopen(dev);

        dev->buf_use = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i != 0)
                        return i;
        }

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->lock.lock_queue = 0;
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->last_context = 0;
        dev->if_version = 0;

        dev->buf_sigio = NULL;

        DRM_DEBUG("\n");

        return 0;
}

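/*
 * Tear down the state built up since drm_firstopen(): driver lastclose hook,
 * the IRQ (when not in modeset mode), the unique/busid string, authentication
 * magic lists, drawables, AGP and scatter/gather memory, non-driver maps, DMA
 * buffers, and the hardware lock.  Called with the device lock held.
 */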
static int drm_lastclose(struct drm_device *dev)
{
        drm_magic_entry_t *pt, *next;
        drm_local_map_t *map, *mapsave;
        int i;

        DRM_LOCK_ASSERT(dev);

        DRM_DEBUG("\n");

        if (dev->driver->lastclose != NULL)
                dev->driver->lastclose(dev);

        if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
                drm_irq_uninstall(dev);

        if (dev->unique) {
                free(dev->unique, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }
        /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        free(pt, DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

        DRM_UNLOCK(dev);
        drm_drawable_free_all(dev);
        DRM_LOCK(dev);

        /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp intact until
                 * drm_unload is called.
                 */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound)
                                drm_agp_unbind_memory(entry->handle);
                        drm_agp_free_memory(entry->handle);
                        free(entry, DRM_MEM_AGPLISTS);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
        if (dev->sg != NULL) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
                if (!(map->flags & _DRM_DRIVER))
                        drm_rmmap(dev, map);
        }

        drm_dma_takedown(dev);
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.file_priv = NULL;
                DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
        }

        return 0;
}

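/*
 * One-time device setup run from drm_attach(): map and drawable number
 * allocators, statistics counters, optional AGP and MTRR setup, the context
 * bitmap, GEM, and finally the driver's own load() hook.
 */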
static int drm_load(struct drm_device *dev)
{
        int i, retcode;

        DRM_DEBUG("\n");

        TAILQ_INIT(&dev->maplist);
        dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
        if (dev->map_unrhdr == NULL) {
                DRM_ERROR("Couldn't allocate map number allocator\n");
                return EINVAL;
        }

        drm_mem_init();
        drm_sysctl_init(dev);
        TAILQ_INIT(&dev->files);

        dev->counters = 6;
        dev->types[0] = _DRM_STAT_LOCK;
        dev->types[1] = _DRM_STAT_OPENS;
        dev->types[2] = _DRM_STAT_CLOSES;
        dev->types[3] = _DRM_STAT_IOCTLS;
        dev->types[4] = _DRM_STAT_LOCKS;
        dev->types[5] = _DRM_STAT_UNLOCKS;

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        INIT_LIST_HEAD(&dev->vblank_event_list);

        if (drm_core_has_AGP(dev)) {
                if (drm_device_is_agp(dev))
                        dev->agp = drm_agp_init();
                if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
                    dev->agp == NULL) {
                        DRM_ERROR("Card isn't AGP, or couldn't initialize "
                            "AGP.\n");
                        retcode = ENOMEM;
                        goto error;
                }
                if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
                        if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
                            dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
                                dev->agp->mtrr = 1;
                }
        }

        retcode = drm_ctxbitmap_init(dev);
        if (retcode != 0) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto error;
        }

        dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
        if (dev->drw_unrhdr == NULL) {
                DRM_ERROR("Couldn't allocate drawable number allocator\n");
                retcode = ENOMEM;
                goto error;
        }

        if (dev->driver->driver_features & DRIVER_GEM) {
                retcode = drm_gem_init(dev);
                if (retcode != 0) {
                        DRM_ERROR("Cannot initialize graphics execution "
                            "manager (GEM)\n");
                        goto error1;
                }
        }

        if (dev->driver->load != NULL) {
                DRM_LOCK(dev);
                /* Shared code returns -errno. */
                retcode = -dev->driver->load(dev,
                    dev->id_entry->driver_private);
                if (pci_enable_busmaster(dev->device))
                        DRM_ERROR("Request to enable bus-master failed.\n");
                DRM_UNLOCK(dev);
                if (retcode != 0)
                        goto error;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
            dev->driver->name,
            dev->driver->major,
            dev->driver->minor,
            dev->driver->patchlevel,
            dev->driver->date);

        return 0;

error1:
        delete_unrhdr(dev->drw_unrhdr);
error:
        drm_sysctl_cleanup(dev);
        DRM_LOCK(dev);
        drm_lastclose(dev);
        DRM_UNLOCK(dev);
        if (dev->devnode != NULL)
                destroy_dev(dev->devnode);

        mtx_destroy(&dev->drw_lock);
        mtx_destroy(&dev->vbl_lock);
        mtx_destroy(&dev->irq_lock);
        mtx_destroy(&dev->dev_lock);
        mtx_destroy(&dev->event_lock);
        sx_destroy(&dev->dev_struct_lock);

        return retcode;
}

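/*
 * Undo drm_load() at detach time: sysctl and devnode teardown, context
 * bitmap, GEM, the AGP MTRR, vblank state, a final drm_lastclose(), PCI BAR
 * resources cached by drm_bufs.c, the driver's unload() hook, and the unit
 * number allocators.
 */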
static void drm_unload(struct drm_device *dev)
{
        int i;

        DRM_DEBUG("\n");

        drm_sysctl_cleanup(dev);
        if (dev->devnode != NULL)
                destroy_dev(dev->devnode);

        drm_ctxbitmap_cleanup(dev);

        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_destroy(dev);

        if (dev->agp && dev->agp->mtrr) {
                int __unused retcode;

                retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
                    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
                DRM_DEBUG("mtrr_del = %d", retcode);
        }

        drm_vblank_cleanup(dev);

        DRM_LOCK(dev);
        drm_lastclose(dev);
        DRM_UNLOCK(dev);

        /* Clean up PCI resources allocated by drm_bufs.c.  We're not really
         * worried about resource consumption while the DRM is inactive (between
         * lastclose and firstopen or unload) because these aren't actually
         * taking up KVA, just keeping the PCI resource allocated.
         */
        for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
                if (dev->pcir[i] == NULL)
                        continue;
                bus_release_resource(dev->device, SYS_RES_MEMORY,
                    dev->pcirid[i], dev->pcir[i]);
                dev->pcir[i] = NULL;
        }

        if (dev->agp) {
                free(dev->agp, DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }

        if (dev->driver->unload != NULL) {
                DRM_LOCK(dev);
                dev->driver->unload(dev);
                DRM_UNLOCK(dev);
        }

        delete_unrhdr(dev->drw_unrhdr);
        delete_unrhdr(dev->map_unrhdr);

        drm_mem_uninit();

        if (pci_disable_busmaster(dev->device))
                DRM_ERROR("Request to disable bus-master failed.\n");

        mtx_destroy(&dev->drw_lock);
        mtx_destroy(&dev->vbl_lock);
        mtx_destroy(&dev->irq_lock);
        mtx_destroy(&dev->dev_lock);
        mtx_destroy(&dev->event_lock);
        sx_destroy(&dev->dev_struct_lock);
}

int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_version *version = data;
        int len;

#define DRM_COPY( name, value )                                         \
        len = strlen( value );                                          \
        if ( len > name##_len ) len = name##_len;                       \
        name##_len = strlen( value );                                   \
        if ( len && name ) {                                            \
                if ( DRM_COPY_TO_USER( name, value, len ) )             \
                        return EFAULT;                                  \
        }

        version->version_major = dev->driver->major;
        version->version_minor = dev->driver->minor;
        version->version_patchlevel = dev->driver->patchlevel;

        DRM_COPY(version->name, dev->driver->name);
        DRM_COPY(version->date, dev->driver->date);
        DRM_COPY(version->desc, dev->driver->desc);

        return 0;
}

int
drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
        struct drm_device *dev;
        int retcode;

        dev = kdev->si_drv1;
        if (dev == NULL)
                return (ENXIO);

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        retcode = drm_open_helper(kdev, flags, fmt, p, dev);

        if (retcode == 0) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                DRM_LOCK(dev);
                mtx_lock(&Giant);
                device_busy(dev->device);
                mtx_unlock(&Giant);
                if (!dev->open_count++)
                        retcode = drm_firstopen(dev);
                DRM_UNLOCK(dev);
        }

        return (retcode);
}

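/*
 * Per-open-file teardown.  Runs the driver preclose/postclose hooks, releases
 * GEM objects, reclaims DMA buffers and the hardware lock if the closing file
 * still holds it, and calls drm_lastclose() once the device's last open file
 * goes away.
 */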
void drm_close(void *data)
{
        struct drm_file *file_priv = data;
        struct drm_device *dev = file_priv->dev;
        int retcode = 0;

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        DRM_LOCK(dev);

        if (dev->driver->preclose != NULL)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
            DRM_CURRENTPID, (long)dev->device, dev->open_count);

        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);

        if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
            && dev->lock.file_priv == file_priv) {
                DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
                    DRM_CURRENTPID,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                if (dev->driver->reclaim_buffers_locked != NULL)
                        dev->driver->reclaim_buffers_locked(dev, file_priv);

                drm_lock_free(&dev->lock,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
        } else if (dev->driver->reclaim_buffers_locked != NULL &&
            dev->lock.hw_lock != NULL) {
                /* The lock is required to reclaim buffers */
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                                dev->lock.file_priv = file_priv;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;  /* Got lock */
                        }
                        /* Contention */
                        retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
                            PCATCH, "drmlk2", 0);
                        if (retcode)
                                break;
                }
                if (retcode == 0) {
                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                        drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked)
                drm_reclaim_buffers(dev, file_priv);

        funsetown(&dev->buf_sigio);
        seldrain(&file_priv->event_poll);

        if (dev->driver->postclose != NULL)
                dev->driver->postclose(dev, file_priv);
        TAILQ_REMOVE(&dev->files, file_priv, link);
        free(file_priv, DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        mtx_lock(&Giant);
        device_unbusy(dev->device);
        mtx_unlock(&Giant);
        if (--dev->open_count == 0) {
                retcode = drm_lastclose(dev);
        }

        DRM_UNLOCK(dev);
}

extern drm_ioctl_desc_t drm_compat_ioctls[];

/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
    DRM_STRUCTPROC *p)
{
        struct drm_device *dev = drm_get_device_from_kdev(kdev);
        int retcode = 0;
        drm_ioctl_desc_t *ioctl;
        int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
        int nr = DRM_IOCTL_NR(cmd);
        int is_driver_ioctl = 0;
        struct drm_file *file_priv;

        retcode = devfs_get_cdevpriv((void **)&file_priv);
        if (retcode != 0) {
                DRM_ERROR("can't find authenticator\n");
                return EINVAL;
        }

        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;

        DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
            DRM_CURRENTPID, cmd, nr, (long)dev->device,
            file_priv->authenticated);

        switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
                return 0;

        case FIOSETOWN:
                return fsetown(*(int *)data, &dev->buf_sigio);

        case FIOGETOWN:
                *(int *) data = fgetown(&dev->buf_sigio);
                return 0;
        }

        if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
                DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
                return EINVAL;
        }

#ifdef COMPAT_FREEBSD32
        /*
         * Called whenever a 32-bit process running under a 64-bit
         * kernel performs an ioctl on /dev/drm.
         */
        if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
                /*
                 * Assume that ioctls without an explicit compat
                 * routine will just work.  This may not always be a
                 * good assumption, but it's better than always
                 * failing.
                 */
                ioctl = &drm_compat_ioctls[nr];
        else
#endif
                ioctl = &drm_ioctls[nr];
        /* It's not a core DRM ioctl, try driver-specific. */
        if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
                /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
                nr -= DRM_COMMAND_BASE;
                if (nr > dev->driver->max_ioctl) {
                        DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
                            nr, dev->driver->max_ioctl);
                        return EINVAL;
                }
#ifdef COMPAT_FREEBSD32
                if (SV_CURPROC_FLAG(SV_ILP32) &&
                    nr < *dev->driver->compat_ioctls_nr &&
                    dev->driver->compat_ioctls[nr].func != NULL)
                        ioctl = &dev->driver->compat_ioctls[nr];
                else
#endif
                        ioctl = &dev->driver->ioctls[nr];
                is_driver_ioctl = 1;
        }
        func = ioctl->func;

        if (func == NULL) {
                DRM_DEBUG("no function\n");
                return EINVAL;
        }

        if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
            ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
            ((ioctl->flags & DRM_MASTER) && !file_priv->master))
                return EACCES;

        if (is_driver_ioctl) {
                if ((ioctl->flags & DRM_UNLOCKED) == 0)
                        DRM_LOCK(dev);
                /* shared code returns -errno */
                retcode = -func(dev, data, file_priv);
                if ((ioctl->flags & DRM_UNLOCKED) == 0)
                        DRM_UNLOCK(dev);
        } else {
                retcode = func(dev, data, file_priv);
        }

        if (retcode != 0)
                DRM_DEBUG("    returning %d\n", retcode);
        if (retcode != 0 &&
            (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
                printf(
                    "pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
                    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
                    file_priv->authenticated, retcode);
        }

        return retcode;
}

drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        drm_local_map_t *map;

        DRM_LOCK_ASSERT(dev);
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
                        return map;
        }

        return NULL;
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
        struct sysctl_oid *oid;

        snprintf(dev->busid_str, sizeof(dev->busid_str),
            "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
            dev->pci_slot, dev->pci_func);
        oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
            CTLFLAG_RD, dev->busid_str, 0, NULL);
        if (oid == NULL)
                return (ENOMEM);
        dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
        oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
            "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
        if (oid == NULL)
                return (ENOMEM);

        return (0);
}

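/*
 * Optional Linux ioctl translation.  DRM ioctls live in the Linux ioctl
 * group 0x64; per the comment below, only the IOC_IN/IOC_OUT direction bits
 * differ between the ABIs, so drm_linux_ioctl() swaps them and forwards the
 * request to the native ioctl() path.
 */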
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN     0x6400
#define LINUX_IOCTL_DRM_MAX     0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN    IOC_OUT
#define LINUX_IOC_OUT   IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
        int error;
        int cmd = args->cmd;

        args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
        if (cmd & LINUX_IOC_IN)
                args->cmd |= IOC_IN;
        if (cmd & LINUX_IOC_OUT)
                args->cmd |= IOC_OUT;

        error = ioctl(p, (struct ioctl_args *)args);

        return error;
}
#endif /* DRM_LINUX */

bool
dmi_check_system(const struct dmi_system_id *sysid)
{

        /* XXXKIB */
        return (false);
}