/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/limits.h>
#include <sys/sysent.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/drm_core.h>
#include <dev/drm2/drm_global.h>
#include <dev/drm2/drm_sarea.h>
#include <dev/drm2/drm_mode.h>

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
    DRM_DEBUGBITS_FAILED_IOCTL);
#else
int drm_debug_flag = 0;
#endif
int drm_notyet_flag = 0;

unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);
static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot);

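/*
 * Module event handler: on MOD_LOAD, fetch the "drm.debug" and "drm.notyet"
 * loader tunables into the corresponding global flags.
 */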
static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};
DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);

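/*
 * Core ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).  The DRM_AUTH,
 * DRM_MASTER, DRM_ROOT_ONLY and DRM_UNLOCKED flags on each entry are
 * checked in drm_ioctl() before the handler is called.
 */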
static drm_ioctl_desc_t		  drm_ioctls[256] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

static struct cdevsw drm_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	drm_open,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_poll =	drm_poll,
	.d_mmap =	drm_mmap,
	.d_mmap_single = drm_mmap_single,
	.d_name =	"drm",
	.d_flags =	D_TRACKCLOSE
};

static int drm_msi = 1;	/* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
	{0x8086, 0x2772}, /* Intel i945G	*/
	{0x8086, 0x27A2}, /* Intel i945GM	*/
	{0x8086, 0x27AE}, /* Intel i945GME	*/
	{0, 0}
};

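/*
 * Return nonzero if MSI should not be used for this device, either because
 * the driver's use_msi() hook vetoes it or because the device appears in
 * the static blacklist above.
 */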
static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
{
	int i = 0;

	if (dev->driver->use_msi != NULL) {
		int use_msi;

		use_msi = dev->driver->use_msi(dev, flags);

		return (!use_msi);
	}

	/* TODO: Maybe move this to a callback in i915? */
	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
		    (drm_msi_blacklist[i].device == dev->pci_device)) {
			return 1;
		}
	}

	return 0;
}

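/*
 * PCI probe helper: verify the device is a display-class device, match it
 * against the driver's PCI ID list, and set the device description.
 */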
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY
	    || (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
	    pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
		return ENXIO;

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (!device_get_desc(kdev)) {
			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
			device_set_desc(kdev, id_entry->name);
		}
		return 0;
	}

	return ENXIO;
}

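/*
 * PCI attach helper: record the device's PCI identity, optionally set up
 * MSI and allocate the IRQ resource, initialize the per-device locks, then
 * run drm_load() and create the /dev/dri/card* node.
 */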
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int error, msicount;

	dev = device_get_softc(kdev);

	dev->device = kdev;

	dev->pci_domain = pci_get_domain(dev->device);
	dev->pci_bus = pci_get_bus(dev->device);
	dev->pci_slot = pci_get_slot(dev->device);
	dev->pci_func = pci_get_function(dev->device);

	dev->pci_vendor = pci_get_vendor(dev->device);
	dev->pci_device = pci_get_device(dev->device);
	dev->pci_subvendor = pci_get_subvendor(dev->device);
	dev->pci_subdevice = pci_get_subdevice(dev->device);

	id_entry = drm_find_description(dev->pci_vendor,
	    dev->pci_device, idlist);
	dev->id_entry = id_entry;

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		if (drm_msi &&
		    !drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
			msicount = pci_msi_count(dev->device);
			DRM_DEBUG("MSI count = %d\n", msicount);
			if (msicount > 1)
				msicount = 1;

			if (pci_alloc_msi(dev->device, &msicount) == 0) {
				DRM_INFO("MSI enabled %d message(s)\n",
				    msicount);
				dev->msi_enabled = 1;
				dev->irqrid = 1;
			}
		}

		dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
		    &dev->irqrid, RF_SHAREABLE);
		if (!dev->irqr) {
			return (ENOENT);
		}

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
	mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
	sx_init(&dev->dev_struct_lock, "drmslk");

	error = drm_load(dev);
	if (error)
		goto error;

	error = drm_create_cdevs(kdev);
	if (error)
		goto error;

	return (error);
error:
	if (dev->irqr) {
		bus_release_resource(dev->device, SYS_RES_IRQ,
		    dev->irqrid, dev->irqr);
	}
	if (dev->msi_enabled) {
		pci_release_msi(dev->device);
	}
	return (error);
}

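/*
 * Create the /dev/dri/card<unit> character device node for this device.
 */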
int
drm_create_cdevs(device_t kdev)
{
	struct drm_device *dev;
	int error, unit;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

	error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
	    &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
	    DRM_DEV_MODE, "dri/card%d", unit);
	if (error == 0)
		dev->devnode->si_drv1 = dev;
	return (error);
}

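/*
 * Detach helper: unload the driver state and release the IRQ resource and
 * any MSI messages allocated by drm_attach().
 */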
int drm_detach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	drm_unload(dev);
	if (dev->irqr) {
		bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
		    dev->irqr);
		if (dev->msi_enabled) {
			pci_release_msi(dev->device);
			DRM_INFO("MSI released\n");
		}
	}
	return (0);
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return &idlist[i];
		}
	}
	return NULL;
}

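/*
 * Per-device setup performed when the first file descriptor is opened:
 * build the SAREA map, run the driver's firstopen hook, set up DMA if
 * supported, and reset the magic lists and lock/context state.
 */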
static int drm_firstopen(struct drm_device *dev)
{
	drm_local_map_t *map;
	int i;

	DRM_LOCK_ASSERT(dev);

	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
	    _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	dev->buf_use = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
		i = drm_dma_setup(dev);
		if (i != 0)
			return i;
	}

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->lock.lock_queue = 0;
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev->irq_enabled = 0;
	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	dev->buf_sigio = NULL;

	DRM_DEBUG("\n");

	return 0;
}

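/*
 * Tear-down run when the last file descriptor closes (and from the unload
 * path): call the driver's lastclose hook, uninstall the IRQ handler for
 * non-KMS drivers, and release magic entries, AGP memory, scatter/gather
 * memory, non-driver maps, DMA buffers, and the hardware lock.
 */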
static int drm_lastclose(struct drm_device *dev)
{
	drm_magic_entry_t *pt, *next;
	drm_local_map_t *map, *mapsave;
	int i;

	DRM_LOCK_ASSERT(dev);

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->unique) {
		free(dev->unique, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			free(pt, DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	DRM_UNLOCK(dev);
	drm_drawable_free_all(dev);
	DRM_LOCK(dev);

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			free(entry, DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
		if (!(map->flags & _DRM_DRIVER))
			drm_rmmap(dev, map);
	}

	drm_dma_takedown(dev);
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
	}

	return 0;
}

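/*
 * One-time per-device initialization run from drm_attach(): set up the map
 * and drawable unit-number allocators, sysctl nodes, statistics counters,
 * AGP and MTRR state, the context bitmap, GEM (if the driver uses it), and
 * finally call the driver's load hook.
 */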
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	TAILQ_INIT(&dev->maplist);
	dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
	if (dev->map_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate map number allocator\n");
		return EINVAL;
	}

	drm_mem_init();
	drm_sysctl_init(dev);
	TAILQ_INIT(&dev->files);

	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	INIT_LIST_HEAD(&dev->vblank_event_list);

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	retcode = drm_ctxbitmap_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error;
	}

	dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
	if (dev->drw_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate drawable number allocator\n");
		retcode = ENOMEM;
		goto error;
	}

	if (dev->driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode != 0) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error1;
		}
	}

	if (dev->driver->load != NULL) {
		DRM_LOCK(dev);
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->device))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK(dev);
		if (retcode != 0)
			goto error1;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error1:
	delete_unrhdr(dev->drw_unrhdr);
	drm_gem_destroy(dev);
error:
	drm_ctxbitmap_cleanup(dev);
	drm_sysctl_cleanup(dev);
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	mtx_destroy(&dev->drw_lock);
	mtx_destroy(&dev->vbl_lock);
	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->dev_lock);
	mtx_destroy(&dev->event_lock);
	sx_destroy(&dev->dev_struct_lock);

	return retcode;
}

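/*
 * Reverse of drm_load(), run from drm_detach(): destroy the device node,
 * GEM, MTRR and AGP state, vblank bookkeeping, PCI memory resources, the
 * driver's own state, the unit-number allocators, and the per-device locks.
 */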
static void drm_unload(struct drm_device *dev)
{
	int i;

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	drm_ctxbitmap_cleanup(dev);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	if (dev->agp && dev->agp->mtrr) {
		int __unused retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	drm_vblank_cleanup(dev);

	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->device, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (dev->agp) {
		free(dev->agp, DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}

	if (dev->driver->unload != NULL) {
		DRM_LOCK(dev);
		dev->driver->unload(dev);
		DRM_UNLOCK(dev);
	}

	delete_unrhdr(dev->drw_unrhdr);
	delete_unrhdr(dev->map_unrhdr);

	drm_mem_uninit();

	if (pci_disable_busmaster(dev->device))
		DRM_ERROR("Request to disable bus-master failed.\n");

	mtx_destroy(&dev->drw_lock);
	mtx_destroy(&dev->vbl_lock);
	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->dev_lock);
	mtx_destroy(&dev->event_lock);
	sx_destroy(&dev->dev_struct_lock);
}

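/*
 * DRM_IOCTL_VERSION handler: copy the driver's version numbers and the
 * name/date/desc strings out to userland.
 */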
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version *version = data;
	int len;

#define DRM_COPY( name, value )						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return EFAULT;				\
	}

	version->version_major		= dev->driver->major;
	version->version_minor		= dev->driver->minor;
	version->version_patchlevel	= dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

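/*
 * cdev open entry point: create the per-file state via drm_open_helper(),
 * bump the open count, and run drm_firstopen() on the first open.
 */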
int
drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
	struct drm_device *dev;
	int retcode;

	dev = kdev->si_drv1;
	if (dev == NULL)
		return (ENXIO);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	retcode = drm_open_helper(kdev, flags, fmt, p, dev);

	if (retcode == 0) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		DRM_LOCK(dev);
		mtx_lock(&Giant);
		device_busy(dev->device);
		mtx_unlock(&Giant);
		if (!dev->open_count++)
			retcode = drm_firstopen(dev);
		DRM_UNLOCK(dev);
	}

	return (retcode);
}

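/*
 * Per-open file teardown, invoked when the file descriptor is closed:
 * release the GEM objects and hardware lock held by this file, reclaim its
 * DMA buffers, free the per-file state, and run drm_lastclose() when the
 * open count drops to zero.
 */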
void drm_close(void *data)
{
	struct drm_file *file_priv = data;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->device, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of hardware at this
		 * point, possibly processed via a callback to the X server.
		 */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
				dev->lock.file_priv = file_priv;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);
	seldrain(&file_priv->event_poll);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	TAILQ_REMOVE(&dev->files, file_priv, link);
	free(file_priv, DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	mtx_lock(&Giant);
	device_unbusy(dev->device);
	mtx_unlock(&Giant);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}

extern drm_ioctl_desc_t drm_compat_ioctls[];

/*
 * drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
    DRM_STRUCTPROC *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
	int nr = DRM_IOCTL_NR(cmd);
	int is_driver_ioctl = 0;
	struct drm_file *file_priv;

	retcode = devfs_get_cdevpriv((void **)&file_priv);
	if (retcode != 0) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, nr, (long)dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case FIOSETOWN:
		return fsetown(*(int *)data, &dev->buf_sigio);

	case FIOGETOWN:
		*(int *) data = fgetown(&dev->buf_sigio);
		return 0;
	}

	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
		return EINVAL;
	}

#ifdef COMPAT_FREEBSD32
	/*
	 * Called whenever a 32-bit process running under a 64-bit
	 * kernel performs an ioctl on /dev/drm.
	 */
	if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
		/*
		 * Assume that ioctls without an explicit compat
		 * routine will just work.  This may not always be a
		 * good assumption, but it's better than always
		 * failing.
		 */
		ioctl = &drm_compat_ioctls[nr];
	else
#endif
		ioctl = &drm_ioctls[nr];
	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr >= dev->driver->max_ioctl) {
			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
			    nr, dev->driver->max_ioctl);
			return EINVAL;
		}
#ifdef COMPAT_FREEBSD32
		if (SV_CURPROC_FLAG(SV_ILP32) &&
		    nr < *dev->driver->compat_ioctls_nr &&
		    dev->driver->compat_ioctls[nr].func != NULL)
			ioctl = &dev->driver->compat_ioctls[nr];
		else
#endif
			ioctl = &dev->driver->ioctls[nr];
		is_driver_ioctl = 1;
	}
	func = ioctl->func;

	if (func == NULL) {
		DRM_DEBUG("no function\n");
		return EINVAL;
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return EACCES;

	if (is_driver_ioctl) {
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			DRM_LOCK(dev);
		/* shared code returns -errno */
		retcode = -func(dev, data, file_priv);
		if ((ioctl->flags & DRM_UNLOCKED) == 0)
			DRM_UNLOCK(dev);
	} else {
		retcode = func(dev, data, file_priv);
	}

	if (retcode != 0)
		DRM_DEBUG("    returning %d\n", retcode);
	if (retcode != 0 &&
	    (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
		printf(
"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
		    DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
		    file_priv->authenticated, retcode);
	}

	return retcode;
}

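/*
 * Return the shared-memory map that contains the hardware lock (the SAREA),
 * or NULL if none has been created yet.
 */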
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
	drm_local_map_t *map;

	DRM_LOCK_ASSERT(dev);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			return map;
	}

	return NULL;
}

int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	snprintf(dev->busid_str, sizeof(dev->busid_str),
	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	     dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (ENOMEM);

	return (0);
}

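/*
 * d_mmap_single entry point: hand the request to TTM if the driver set up a
 * TTM buffer-object device, otherwise to GEM, otherwise fail with ENODEV.
 */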
static int
drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct drm_device *dev;

	dev = drm_get_device_from_kdev(kdev);
	if (dev->drm_ttm_bdev != NULL) {
		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
		    obj_res, nprot));
	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
	} else {
		return (ENODEV);
	}
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */

static int
drm_core_init(void *arg)
{

	drm_global_init();

#if DRM_LINUX
	linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
	return 0;
}

static void
drm_core_exit(void *arg)
{

#if DRM_LINUX
	linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */

	drm_global_release();
}

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    drm_core_exit, NULL);

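/*
 * Minimal DMI matching helpers: dmi_found() compares a single table entry
 * against the smbios.planar.* kernel environment variables, and
 * dmi_check_system() walks a match table, invoking the per-entry callback
 * for each entry that matches.
 */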
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	char *hw_vendor, *hw_prod;
	int i, slot;
	bool res;

	hw_vendor = kern_getenv("smbios.planar.maker");
	hw_prod = kern_getenv("smbios.planar.product");
	res = true;
	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		default:
			res = false;
			goto out;
		}
	}
out:
	freeenv(hw_vendor);
	freeenv(hw_prod);

	return (res);
}

bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	bool res;

	for (res = false, dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			res = true;
			if (dsi->callback != NULL && dsi->callback(dsi))
				break;
		}
	}
	return (res);
}