/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Common misc module interfaces of DRM under Solaris
 */

/*
 * This module calls into gfx and agpmaster misc modules respectively
 * for generic graphics operations and AGP master device support.
 */

#include "drm_sunmod.h"
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <vm/seg_kmem.h>

static struct modlmisc modlmisc = {
	&mod_miscops, "DRM common interfaces"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

static drm_inst_list_t	*drm_inst_head;
static kmutex_t	drm_inst_list_lock;

static int drm_sun_open(dev_t *, int, int, cred_t *);
static int drm_sun_close(dev_t, int, int, cred_t *);
static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
    size_t *, uint_t);

/*
 * devmap callbacks for AGP and PCI GART
 */
static int drm_devmap_map(devmap_cookie_t, dev_t,
    uint_t, offset_t, size_t, void **);
static int drm_devmap_dup(devmap_cookie_t, void *,
    devmap_cookie_t, void **);
static void drm_devmap_unmap(devmap_cookie_t, void *,
    offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);

static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
static void drm_supp_free_drv_entry(dev_info_t *);

static struct devmap_callback_ctl drm_devmap_callbacks = {
	DEVMAP_OPS_REV,			/* devmap_rev */
	drm_devmap_map,			/* devmap_map */
	NULL,				/* devmap_access */
	drm_devmap_dup,			/* devmap_dup */
	drm_devmap_unmap		/* devmap_unmap */
};

/*
 * Common device operations structure for all DRM drivers
 */
struct cb_ops drm_cb_ops = {
	drm_sun_open,			/* cb_open */
	drm_sun_close,			/* cb_close */
	nodev,				/* cb_strategy */
	nodev,				/* cb_print */
	nodev,				/* cb_dump */
	nodev,				/* cb_read */
	nodev,				/* cb_write */
	drm_sun_ioctl,			/* cb_ioctl */
	drm_sun_devmap,			/* cb_devmap */
	nodev,				/* cb_mmap */
	NULL,				/* cb_segmap */
	nochpoll,			/* cb_chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* cb_stream */
	D_NEW | D_MTSAFE | D_DEVMAP	/* cb_flag */
};

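/*
 * Loadable module entry points: install/remove the misc module and
 * set up/tear down the global instance list lock.
 */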
int
_init(void)
{
	int	error;

	if ((error = mod_install(&modlinkage)) != 0) {
		return (error);
	}

	/* initialize the instance list lock */
	mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
	return (0);
}

int
_fini(void)
{
	int	err;

	if ((err = mod_remove(&modlinkage)) != 0)
		return (err);

	mutex_destroy(&drm_inst_list_lock);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

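/*
 * drm_supp_register()
 *
 * Description:
 *	Called from a DRM driver's attach(9E) entry point. Allocates an
 *	instance list entry, forces the driver to use the common cb_ops,
 *	attaches the generic graphics (gfx) support, creates the gfx and
 *	drm minor nodes, maps PCI config space and, if requested by the
 *	driver, attaches AGP master support. Returns an opaque handle to
 *	be passed back to drm_supp_unregister(), or NULL on failure.
 */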
void *
drm_supp_register(dev_info_t *dip, drm_device_t *dp)
{
	int		error;
	char	buf[80];
	int		instance = ddi_get_instance(dip);
	ddi_acc_handle_t	pci_cfg_handle;
	agp_master_softc_t	*agpm;
	drm_inst_state_t	*mstate;
	drm_inst_list_t		*entry;
	gfxp_vgatext_softc_ptr_t gfxp;
	struct dev_ops	*devop;

	ASSERT(dip != NULL);

	entry = drm_supp_alloc_drv_entry(dip);
	if (entry == NULL) {
		cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
		return (NULL);
	}
	mstate = &entry->disl_state;

	/*
	 * DRM drivers are required to use common cb_ops
	 */
	devop = ddi_get_driver(dip);
	if (devop->devo_cb_ops != &drm_cb_ops) {
		devop->devo_cb_ops = &drm_cb_ops;
	}

	/* Generic graphics initialization */
	gfxp = gfxp_vgatext_softc_alloc();
	error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_register: failed to init gfx");
		goto exit1;
	}

	/* create a minor node for common graphics ops */
	(void) sprintf(buf, "%s%d", GFX_NAME, instance);
	error = ddi_create_minor_node(dip, buf, S_IFCHR,
	    INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_register: "
		    "failed to create minor node for gfx");
		goto exit2;
	}

	/* setup mapping for later PCI config space access */
	error = pci_config_setup(dip, &pci_cfg_handle);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_register: "
		    "PCI configuration space setup failed");
		goto exit2;
	}

	/* AGP master attach */
	agpm = NULL;
	if (dp->driver->use_agp) {
		DRM_DEBUG("drm_supp_register: driver uses AGP\n");
		error = agpmaster_attach(dip, &agpm,
		    pci_cfg_handle, INST2NODE1(instance));
		if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
			DRM_ERROR("drm_supp_register: "
			    "AGP master support not available");
			goto exit3;
		}
	}

	mutex_enter(&mstate->mis_lock);
	mstate->mis_major = ddi_driver_major(dip);
	mstate->mis_dip = dip;
	mstate->mis_gfxp = gfxp;
	mstate->mis_agpm = agpm;
	mstate->mis_cfg_hdl = pci_cfg_handle;
	mstate->mis_devp = dp;
	mutex_exit(&mstate->mis_lock);

	/* create minor node for DRM access */
	(void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
	if (ddi_create_minor_node(dip, buf, S_IFCHR,
	    INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
		DRM_ERROR("drm_supp_register: "
		    "failed to create minor node for drm");
		goto exit4;
	}

	return ((void *)mstate);

exit4:
	if ((dp->driver->use_agp) && agpm)
		agpmaster_detach(&agpm);
exit3:
	pci_config_teardown(&pci_cfg_handle);
exit2:
	gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
exit1:
	gfxp_vgatext_softc_free(gfxp);
	drm_supp_free_drv_entry(dip);
	ddi_remove_minor_node(dip, NULL);

	return (NULL);
}


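/*
 * drm_supp_unregister()
 *
 * Description:
 *	Called from a DRM driver's detach(9E) entry point. Undoes the work
 *	of drm_supp_register(): detaches AGP master support, tears down the
 *	PCI config space mapping, detaches the gfx support, removes the
 *	minor nodes and frees the instance list entry.
 */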
int
drm_supp_unregister(void *handle)
{
	drm_inst_list_t		*list;
	drm_inst_state_t	*mstate;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	mutex_enter(&mstate->mis_lock);

	/* AGP master detach */
	if (mstate->mis_agpm != NULL)
		agpmaster_detach(&mstate->mis_agpm);

	/* free PCI config access handle */
	if (mstate->mis_cfg_hdl)
		pci_config_teardown(&mstate->mis_cfg_hdl);

	/* graphics misc module detach */
	if (mstate->mis_gfxp) {
		(void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
		    mstate->mis_gfxp);
		gfxp_vgatext_softc_free(mstate->mis_gfxp);
	}

	mstate->mis_devp = NULL;

	/* remove all minor nodes */
	ddi_remove_minor_node(mstate->mis_dip, NULL);
	mutex_exit(&mstate->mis_lock);
	drm_supp_free_drv_entry(mstate->mis_dip);

	return (DDI_SUCCESS);
}


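/*
 * drm_sun_open()
 *
 * Description:
 *	Common open(9E) entry point. Opens of the gfx and agpmaster minor
 *	nodes need no per-open state. For drm minors a free clone minor is
 *	allocated, recorded on the per-device minordevs list, passed to
 *	drm_open(), and returned to the caller through *devp.
 */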
/*ARGSUSED*/
static int
drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	drm_inst_state_t	*mstate;
	drm_cminor_t	*mp, *newp;
	drm_device_t	*dp;
	minor_t		minor;
	int		newminor;
	int		instance;
	int		err;

	mstate = drm_sup_devt_to_state(*devp);
	/*
	 * Return ENXIO for deferred attach so that the system can
	 * attach us again.
	 */
	if (mstate == NULL)
		return (ENXIO);

	/*
	 * The least significant 15 bits of the dev_t are used for the
	 * minor number and the next 3 bits for the instance number.
	 * The minor numbers within an instance are used as follows:
	 * 0 -- gfx
	 * 1 -- agpmaster
	 * 2 -- drm
	 * (3, MAX_CLONE_MINOR) -- drm minor node for clone open.
	 */
	minor = DEV2MINOR(*devp);
	instance = DEV2INST(*devp);
	ASSERT(minor <= MAX_CLONE_MINOR);

	/*
	 * No operations are needed for the VGA and AGP master devices;
	 * always return OK.
	 */
	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
		return (0);

	/*
	 * From here on we are dealing with the drm minor nodes.
	 */

	dp = mstate->mis_devp;
	if (!dp)
		return (ENXIO);

	/*
	 * The drm driver implements a software lock to serialize access
	 * to the graphics hardware at per-process granularity. Before
	 * touching the hardware, every client, kernel or application,
	 * must acquire this lock via the DRM_IOCTL_LOCK ioctl and
	 * release it via DRM_IOCTL_UNLOCK when it is done. The drm
	 * driver grants r/w permission to the process holding the lock
	 * (the kernel is assumed to have process ID 0).
	 *
	 * A process might terminate without releasing the drm lock; in
	 * that case the drm driver is responsible for clearing the hold.
	 * To be informed of process exit, the drm driver uses clone
	 * opens to guarantee that each call to open(9E) has exactly one
	 * corresponding call to close(9E). In most cases a process
	 * closes drm during termination, giving the drm driver a chance
	 * to release the drm lock.
	 *
	 * In fact, a driver cannot know exactly when a process exits,
	 * and clone opens don't address this issue completely: because
	 * of inheritance, child processes inherit file descriptors from
	 * their parent. As a result, if the parent exits before its
	 * children, the drm close(9E) entry point won't be called until
	 * all of its children terminate.
	 *
	 * Another issue brought up by inheritance is that the PID of the
	 * process calling the drm close() entry point may differ from
	 * the one that called open(). The per-process struct is
	 * allocated on a process's first open() of drm and released on
	 * its last close(). Since the open() and close() may come from
	 * different processes, the PID cannot be used as the key to look
	 * up the per-process struct. Instead, we associate a minor
	 * number with the per-process struct at open() time and find the
	 * corresponding struct via that minor number when close() is
	 * called.
	 */
	newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
	mutex_enter(&dp->dev_lock);
	for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
	    newminor++) {
		TAILQ_FOREACH(mp, &dp->minordevs, link) {
			if (mp->minor == newminor)
				break;
		}
		if (mp == NULL)
			goto gotminor;
	}

	mutex_exit(&dp->dev_lock);
	return (EMFILE);

gotminor:
	TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
	newp->minor = newminor;
	mutex_exit(&dp->dev_lock);
	err = drm_open(dp, newp, flag, otyp, credp);
	if (err) {
		mutex_enter(&dp->dev_lock);
		TAILQ_REMOVE(&dp->minordevs, newp, link);
		kmem_free(newp, sizeof (drm_cminor_t));
		mutex_exit(&dp->dev_lock);

		return (err);
	}

	/* return a clone minor */
	newminor = newminor | (instance << NBITSMNODE);
	*devp = makedevice(getmajor(*devp), newminor);
	return (err);
}

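/*
 * drm_sun_close()
 *
 * Description:
 *	Common close(9E) entry point. Opens of the gfx and agpmaster minor
 *	nodes need no teardown; for drm (clone) minors the per-minor state
 *	is released via drm_close().
 */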
/*ARGSUSED*/
static int
drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	drm_inst_state_t	*mstate;
	drm_device_t		*dp;
	minor_t		minor;
	int		ret;

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL)
		return (EBADF);

	minor = DEV2MINOR(dev);
	ASSERT(minor <= MAX_CLONE_MINOR);
	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
		return (0);

	dp = mstate->mis_devp;
	if (dp == NULL) {
		DRM_ERROR("drm_sun_close: NULL soft state");
		return (ENXIO);
	}

	ret = drm_close(dp, minor, flag, otyp, credp);

	return (ret);
}

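/*
 * drm_sun_ioctl()
 *
 * Description:
 *	Common ioctl(9E) entry point. Requests on the gfx and agpmaster
 *	minor nodes are forwarded to the gfx and agpmaster misc modules;
 *	everything else is looked up in the core or driver-specific DRM
 *	ioctl tables and dispatched with the caller's file state.
 */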
/*ARGSUSED*/
static int
drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	extern drm_ioctl_desc_t drm_ioctls[];

	drm_inst_state_t	*mstate;
	drm_device_t		*dp;
	drm_ioctl_desc_t	*ioctl;
	drm_ioctl_t		*func;
	drm_file_t		*fpriv;
	minor_t		minor;
	int		retval;
	int		nr;

	if (cmd == VIS_GETIDENTIFIER) {
		if (ddi_copyout(&text_ident, (void *)arg,
		    sizeof (struct vis_identifier), mode))
			return (EFAULT);
	}

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL) {
		return (EIO);
	}

	minor = DEV2MINOR(dev);
	ASSERT(minor <= MAX_CLONE_MINOR);
	switch (minor) {
	case GFX_MINOR:
		retval = gfxp_vgatext_ioctl(dev, cmd, arg,
		    mode, credp, rvalp, mstate->mis_gfxp);
		return (retval);

	case AGPMASTER_MINOR:
		retval = agpmaster_ioctl(dev, cmd, arg, mode,
		    credp, rvalp, mstate->mis_agpm);
		return (retval);

	case DRM_MINOR:
	default:	/* DRM cloning minor nodes */
		break;
	}

	dp = mstate->mis_devp;
	ASSERT(dp != NULL);

	nr = DRM_IOCTL_NR(cmd);
	ioctl = &drm_ioctls[nr];
	atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);

	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr > dp->driver->max_driver_ioctl) {
			DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
			    nr, dp->driver->max_driver_ioctl);
			return (EINVAL);
		}
		ioctl = &dp->driver->driver_ioctls[nr];
	}

	func = ioctl->func;
	if (func == NULL) {
		return (ENOTSUP);
	}

	mutex_enter(&dp->dev_lock);
	fpriv = drm_find_file_by_proc(dp, credp);
	mutex_exit(&dp->dev_lock);
	if (fpriv == NULL) {
		DRM_ERROR("drm_sun_ioctl: can't find authenticator");
		return (EACCES);
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
	    ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !fpriv->master))
		return (EACCES);

	fpriv->dev = dev;
	fpriv->credp = credp;

	retval = func(dp, arg, fpriv, mode);

	return (retval);
}

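/*
 * drm_sun_devmap()
 *
 * Description:
 *	Common devmap(9E) entry point. gfx mappings are handed to the gfx
 *	misc module. For drm minors the offset is matched against the GEM
 *	object list and the device map list, and the mapping is then set
 *	up according to the map type (registers, framebuffer, SHM, AGP,
 *	scatter/gather or TTM memory).
 */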
/*ARGSUSED*/
static int
drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
    size_t len, size_t *maplen, uint_t model)
{
	extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);

	drm_inst_state_t	*mstate;
	drm_device_t		*dp;
	ddi_umem_cookie_t	cookie;
	drm_local_map_t		*map = NULL;
	unsigned long	aperbase;
	u_offset_t		handle;
	offset_t		koff;
	caddr_t			kva;
	minor_t			minor;
	size_t			length;
	int			ret;

	static ddi_device_acc_attr_t dev_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
	};
	static ddi_device_acc_attr_t gem_dev_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_MERGING_OK_ACC
	};

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL)
		return (ENXIO);

	minor = DEV2MINOR(dev);
	switch (minor) {
	case GFX_MINOR:
		ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
		    mstate->mis_gfxp);
		return (ret);

	case AGPMASTER_MINOR:
		return (ENOTSUP);

	case DRM_MINOR:
		break;

	default:
		/* DRM cloning nodes */
		if (minor > MAX_CLONE_MINOR)
			return (EBADF);
		break;
	}


	dp = mstate->mis_devp;
	if (dp == NULL) {
		DRM_ERROR("drm_sun_devmap: NULL soft state");
		return (EINVAL);
	}

	mutex_enter(&dp->dev_lock);

	if (dp->driver->use_gem == 1) {
		struct idr_list *entry;
		drm_cminor_t *mp;

		mp = drm_find_file_by_minor(dp, minor);
		if (!mp) {
			mutex_exit(&dp->dev_lock);
			DRM_ERROR("drm_sun_devmap: can't find authenticator");
			return (EACCES);
		}

		spin_lock(&dp->struct_mutex);
		idr_list_for_each(entry, &(mp->fpriv->object_idr)) {
			if ((uintptr_t)entry->obj == (u_offset_t)offset) {
				map = entry->obj->map;
				goto goon;
			}
		}
goon:
		spin_unlock(&dp->struct_mutex);
	}

	if (map == NULL) {
		/*
		 * The case of a 32-bit application on a 64-bit kernel
		 * will be dealt with later; for now, just use the low
		 * 32 bits.
		 */
		handle = (u_offset_t)offset;
		handle &= 0xffffffff;

		TAILQ_FOREACH(map, &dp->maplist, link) {
			if (handle ==
			    ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
				break;
		}

		/*
		 * Temporary workaround: the offset is a physical address
		 * for register and framebuffer maps but a kernel virtual
		 * address for the others. A hash table may be used to
		 * solve this properly later.
		 */
		if (map == NULL) {
			TAILQ_FOREACH(map, &dp->maplist, link) {
				if (handle == (map->offset & 0xffffffff))
					break;
			}
		}
	}

	if (map == NULL) {
		u_offset_t	tmp;

		mutex_exit(&dp->dev_lock);
		cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
		    offset, (int)len);
		cmn_err(CE_WARN, "Current mapping:\n");
		TAILQ_FOREACH(map, &dp->maplist, link) {
			tmp = (u_offset_t)((uintptr_t)map->handle) &
			    0xffffffff;
			cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx,type=%d,"
			    "offset=0x%lx), handle=%llx, tmp=%lld",
			    map->handle, map->size, map->type, map->offset,
			    handle, tmp);
		}
		return (-1);
	}
	if (map->flags & _DRM_RESTRICTED) {
		mutex_exit(&dp->dev_lock);
		cmn_err(CE_WARN, "restricted map\n");
		return (-1);
	}

	mutex_exit(&dp->dev_lock);
	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		{
			int	regno;
			off_t	regoff;

			regno = drm_get_pci_index_reg(dp->dip,
			    map->offset, (uint_t)len, &regoff);
			if (regno < 0) {
				DRM_ERROR("devmap: failed to get register"
				    " offset=0x%llx, len=0x%x", handle, len);
				return (EINVAL);
			}

			ret = devmap_devmem_setup(dhp, dp->dip, NULL,
			    regno, (offset_t)regoff, len, PROT_ALL,
			    0, &dev_attr);
			if (ret != 0) {
				*maplen = 0;
				DRM_ERROR("devmap: failed, regno=%d,type=%d,"
				    " handle=0x%x, offset=0x%llx, len=0x%x",
				    regno, map->type, handle, offset, len);
				return (ret);
			}
			*maplen = len;
			return (ret);
		}

	case _DRM_SHM:
		if (map->drm_umem_cookie == NULL)
			return (EINVAL);
		length = ptob(btopr(map->size));
		ret = devmap_umem_setup(dhp, dp->dip, NULL,
		    map->drm_umem_cookie, 0, length,
		    PROT_ALL, IOMEM_DATA_CACHED, NULL);
		if (ret != 0) {
			*maplen = 0;
			return (ret);
		}
		*maplen = length;

		return (DDI_SUCCESS);

	case _DRM_AGP:
		if (dp->agp == NULL) {
			cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap "
			    "AGP memory before AGP support is enabled");
			return (DDI_FAILURE);
		}

		aperbase = dp->agp->base;
		koff = map->offset - aperbase;
		length = ptob(btopr(len));
		kva = map->dev_addr;
		cookie = gfxp_umem_cookie_init(kva, length);
		if (cookie == NULL) {
			cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
			return (DDI_FAILURE);
		}

		if ((ret = devmap_umem_setup(dhp, dp->dip,
		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP,
		    &dev_attr)) != 0) {
			gfxp_umem_cookie_destroy(cookie);
			cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
			return (DDI_FAILURE);
		}
		*maplen = length;
		break;

	case _DRM_SCATTER_GATHER:
		koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
		kva = map->dev_addr + koff;
		length = ptob(btopr(len));
		if (length > map->size) {
			cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p,"
			    "mapsize=0x%lx,len=0x%lx", map->offset,
			    dp->sg->virtual, map->size, len);
			return (DDI_FAILURE);
		}
		cookie = gfxp_umem_cookie_init(kva, length);
		if (cookie == NULL) {
			cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
			return (DDI_FAILURE);
		}
		ret = devmap_umem_setup(dhp, dp->dip,
		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
		if (ret != 0) {
			cmn_err(CE_WARN, "sun_devmap: umem_setup fail");
			gfxp_umem_cookie_destroy(cookie);
			return (DDI_FAILURE);
		}
		*maplen = length;
		break;

	case _DRM_TTM:
		if (map->drm_umem_cookie == NULL)
			return (EINVAL);

		ret = gfxp_devmap_umem_setup(dhp, dp->dip,
		    NULL, map->drm_umem_cookie, 0, map->size, PROT_ALL,
		    IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP,
		    &gem_dev_attr);
		if (ret != 0) {
			cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
			return (DDI_FAILURE);
		}
		*maplen = map->size;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);

}

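/*
 * drm_devmap_map()
 *
 * Description:
 *	devmap_map(9E) callback for the AGP and scatter/gather mappings
 *	set up above. Rejects MAP_PRIVATE mappings and initializes the
 *	reference count on the umem cookie backing the mapping.
 */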
/*ARGSUSED*/
static int
drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
    offset_t offset, size_t len, void **new_priv)
{
	devmap_handle_t			*dhp;
	drm_inst_state_t		*statep;
	struct ddi_umem_cookie		*cp;

	statep = drm_sup_devt_to_state(dev);
	ASSERT(statep != NULL);

	/*
	 * This driver only supports MAP_SHARED,
	 * and doesn't support MAP_PRIVATE
	 */
	if (flags & MAP_PRIVATE) {
		cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
		return (EINVAL);
	}

	mutex_enter(&statep->dis_ctxlock);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt = 1;
	mutex_exit(&statep->dis_ctxlock);
	*new_priv = statep;

	return (0);
}

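/*
 * drm_devmap_unmap()
 *
 * Description:
 *	devmap_unmap(9E) callback. Takes a reference for each remaining
 *	piece of the mapping, drops the reference of the unmapped piece,
 *	and destroys the umem cookie once the last reference goes away.
 */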
/*ARGSUSED*/
static void
drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
    void **new_pvtp2)
{
	devmap_handle_t		*dhp;
	devmap_handle_t		*ndhp;
	drm_inst_state_t	*statep;
	struct ddi_umem_cookie	*cp;
	struct ddi_umem_cookie	*ncp;

	dhp = (devmap_handle_t *)dhc;
	statep = (drm_inst_state_t *)pvtp;

	mutex_enter(&statep->dis_ctxlock);
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	if (new_dhp1 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp1;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp1 = statep;
		ASSERT(ncp == cp);
	}

	if (new_dhp2 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp2;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp2 = statep;
		ASSERT(ncp == cp);
	}

	cp->cook_refcnt--;
	if (cp->cook_refcnt == 0) {
		gfxp_umem_cookie_destroy(dhp->dh_cookie);
		dhp->dh_cookie = NULL;
	}
	mutex_exit(&statep->dis_ctxlock);
}


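/*
 * drm_devmap_dup()
 *
 * Description:
 *	devmap_dup(9E) callback. A duplicated mapping shares the same umem
 *	cookie, so just take another reference on it.
 */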
/*ARGSUSED*/
static int
drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
    void **new_pvtp)
{
	devmap_handle_t		*dhp;
	drm_inst_state_t	*statep;
	struct ddi_umem_cookie	*cp;

	statep = (drm_inst_state_t *)pvtp;
	mutex_enter(&statep->dis_ctxlock);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt++;
	mutex_exit(&statep->dis_ctxlock);
	*new_pvtp = statep;

	return (0);
}

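/*
 * drm_dev_to_instance()
 *
 * Description:
 *	Return the driver instance number encoded in a DRM dev_t.
 */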
int
drm_dev_to_instance(dev_t dev)
{
	return (DEV2INST(dev));
}

/*
 * drm_supp_alloc_drv_entry()
 *
 * Description:
 *	Create a DRM entry and add it into the instance list (drm_inst_head).
 *	Note that we don't allow a duplicated entry
 */
static drm_inst_list_t *
drm_supp_alloc_drv_entry(dev_info_t *dip)
{
	drm_inst_list_t	**plist;
	drm_inst_list_t	*list;
	drm_inst_list_t	*entry;

	/* protect the driver list */
	mutex_enter(&drm_inst_list_lock);
	plist = &drm_inst_head;
	list = *plist;
	while (list) {
		if (list->disl_state.mis_dip == dip) {
			mutex_exit(&drm_inst_list_lock);
			cmn_err(CE_WARN, "%s%d already registered",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			return (NULL);
		}
		plist = &list->disl_next;
		list = list->disl_next;
	}

	/* "dip" is not registered, create new one and add to list */
	entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
	*plist = entry;
	entry->disl_state.mis_dip = dip;
	mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
	mutex_exit(&drm_inst_list_lock);

	return (entry);

}	/* drm_supp_alloc_drv_entry */

/*
 * drm_supp_free_drv_entry()
 */
static void
drm_supp_free_drv_entry(dev_info_t *dip)
{
	drm_inst_list_t		*list;
	drm_inst_list_t		**plist;
	drm_inst_state_t	*mstate;

	/* protect the driver list */
	mutex_enter(&drm_inst_list_lock);
	plist = &drm_inst_head;
	list = *plist;
	while (list) {
		if (list->disl_state.mis_dip == dip) {
			*plist = list->disl_next;
			mstate = &list->disl_state;
			mutex_destroy(&mstate->mis_lock);
			mutex_destroy(&mstate->dis_ctxlock);
			kmem_free(list, sizeof (*list));
			mutex_exit(&drm_inst_list_lock);
			return;
		}
		plist = &list->disl_next;
		list = list->disl_next;
	}
	mutex_exit(&drm_inst_list_lock);

}	/* drm_supp_free_drv_entry() */

/*
 * drm_sup_devt_to_state()
 *
 * Description:
 *	Get the soft state of a DRM instance by device number
 */
static drm_inst_state_t *
drm_sup_devt_to_state(dev_t dev)
{
	drm_inst_list_t	*list;
	drm_inst_state_t	*mstate;
	major_t	major = getmajor(dev);
	int		instance = DEV2INST(dev);

	mutex_enter(&drm_inst_list_lock);
	list = drm_inst_head;
	while (list) {
		mstate = &list->disl_state;
		mutex_enter(&mstate->mis_lock);

		if ((mstate->mis_major == major) &&
		    (ddi_get_instance(mstate->mis_dip) == instance)) {
			mutex_exit(&mstate->mis_lock);
			mutex_exit(&drm_inst_list_lock);
			return (mstate);
		}

		list = list->disl_next;
		mutex_exit(&mstate->mis_lock);
	}

	mutex_exit(&drm_inst_list_lock);
	return (NULL);

}	/* drm_sup_devt_to_state() */

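/*
 * drm_supp_get_irq()
 *
 * Description:
 *	Return the interrupt line of the device, read from the PCI
 *	configuration space mapped at registration time.
 */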
int
drm_supp_get_irq(void *handle)
{
	drm_inst_list_t *list;
	drm_inst_state_t	*mstate;
	int		irq;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	ASSERT(mstate != NULL);
	irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
	return (irq);
}

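/*
 * drm_supp_device_capability()
 *
 * Description:
 *	Walk the PCI capabilities list of the device and return the
 *	matching capability ID, or 0 if the device doesn't have it.
 */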
int
drm_supp_device_capability(void *handle, int capid)
{
	drm_inst_list_t *list;
	drm_inst_state_t	*mstate;
	uint8_t		cap = 0;
	uint16_t	caps_ptr;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	ASSERT(mstate != NULL);

	/* has capabilities list ? */
	if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
	    PCI_CONF_CAP_MASK) == 0)
		return (0);

	caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
	while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
		cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
		if ((cap & PCI_CONF_CAPID_MASK) == capid)
			return (cap);
		caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
		    caps_ptr + PCI_CAP_NEXT_PTR);
	}

	return (0);
}
1010