xref: /titanic_44/usr/src/uts/common/io/drm/drm_sunmod.c (revision 159d09a20817016f09b3ea28d1bdada4a336bb91)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Common misc module interfaces of DRM under Solaris
29  */
30 
31 /*
32  * This module calls into gfx and agpmaster misc modules respectively
33  * for generic graphics operations and AGP master device support.
34  */
35 
36 #include "drm_sunmod.h"
37 #include <sys/modctl.h>
38 #include <sys/kmem.h>
39 #include <vm/seg_kmem.h>
40 
41 static struct modlmisc modlmisc = {
42 	&mod_miscops, "DRM common interfaces"
43 };
44 
45 static struct modlinkage modlinkage = {
46 	MODREV_1, (void *)&modlmisc, NULL
47 };
48 
49 static drm_inst_list_t	*drm_inst_head;
50 static kmutex_t	drm_inst_list_lock;
51 
52 static int drm_sun_open(dev_t *, int, int, cred_t *);
53 static int drm_sun_close(dev_t, int, int, cred_t *);
54 static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
55 static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
56     size_t *, uint_t);
57 
58 /*
59  * devmap callbacks for AGP and PCI GART
60  */
61 static int drm_devmap_map(devmap_cookie_t, dev_t,
62     uint_t, offset_t, size_t, void **);
63 static int drm_devmap_dup(devmap_cookie_t, void *,
64     devmap_cookie_t, void **);
65 static void drm_devmap_unmap(devmap_cookie_t, void *,
66     offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);
67 
68 static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
69 static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
70 static void drm_supp_free_drv_entry(dev_info_t *);
71 
72 static struct devmap_callback_ctl drm_devmap_callbacks = {
73 	DEVMAP_OPS_REV,			/* devmap_rev */
74 	drm_devmap_map,			/* devmap_map */
75 	NULL,				/* devmap_access */
76 	drm_devmap_dup,			/* devmap_dup */
77 	drm_devmap_unmap		/* devmap_unmap */
78 };
79 
80 /*
81  * Common device operations structure for all DRM drivers
82  */
83 struct cb_ops drm_cb_ops = {
84 	drm_sun_open,			/* cb_open */
85 	drm_sun_close,			/* cb_close */
86 	nodev,				/* cb_strategy */
87 	nodev,				/* cb_print */
88 	nodev,				/* cb_dump */
89 	nodev,				/* cb_read */
90 	nodev,				/* cb_write */
91 	drm_sun_ioctl,			/* cb_ioctl */
92 	drm_sun_devmap,			/* cb_devmap */
93 	nodev,				/* cb_mmap */
94 	NULL,				/* cb_segmap */
95 	nochpoll,			/* cb_chpoll */
96 	ddi_prop_op,			/* cb_prop_op */
97 	0,				/* cb_stream */
98 	D_NEW | D_MTSAFE | D_DEVMAP	/* cb_flag */
99 };
100 
101 
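/*
 * Loadable module entry points
 */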
102 int
103 _init(void)
104 {
105 	int	error;
106 
107 	if ((error = mod_install(&modlinkage)) != 0) {
108 		return (error);
109 	}
110 
111 	/* initialize the instance list lock */
112 	mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
113 	return (0);
114 }
115 
116 int
117 _fini(void)
118 {
119 	int	err;
120 
121 	if ((err = mod_remove(&modlinkage)) != 0)
122 		return (err);
123 
124 	mutex_destroy(&drm_inst_list_lock);
125 	return (0);
126 }
127 
128 int
129 _info(struct modinfo *modinfop)
130 {
131 	return (mod_info(&modlinkage, modinfop));
132 }
133 
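/*
 * drm_supp_register()
 *
 * Called by a DRM driver during attach(9e).  Allocates an instance entry,
 * forces the driver onto the common cb_ops, initializes the generic
 * graphics (vgatext) support, sets up PCI config space access, attaches
 * the AGP master when the driver requests it, and creates the gfx and drm
 * minor nodes.  Returns an opaque handle on success, NULL on failure.
 */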
134 void *
135 drm_supp_register(dev_info_t *dip, drm_device_t *dp)
136 {
137 	int		error;
138 	char	buf[80];
139 	int		instance = ddi_get_instance(dip);
140 	ddi_acc_handle_t	pci_cfg_handle;
141 	agp_master_softc_t	*agpm;
142 	drm_inst_state_t	*mstate;
143 	drm_inst_list_t		*entry;
144 	gfxp_vgatext_softc_ptr_t gfxp;
145 	struct dev_ops	*devop;
146 
147 	ASSERT(dip != NULL);
148 
149 	entry = drm_supp_alloc_drv_entry(dip);
150 	if (entry == NULL) {
151 		cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
152 		return (NULL);
153 	}
154 	mstate = &entry->disl_state;
155 
156 	/*
157 	 * DRM drivers are required to use common cb_ops
158 	 */
159 	devop = ddi_get_driver(dip);
160 	if (devop->devo_cb_ops != &drm_cb_ops) {
161 		devop->devo_cb_ops = &drm_cb_ops;
162 	}
163 
164 	/* Generic graphics initialization */
165 	gfxp = gfxp_vgatext_softc_alloc();
166 	error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
167 	if (error != DDI_SUCCESS) {
168 		DRM_ERROR("drm_supp_register: failed to init gfx");
169 		goto exit1;
170 	}
171 
172 	/* create a minor node for common graphics ops */
173 	(void) sprintf(buf, "%s%d", GFX_NAME, instance);
174 	error = ddi_create_minor_node(dip, buf, S_IFCHR,
175 	    INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
176 	if (error != DDI_SUCCESS) {
177 		DRM_ERROR("drm_supp_register: "
178 		    "failed to create minor node for gfx");
179 		goto exit2;
180 	}
181 
182 	/* setup mapping for later PCI config space access */
183 	error = pci_config_setup(dip, &pci_cfg_handle);
184 	if (error != DDI_SUCCESS) {
185 		DRM_ERROR("drm_supp_register: "
186 		    "PCI configuration space setup failed");
187 		goto exit2;
188 	}
189 
190 	/* AGP master attach */
191 	agpm = NULL;
192 	if (dp->driver->use_agp) {
193 		DRM_DEBUG("drm_supp_register: driver uses AGP\n");
194 		error = agpmaster_attach(dip, &agpm,
195 		    pci_cfg_handle, INST2NODE1(instance));
196 		if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
197 			DRM_ERROR("drm_supp_register: "
198 			    "AGP master support not available");
199 			goto exit3;
200 		}
201 	}
202 
203 	mutex_enter(&mstate->mis_lock);
204 	mstate->mis_major = ddi_driver_major(dip);
205 	mstate->mis_dip = dip;
206 	mstate->mis_gfxp = gfxp;
207 	mstate->mis_agpm = agpm;
208 	mstate->mis_cfg_hdl = pci_cfg_handle;
209 	mstate->mis_devp = dp;
210 	mutex_exit(&mstate->mis_lock);
211 
212 	/* create minor node for DRM access */
213 	(void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
214 	if (ddi_create_minor_node(dip, buf, S_IFCHR,
215 	    INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
216 		DRM_ERROR("drm_supp_register: failed to create minor node for drm");
217 		goto exit4;
218 	}
219 
220 	return ((void *)mstate);
221 
222 exit4:
223 	if ((dp->driver->use_agp) && agpm)
224 		agpmaster_detach(&agpm);
225 exit3:
226 	pci_config_teardown(&pci_cfg_handle);
227 exit2:
228 	gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
229 exit1:
230 	gfxp_vgatext_softc_free(gfxp);
231 	drm_supp_free_drv_entry(dip);
232 	ddi_remove_minor_node(dip, NULL);
233 
234 	return (NULL);
235 }
236 
237 
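/*
 * drm_supp_unregister()
 *
 * Undo drm_supp_register(): detach the AGP master, tear down PCI config
 * space access, detach the vgatext support, remove the minor nodes and
 * free the instance entry.
 */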
238 int
239 drm_supp_unregister(void *handle)
240 {
241 	drm_inst_list_t		*list;
242 	drm_inst_state_t	*mstate;
243 
244 	list = (drm_inst_list_t *)handle;
245 	mstate = &list->disl_state;
246 	mutex_enter(&mstate->mis_lock);
247 
248 	/* AGP master detach */
249 	if (mstate->mis_agpm != NULL)
250 		agpmaster_detach(&mstate->mis_agpm);
251 
252 	/* free PCI config access handle */
253 	if (mstate->mis_cfg_hdl)
254 		pci_config_teardown(&mstate->mis_cfg_hdl);
255 
256 	/* graphics misc module detach */
257 	if (mstate->mis_gfxp) {
258 		(void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
259 		    mstate->mis_gfxp);
260 		gfxp_vgatext_softc_free(mstate->mis_gfxp);
261 	}
262 
263 	mstate->mis_devp = NULL;
264 
265 	/* remove all minor nodes */
266 	ddi_remove_minor_node(mstate->mis_dip, NULL);
267 	mutex_exit(&mstate->mis_lock);
268 	drm_supp_free_drv_entry(mstate->mis_dip);
269 
270 	return (DDI_SUCCESS);
271 }
272 
273 
274 /*ARGSUSED*/
275 static int
276 drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
277 {
278 	drm_inst_state_t	*mstate;
279 	drm_cminor_t	*mp, *newp;
280 	drm_device_t	*dp;
281 	minor_t		minor;
282 	int		newminor;
283 	int		instance;
284 	int		err;
285 
286 	mstate = drm_sup_devt_to_state(*devp);
287 	/*
288 	 * Return ENXIO for deferred attach so that the system can
289 	 * attach us again.
290 	 */
291 	if (mstate == NULL)
292 		return (ENXIO);
293 
294 	/*
295 	 * The least significant 15 bits are used for the minor number, and
296 	 * the next 3 bits are used for the instance number. All minor numbers
297 	 * are used as follows:
298 	 * 0 -- gfx
299 	 * 1 -- agpmaster
300 	 * 2 -- drm
301 	 * (3, MAX_CLONE_MINOR) -- drm minor node for clone open.
302 	 */
303 	minor = DEV2MINOR(*devp);
304 	instance = DEV2INST(*devp);
305 	ASSERT(minor <= MAX_CLONE_MINOR);
306 
307 	/*
308 	 * No operations for VGA & AGP master devices, always return OK.
309 	 */
310 	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
311 		return (0);
312 
313 	/*
314 	 * From here, we start to process drm
315 	 */
316 
317 	dp = mstate->mis_devp;
318 	if (!dp)
319 		return (ENXIO);
320 
321 	/*
322 	 * The drm driver implements a software lock to serialize access
323 	 * to the graphics hardware on a per-process basis. Before
324 	 * operating on the graphics hardware, all clients, including the
325 	 * kernel and applications, must acquire this lock via the
326 	 * DRM_IOCTL_LOCK ioctl, and release it via DRM_IOCTL_UNLOCK when
327 	 * finished. The drm driver grants r/w permission to the
328 	 * process which holds this lock (the kernel is assumed to have
329 	 * process ID 0).
330 	 *
331 	 * A process might terminate without releasing the drm lock; in
332 	 * that case, the drm driver is responsible for clearing the stale
333 	 * hold. To be informed of process exit, the drm driver uses clone
334 	 * open to guarantee that each call to open(9e) has one corresponding
335 	 * call to close(9e). In most cases, a process will close drm
336 	 * during process termination, so that the drm driver gets a
337 	 * chance to release the drm lock.
338 	 *
339 	 * In fact, a driver cannot know exactly when a process exits.
340 	 * Clone open doesn't address this issue completely: Because of
341 	 * inheritance, child processes inherit file descriptors from
342 	 * their parent. As a result, if the parent exits before its
343 	 * children, the drm close(9e) entry point won't be called until all
344 	 * of its children terminate.
345 	 *
346 	 * Another issue raised by inheritance is that the PID of the
347 	 * process calling the drm close() entry point may not be the same
348 	 * as the one that called open(). The per-process struct is allocated
349 	 * when a process first open()s drm, and released when the process
350 	 * last close()s drm. Since the open() and close() may come from
351 	 * different processes, the PID cannot be used as the key to look up
352 	 * the per-process struct. Instead, we associate a minor number with
353 	 * the per-process struct at open() time, and find the corresponding
354 	 * struct via that minor number when close() is called.
355 	 */
356 	newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
357 	mutex_enter(&dp->dev_lock);
358 	for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
359 	    newminor ++) {
360 		TAILQ_FOREACH(mp, &dp->minordevs, link) {
361 			if (mp->minor == newminor)
362 				break;
363 		}
364 		if (mp == NULL)
365 			goto gotminor;
366 	}
367 
368 	mutex_exit(&dp->dev_lock);
369 	return (EMFILE);
370 
371 gotminor:
372 	TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
373 	newp->minor = newminor;
374 	mutex_exit(&dp->dev_lock);
375 	err = drm_open(dp, newp, flag, otyp, credp);
376 	if (err) {
377 		mutex_enter(&dp->dev_lock);
378 		TAILQ_REMOVE(&dp->minordevs, newp, link);
379 		kmem_free(newp, sizeof (drm_cminor_t));
380 		mutex_exit(&dp->dev_lock);
381 
382 		return (err);
383 	}
384 
385 	/* return a clone minor */
386 	newminor = newminor | (instance << NBITSMNODE);
387 	*devp = makedevice(getmajor(*devp), newminor);
388 	return (err);
389 }
390 
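/*
 * Common close entry point.  The gfx and agpmaster minors need no per-open
 * teardown; DRM clone minors are handed to drm_close(), which releases the
 * per-open state that drm_sun_open() associated with the minor number.
 */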
391 /*ARGSUSED*/
392 static int
393 drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
394 {
395 	drm_inst_state_t	*mstate;
396 	drm_device_t		*dp;
397 	minor_t		minor;
398 	int		ret;
399 
400 	mstate = drm_sup_devt_to_state(dev);
401 	if (mstate == NULL)
402 		return (EBADF);
403 
404 	minor = DEV2MINOR(dev);
405 	ASSERT(minor <= MAX_CLONE_MINOR);
406 	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
407 		return (0);
408 
409 	dp = mstate->mis_devp;
410 	if (dp == NULL) {
411 		DRM_ERROR("drm_sun_close: NULL soft state");
412 		return (ENXIO);
413 	}
414 
415 	ret = drm_close(dp, minor, flag, otyp, credp);
416 
417 	return (ret);
418 }
419 
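/*
 * Common ioctl entry point.  Requests on the gfx and agpmaster minor nodes
 * are passed through to the gfxp_vgatext and agpmaster modules.  For DRM
 * minors, the ioctl number selects a handler from the common drm_ioctls[]
 * table, or from the driver-private table when the number is at or above
 * DRM_COMMAND_BASE, and the handler runs only after the DRM_ROOT_ONLY,
 * DRM_AUTH and DRM_MASTER flags have been checked against the caller.
 */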
420 /*ARGSUSED*/
421 static int
422 drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
423     cred_t *credp, int *rvalp)
424 {
425 	extern drm_ioctl_desc_t drm_ioctls[];
426 
427 	drm_inst_state_t	*mstate;
428 	drm_device_t		*dp;
429 	drm_ioctl_desc_t	*ioctl;
430 	drm_ioctl_t		*func;
431 	drm_file_t		*fpriv;
432 	minor_t		minor;
433 	int		retval;
434 	int		nr;
435 
436 	if (cmd == VIS_GETIDENTIFIER) {
437 		if (ddi_copyout(&text_ident, (void *)arg,
438 		    sizeof (struct vis_identifier), mode))
439 			return (EFAULT);
440 	}
441 
442 	mstate = drm_sup_devt_to_state(dev);
443 	if (mstate == NULL) {
444 		return (EIO);
445 	}
446 
447 	minor = DEV2MINOR(dev);
448 	ASSERT(minor <= MAX_CLONE_MINOR);
449 	switch (minor) {
450 	case GFX_MINOR:
451 		retval = gfxp_vgatext_ioctl(dev, cmd, arg,
452 		    mode, credp, rvalp, mstate->mis_gfxp);
453 		return (retval);
454 
455 	case AGPMASTER_MINOR:
456 		retval = agpmaster_ioctl(dev, cmd, arg, mode,
457 		    credp, rvalp, mstate->mis_agpm);
458 		return (retval);
459 
460 	case DRM_MINOR:
461 	default:	/* DRM cloning minor nodes */
462 		break;
463 	}
464 
465 	dp = mstate->mis_devp;
466 	ASSERT(dp != NULL);
467 
468 	nr = DRM_IOCTL_NR(cmd);
469 	ioctl = &drm_ioctls[nr];
470 	atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);
471 
472 	/* It's not a core DRM ioctl, try driver-specific. */
473 	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
474 		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
475 		nr -= DRM_COMMAND_BASE;
476 		if (nr > dp->driver->max_driver_ioctl) {
477 			DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
478 			    nr, dp->driver->max_driver_ioctl);
479 			return (EINVAL);
480 		}
481 		ioctl = &dp->driver->driver_ioctls[nr];
482 	}
483 
484 	func = ioctl->func;
485 	if (func == NULL) {
486 		return (ENOTSUP);
487 	}
488 
489 	mutex_enter(&dp->dev_lock);
490 	fpriv = drm_find_file_by_proc(dp, credp);
491 	mutex_exit(&dp->dev_lock);
492 	if (fpriv == NULL) {
493 		DRM_ERROR("drm_sun_ioctl: can't find authenticator");
494 		return (EACCES);
495 	}
496 
497 	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
498 	    ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
499 	    ((ioctl->flags & DRM_MASTER) && !fpriv->master))
500 		return (EACCES);
501 
502 	retval = func(dp, arg, fpriv, mode);
503 
504 	return (retval);
505 }
506 
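/*
 * Common devmap entry point.  Mappings on the gfx minor node are handled
 * by the gfxp_vgatext module.  For DRM minors, the offset is used as a
 * handle to look up the drm_local_map_t in the device's map list;
 * registers and the framebuffer are mapped with devmap_devmem_setup(),
 * while SHM, AGP and scatter/gather maps are mapped with
 * devmap_umem_setup() over the memory backing the map.
 */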
507 /*ARGSUSED*/
508 static int
509 drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
510     size_t len, size_t *maplen, uint_t model)
511 {
512 	extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);
513 
514 	drm_inst_state_t	*mstate;
515 	drm_device_t		*dp;
516 	ddi_umem_cookie_t	cookie;
517 	drm_local_map_t		*map;
518 	unsigned long	aperbase;
519 	u_offset_t		handle;
520 	offset_t		koff;
521 	caddr_t			kva;
522 	minor_t			minor;
523 	size_t			length;
524 	int			ret;
525 
526 	static ddi_device_acc_attr_t dev_attr = {
527 		DDI_DEVICE_ATTR_V0,
528 		DDI_NEVERSWAP_ACC,
529 		DDI_STRICTORDER_ACC,
530 	};
531 
532 	mstate = drm_sup_devt_to_state(dev);
533 	if (mstate == NULL)
534 		return (ENXIO);
535 
536 	minor = DEV2MINOR(dev);
537 	switch (minor) {
538 	case GFX_MINOR:
539 		ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
540 		    mstate->mis_gfxp);
541 		return (ret);
542 
543 	case AGPMASTER_MINOR:
544 		return (ENOTSUP);
545 
546 	case DRM_MINOR:
547 		break;
548 
549 	default:
550 		/* DRM cloning nodes */
551 		if (minor > MAX_CLONE_MINOR)
552 			return (EBADF);
553 		break;
554 	}
555 
556 
557 	dp = mstate->mis_devp;
558 	if (dp == NULL) {
559 		DRM_ERROR("drm_sun_devmap: NULL soft state");
560 		return (EINVAL);
561 	}
562 
563 	/*
564 	 * We will address the 32-bit application on 64-bit kernel
565 	 * issue later; for now, we just use the low 32 bits.
566 	 */
567 	handle = (u_offset_t)offset;
568 	handle &= 0xffffffff;
569 	mutex_enter(&dp->dev_lock);
570 	TAILQ_FOREACH(map, &dp->maplist, link) {
571 		if (handle ==
572 		    ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
573 			break;
574 	}
575 
576 	/*
577 	 * For now, the offset is a physical address for register and
578 	 * framebuffer maps, but a kernel virtual address for the others.
579 	 * We may use a hash table to resolve this later.
580 	 */
581 	if (map == NULL) {
582 		TAILQ_FOREACH(map, &dp->maplist, link) {
583 			if (handle == (map->offset & 0xffffffff))
584 				break;
585 		}
586 	}
587 
588 	if (map == NULL) {
589 		u_offset_t	tmp;
590 
591 		mutex_exit(&dp->dev_lock);
592 		cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
593 		    offset, (int)len);
594 		cmn_err(CE_WARN, "Current mapping:\n");
595 		TAILQ_FOREACH(map, &dp->maplist, link) {
596 			tmp = (u_offset_t)((uintptr_t)map->handle) & 0xffffffff;
597 			cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx, type=%d, "
598 			    "offset=0x%lx), handle=%llx, tmp=%lld", map->handle,
599 			    map->size, map->type, map->offset, handle, tmp);
600 		}
601 		return (-1);
602 	}
603 	if (map->flags & _DRM_RESTRICTED) {
604 		mutex_exit(&dp->dev_lock);
605 		cmn_err(CE_WARN, "restricted map\n");
606 		return (-1);
607 	}
608 
609 	mutex_exit(&dp->dev_lock);
610 	switch (map->type) {
611 	case _DRM_FRAME_BUFFER:
612 	case _DRM_REGISTERS:
613 		{
614 			int	regno;
615 			off_t	regoff;
616 
617 			regno = drm_get_pci_index_reg(dp->dip,
618 			    map->offset, (uint_t)len, &regoff);
619 			if (regno < 0) {
620 				DRM_ERROR("devmap: failed to get register"
621 				    " offset=0x%llx, len=0x%lx", handle, len);
622 				return (EINVAL);
623 			}
624 
625 			ret = devmap_devmem_setup(dhp, dp->dip, NULL,
626 			    regno, (offset_t)regoff, len, PROT_ALL,
627 			    0, &dev_attr);
628 			if (ret != 0) {
629 				*maplen = 0;
630 				DRM_ERROR("devmap: failed, regno=%d, type=%d,"
631 				    " handle=0x%llx, offset=0x%llx, len=0x%lx",
632 				    regno, map->type, handle, offset, len);
633 				return (ret);
634 			}
635 			*maplen = len;
636 			return (ret);
637 		}
638 
639 	case _DRM_SHM:
640 		if (map->drm_umem_cookie == NULL)
641 			return (EINVAL);
642 		length = ptob(btopr(map->size));
643 		ret = devmap_umem_setup(dhp, dp->dip, NULL,
644 		    map->drm_umem_cookie, 0, length,
645 		    PROT_ALL, IOMEM_DATA_CACHED, NULL);
646 		if (ret != 0) {
647 			*maplen = 0;
648 			return (ret);
649 		}
650 		*maplen = length;
651 
652 		return (DDI_SUCCESS);
653 
654 	case _DRM_AGP:
655 		if (dp->agp == NULL) {
656 			cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap AGP "
657 			    "memory before AGP support is enabled");
658 			return (DDI_FAILURE);
659 		}
660 
661 		aperbase = dp->agp->base;
662 		koff = map->offset - aperbase;
663 		length = ptob(btopr(len));
664 		kva = map->dev_addr;
665 		cookie = gfxp_umem_cookie_init(kva, length);
666 		if (cookie == NULL) {
667 			cmn_err(CE_WARN, "devmap: failed to get umem_cookie");
668 			return (DDI_FAILURE);
669 		}
670 
671 		if ((ret = devmap_umem_setup(dhp, dp->dip,
672 		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
673 		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr)) < 0) {
674 			gfxp_umem_cookie_destroy(cookie);
675 			cmn_err(CE_WARN, "devmap: failed, retval=%d", ret);
676 			return (DDI_FAILURE);
677 		}
678 		*maplen = length;
679 		break;
680 
681 	case _DRM_SCATTER_GATHER:
682 		koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
683 		kva = map->dev_addr + koff;
684 		length = ptob(btopr(len));
685 		if (length > map->size) {
686 			cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p, "
687 			    "mapsize=0x%lx, len=0x%lx", map->offset,
688 			    dp->sg->virtual, map->size, len);
689 			return (DDI_FAILURE);
690 		}
691 		cookie = gfxp_umem_cookie_init(kva, length);
692 		if (cookie == NULL) {
693 			cmn_err(CE_WARN, "devmap: failed to get umem_cookie");
694 			return (DDI_FAILURE);
695 		}
696 		ret = devmap_umem_setup(dhp, dp->dip,
697 		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
698 		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
699 		if (ret != 0) {
700 			cmn_err(CE_WARN, "drm_sun_devmap: umem_setup failed");
701 			gfxp_umem_cookie_destroy(cookie);
702 			return (DDI_FAILURE);
703 		}
704 		*maplen = length;
705 		break;
706 
707 	default:
708 		return (DDI_FAILURE);
709 	}
710 	return (DDI_SUCCESS);
711 
712 }
713 
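/*
 * devmap context-management callbacks used for the AGP and scatter/gather
 * mappings above.  They maintain a reference count on the shared umem
 * cookie, so the cookie is destroyed only when the last mapping derived
 * from it (via dup or partial unmap) goes away.
 */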
714 /*ARGSUSED*/
715 static int
716 drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
717     offset_t offset, size_t len, void **new_priv)
718 {
719 	devmap_handle_t			*dhp;
720 	drm_inst_state_t		*statep;
721 	struct ddi_umem_cookie 	*cp;
722 
723 	statep = drm_sup_devt_to_state(dev);
724 	ASSERT(statep != NULL);
725 
726 	/*
727 	 * This driver only supports MAP_SHARED,
728 	 * and doesn't support MAP_PRIVATE
729 	 */
730 	if (flags & MAP_PRIVATE) {
731 		cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
732 		return (EINVAL);
733 	}
734 
735 	mutex_enter(&statep->dis_ctxlock);
736 	dhp = (devmap_handle_t *)dhc;
737 	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
738 	cp->cook_refcnt = 1;
739 	mutex_exit(&statep->dis_ctxlock);
740 	*new_priv = statep;
741 
742 	return (0);
743 }
744 
745 /*ARGSUSED*/
746 static void
747 drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
748     devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
749     void **new_pvtp2)
750 {
751 	devmap_handle_t		*dhp;
752 	devmap_handle_t		*ndhp;
753 	drm_inst_state_t		*statep;
754 	struct ddi_umem_cookie	*cp;
755 	struct ddi_umem_cookie	*ncp;
756 
757 	dhp = (devmap_handle_t *)dhc;
758 	statep = (drm_inst_state_t *)pvtp;
759 
760 	mutex_enter(&statep->dis_ctxlock);
761 	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
762 	if (new_dhp1 != NULL) {
763 		ndhp = (devmap_handle_t *)new_dhp1;
764 		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
765 		ncp->cook_refcnt ++;
766 		*new_pvtp1 = statep;
767 		ASSERT(ncp == cp);
768 	}
769 
770 	if (new_dhp2 != NULL) {
771 		ndhp = (devmap_handle_t *)new_dhp2;
772 		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
773 		ncp->cook_refcnt ++;
774 		*new_pvtp2 = statep;
775 		ASSERT(ncp == cp);
776 	}
777 
778 	cp->cook_refcnt --;
779 	if (cp->cook_refcnt == 0) {
780 		gfxp_umem_cookie_destroy(dhp->dh_cookie);
781 		dhp->dh_cookie = NULL;
782 	}
783 	mutex_exit(&statep->dis_ctxlock);
784 }
785 
786 
787 /*ARGSUSED*/
788 static int
789 drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
790     void **new_pvtp)
791 {
792 	devmap_handle_t			*dhp;
793 	drm_inst_state_t    *statep;
794 	struct ddi_umem_cookie *cp;
795 
796 	statep = (drm_inst_state_t *)pvtp;
797 	mutex_enter(&statep->dis_ctxlock);
798 	dhp = (devmap_handle_t *)dhc;
799 	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
800 	cp->cook_refcnt ++;
801 	mutex_exit(&statep->dis_ctxlock);
802 	*new_pvtp = statep;
803 
804 	return (0);
805 }
806 
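/*
 * Extract the driver instance number encoded in a drm dev_t.
 */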
807 int
808 drm_dev_to_instance(dev_t dev)
809 {
810 	return (DEV2INST(dev));
811 }
812 
813 /*
814  * drm_supp_alloc_drv_entry()
815  *
816  * Description:
817  *	Create a DRM entry and add it into the instance list (drm_inst_head).
818  *	Note that we don't allow duplicate entries.
819  */
820 static drm_inst_list_t *
821 drm_supp_alloc_drv_entry(dev_info_t *dip)
822 {
823 	drm_inst_list_t	**plist;
824 	drm_inst_list_t	*list;
825 	drm_inst_list_t	*entry;
826 
827 	/* protect the driver list */
828 	mutex_enter(&drm_inst_list_lock);
829 	plist = &drm_inst_head;
830 	list = *plist;
831 	while (list) {
832 		if (list->disl_state.mis_dip == dip) {
833 			mutex_exit(&drm_inst_list_lock);
834 			cmn_err(CE_WARN, "%s%d already registered",
835 			    ddi_driver_name(dip), ddi_get_instance(dip));
836 			return (NULL);
837 		}
838 		plist = &list->disl_next;
839 		list = list->disl_next;
840 	}
841 
842 	/* "dip" is not registered, create new one and add to list */
843 	entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
844 	*plist = entry;
845 	entry->disl_state.mis_dip = dip;
846 	mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
847 	mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
848 	mutex_exit(&drm_inst_list_lock);
849 
850 	return (entry);
851 
852 }	/* drm_supp_alloc_drv_entry */
853 
854 /*
855  * drm_supp_free_drv_entry()
856  */
857 static void
858 drm_supp_free_drv_entry(dev_info_t *dip)
859 {
860 	drm_inst_list_t		*list;
861 	drm_inst_list_t		**plist;
862 	drm_inst_state_t	*mstate;
863 
864 	/* protect the driver list */
865 	mutex_enter(&drm_inst_list_lock);
866 	plist = &drm_inst_head;
867 	list = *plist;
868 	while (list) {
869 		if (list->disl_state.mis_dip == dip) {
870 			*plist = list->disl_next;
871 			mstate = &list->disl_state;
872 			mutex_destroy(&mstate->mis_lock);
873 			mutex_destroy(&mstate->dis_ctxlock);
874 			kmem_free(list, sizeof (*list));
875 			mutex_exit(&drm_inst_list_lock);
876 			return;
877 		}
878 		plist = &list->disl_next;
879 		list = list->disl_next;
880 	}
881 	mutex_exit(&drm_inst_list_lock);
882 
883 }	/* drm_supp_free_drv_entry() */
884 
885 /*
886  * drm_sup_devt_to_state()
887  *
888  * Description:
889  *	Get the soft state of a DRM instance by device number.
890  */
891 static drm_inst_state_t *
892 drm_sup_devt_to_state(dev_t dev)
893 {
894 	drm_inst_list_t	*list;
895 	drm_inst_state_t	*mstate;
896 	major_t	major = getmajor(dev);
897 	int		instance = DEV2INST(dev);
898 
899 	mutex_enter(&drm_inst_list_lock);
900 	list = drm_inst_head;
901 	while (list) {
902 		mstate = &list->disl_state;
903 		mutex_enter(&mstate->mis_lock);
904 
905 		if ((mstate->mis_major == major) &&
906 		    (ddi_get_instance(mstate->mis_dip) == instance)) {
907 			mutex_exit(&mstate->mis_lock);
908 			mutex_exit(&drm_inst_list_lock);
909 			return (mstate);
910 		}
911 
912 		list = list->disl_next;
913 		mutex_exit(&mstate->mis_lock);
914 	}
915 
916 	mutex_exit(&drm_inst_list_lock);
917 	return (NULL);
918 
919 }	/* drm_sup_devt_to_state() */
920 
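/*
 * drm_supp_get_irq()
 *
 * Return the interrupt line assigned to the device, as read from the
 * interrupt-line register in PCI configuration space.
 */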
921 int
922 drm_supp_get_irq(void *handle)
923 {
924 	drm_inst_list_t *list;
925 	drm_inst_state_t    *mstate;
926 	int		irq;
927 
928 	list = (drm_inst_list_t *)handle;
929 	mstate = &list->disl_state;
930 	ASSERT(mstate != NULL);
931 	irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
932 	return (irq);
933 }
934 
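/*
 * drm_supp_device_capability()
 *
 * Walk the device's PCI capability list looking for the capability with
 * ID 'capid'.  Returns a nonzero value when the capability is present,
 * or 0 when the device has no capability list or the capability is absent.
 */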
935 int
936 drm_supp_device_capability(void *handle, int capid)
937 {
938 	drm_inst_list_t *list;
939 	drm_inst_state_t    *mstate;
940 	uint8_t		cap = 0;
941 	uint16_t	caps_ptr;
942 
943 	list = (drm_inst_list_t *)handle;
944 	mstate = &list->disl_state;
945 	ASSERT(mstate != NULL);
946 
947 	/* does the device have a capabilities list? */
948 	if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
949 	    PCI_CONF_CAP_MASK) == 0)
950 		return (0);
951 
952 	caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
953 	while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
954 		cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
955 		if ((cap & PCI_CONF_CAPID_MASK) == capid)
956 			return (cap);
957 		caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
958 		    caps_ptr + PCI_CAP_NEXT_PTR);
959 	}
960 
961 	return (0);
962 }
963