xref: /titanic_44/usr/src/uts/common/io/drm/drm_sunmod.c (revision 4a634bb80136cc001d14ab96addd9915105e5223)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Common misc module interfaces of DRM under Solaris
31  */
32 
33 /*
34  * This module calls into gfx and agpmaster misc modules respectively
35  * for generic graphics operations and AGP master device support.
36  */
37 
38 #include "drm_sunmod.h"
39 #include <sys/modctl.h>
40 #include <sys/kmem.h>
41 #include <vm/seg_kmem.h>
42 
/*
 * Loadable-module linkage.  This is a misc module (no dev_ops of its
 * own); DRM leaf drivers depend on it for the common cb_ops entry
 * points and the gfx/agpmaster support glue below.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "DRM common interfaces %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
50 
51 static drm_inst_list_t	*drm_inst_head;
52 static kmutex_t	drm_inst_list_lock;
53 
54 static int drm_sun_open(dev_t *, int, int, cred_t *);
55 static int drm_sun_close(dev_t, int, int, cred_t *);
56 static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
57 static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
58     size_t *, uint_t);
59 
60 /*
61  * devmap callbacks for AGP and PCI GART
62  */
63 static int drm_devmap_map(devmap_cookie_t, dev_t,
64     uint_t, offset_t, size_t, void **);
65 static int drm_devmap_dup(devmap_cookie_t, void *,
66     devmap_cookie_t, void **);
67 static void drm_devmap_unmap(devmap_cookie_t, void *,
68     offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);
69 
70 static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
71 static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
72 static void drm_supp_free_drv_entry(dev_info_t *);
73 
/*
 * devmap callback vector used for AGP and scatter/gather mappings.
 * The callbacks reference-count the shared umem cookie across segment
 * map/dup/unmap operations (see drm_devmap_map/dup/unmap below) so the
 * cookie is destroyed only when the last mapping goes away.
 */
static struct devmap_callback_ctl drm_devmap_callbacks = {
		DEVMAP_OPS_REV, 		/* devmap_rev */
		drm_devmap_map,				/* devmap_map */
		NULL,			/* devmap_access */
		drm_devmap_dup,			/* devmap_dup */
		drm_devmap_unmap 		/* devmap_unmap */
};
81 
82 /*
83  * Common device operations structure for all DRM drivers
84  */
/*
 * Common device operations structure for all DRM drivers.
 * drm_supp_register() forces every registering leaf driver's dev_ops
 * to point here, so all DRM drivers share these open/close/ioctl/devmap
 * entry points.  D_DEVMAP is set because mapping device memory is the
 * primary access path; read/write/strategy are unsupported (nodev).
 */
struct cb_ops drm_cb_ops = {
	drm_sun_open,			/* cb_open */
	drm_sun_close,			/* cb_close */
	nodev,					/* cb_strategy */
	nodev,					/* cb_print */
	nodev,					/* cb_dump */
	nodev,					/* cb_read */
	nodev,					/* cb_write */
	drm_sun_ioctl,		/* cb_ioctl */
	drm_sun_devmap,		/* cb_devmap */
	nodev,					/* cb_mmap */
	NULL,			/* cb_segmap */
	nochpoll,				/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	0,					/* cb_stream */
	D_NEW | D_MTSAFE |D_DEVMAP	/* cb_flag */
};
102 
103 
104 int
105 _init(void)
106 {
107 	int	error;
108 
109 	if ((error = mod_install(&modlinkage)) != 0) {
110 		return (error);
111 	}
112 
113 	/* initialize the instance list lock */
114 	mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
115 	return (0);
116 }
117 
118 int
119 _fini(void)
120 {
121 	int	err;
122 
123 	if ((err = mod_remove(&modlinkage)) != 0)
124 		return (err);
125 
126 	mutex_destroy(&drm_inst_list_lock);
127 	return (0);
128 }
129 
130 int
131 _info(struct modinfo *modinfop)
132 {
133 	return (mod_info(&modlinkage, modinfop));
134 }
135 
/*
 * drm_supp_register()
 *
 * Called by a DRM leaf driver from its attach(9E) routine.  Allocates
 * the per-instance support state, forces the driver onto the common
 * cb_ops, attaches the generic gfx layer, creates the gfx minor node,
 * maps PCI config space, optionally attaches AGP master support, and
 * finally creates the drm minor node.
 *
 * Returns an opaque handle (really the drm_inst_list_t entry) on
 * success, or NULL on failure with all partially-acquired resources
 * released via the fallthrough exit ladder below.
 */
void *
drm_supp_register(dev_info_t *dip, drm_device_t *dp)
{
	int		error;
	char	buf[80];	/* minor node name: "<prefix><instance>" */
	int		instance = ddi_get_instance(dip);
	ddi_acc_handle_t	pci_cfg_handle;
	agp_master_softc_t	*agpm;
	drm_inst_state_t	*mstate;
	drm_inst_list_t		*entry;
	gfxp_vgatext_softc_ptr_t gfxp;
	struct dev_ops	*devop;

	ASSERT(dip != NULL);

	/* fails (returns NULL) if this dip is already registered */
	entry = drm_supp_alloc_drv_entry(dip);
	if (entry == NULL) {
		cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
		return (NULL);
	}
	mstate = &entry->disl_state;

	/*
	 * DRM drivers are required to use common cb_ops
	 */
	devop = ddi_get_driver(dip);
	if (devop->devo_cb_ops != &drm_cb_ops) {
		devop->devo_cb_ops = &drm_cb_ops;
	}

	/* Generic graphics initialization */
	gfxp = gfxp_vgatext_softc_alloc();
	error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_regiter: failed to init gfx");
		goto exit1;
	}

	/* create a minor node for common graphics ops */
	(void) sprintf(buf, "%s%d", GFX_NAME, instance);
	error = ddi_create_minor_node(dip, buf, S_IFCHR,
	    INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_regiter: "
		    "failed to create minor node for gfx");
		goto exit2;
	}

	/* setup mapping for later PCI config space access */
	error = pci_config_setup(dip, &pci_cfg_handle);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_regiter: "
		    "PCI configuration space setup failed");
		goto exit2;
	}

	/* AGP master attach; only fatal if the driver requires AGP */
	agpm = NULL;
	if (dp->driver->use_agp) {
		DRM_DEBUG("drm_supp_regiter: driver use AGP\n");
		error = agpmaster_attach(dip, &agpm,
		    pci_cfg_handle, INST2NODE1(instance));
		if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
			DRM_ERROR("drm_supp_regiter: "
			    "AGP master support not available");
			goto exit3;
		}
	}

	/* publish the instance state under its own lock */
	mutex_enter(&mstate->mis_lock);
	mstate->mis_major = ddi_driver_major(dip);
	mstate->mis_dip = dip;
	mstate->mis_gfxp = gfxp;
	mstate->mis_agpm = agpm;
	mstate->mis_cfg_hdl = pci_cfg_handle;
	mstate->mis_devp = dp;
	mutex_exit(&mstate->mis_lock);

	/* create minor node for DRM access */
	(void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
	if (ddi_create_minor_node(dip, buf, S_IFCHR,
	    INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
		DRM_ERROR("supp_regiter: faled to create minor node for drm");
		goto exit4;
	}

	return ((void *)mstate);

	/*
	 * Error unwind: labels fall through so each failure point
	 * releases everything acquired before it.  ddi_remove_minor_node
	 * with NULL removes any minor nodes created so far (it is a
	 * no-op if none were created).
	 */
exit4:
	if ((dp->driver->use_agp) && agpm)
		agpmaster_detach(&agpm);
exit3:
	pci_config_teardown(&pci_cfg_handle);
exit2:
	gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
exit1:
	gfxp_vgatext_softc_free(gfxp);
	drm_supp_free_drv_entry(dip);
	ddi_remove_minor_node(dip, NULL);

	return (NULL);
}
238 
239 
/*
 * drm_supp_unregister()
 *
 * Counterpart of drm_supp_register(); called from a leaf driver's
 * detach(9E) with the handle returned at registration.  Tears down,
 * in order: AGP master support, PCI config access, the generic gfx
 * layer, and all minor nodes; then frees the instance-list entry
 * (which also destroys mis_lock, so the entry must not be used after
 * drm_supp_free_drv_entry() returns).
 */
int
drm_supp_unregister(void *handle)
{
	drm_inst_list_t		*list;
	drm_inst_state_t	*mstate;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	mutex_enter(&mstate->mis_lock);

	/* AGP master detach */
	if (mstate->mis_agpm != NULL)
		agpmaster_detach(&mstate->mis_agpm);

	/* free PCI config access handle */
	if (mstate->mis_cfg_hdl)
		pci_config_teardown(&mstate->mis_cfg_hdl);

	/* graphics misc module detach */
	if (mstate->mis_gfxp) {
		(void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
		    mstate->mis_gfxp);
		gfxp_vgatext_softc_free(mstate->mis_gfxp);
	}

	mstate->mis_devp = NULL;

	/* remove all minor nodes */
	ddi_remove_minor_node(mstate->mis_dip, NULL);
	mutex_exit(&mstate->mis_lock);
	drm_supp_free_drv_entry(mstate->mis_dip);

	return (DDI_SUCCESS);
}
274 
275 
276 /*ARGSUSED*/
277 static int
278 drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
279 {
280 	drm_inst_state_t	*mstate;
281 	drm_cminor_t	*mp, *newp;
282 	drm_device_t	*dp;
283 	minor_t		minor;
284 	int		newminor;
285 	int		instance;
286 	int		err;
287 
288 	mstate = drm_sup_devt_to_state(*devp);
289 	/*
290 	 * return ENXIO for deferred attach so that system can
291 	 * attach us again.
292 	 */
293 	if (mstate == NULL)
294 		return (ENXIO);
295 
296 	/*
297 	 * The lest significant 15 bits are used for minor_number, and
298 	 * the mid 3 bits are used for instance number. All minor numbers
299 	 * are used as follows:
300 	 * 0 -- gfx
301 	 * 1 -- agpmaster
302 	 * 2 -- drm
303 	 * (3, MAX_CLONE_MINOR) -- drm minor node for clone open.
304 	 */
305 	minor = DEV2MINOR(*devp);
306 	instance = DEV2INST(*devp);
307 	ASSERT(minor <= MAX_CLONE_MINOR);
308 
309 	/*
310 	 * No operations for VGA & AGP mater devices, always return OK.
311 	 */
312 	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
313 		return (0);
314 
315 	/*
316 	 * From here, we start to process drm
317 	 */
318 
319 	dp = mstate->mis_devp;
320 	if (!dp)
321 		return (ENXIO);
322 
323 	/*
324 	 * Drm driver implements a software lock to serialize access
325 	 * to graphics hardware based on per-process granulation. Before
326 	 * operating graphics hardware, all clients, including kernel
327 	 * and applications, must acquire this lock via DRM_IOCTL_LOCK
328 	 * ioctl, and release it via DRM_IOCTL_UNLOCK after finishing
329 	 * operations. Drm driver will grant r/w permission to the
330 	 * process which acquires this lock (Kernel is assumed to have
331 	 * process ID 0).
332 	 *
333 	 * A process might be terminated without releasing drm lock, in
334 	 * this case, drm driver is responsible for clearing the holding.
335 	 * To be informed of process exiting, drm driver uses clone open
336 	 * to guarantee that each call to open(9e) have one corresponding
337 	 * call to close(9e). In most cases, a process will close drm
338 	 * during process termination, so that drm driver could have a
339 	 * chance to release drm lock.
340 	 *
341 	 * In fact, a driver cannot know exactly when a process exits.
342 	 * Clone open doesn't address this issue completely: Because of
343 	 * inheritance, child processes inherit file descriptors from
344 	 * their parent. As a result, if the parent exits before its
345 	 * children, drm close(9e) entrypoint won't be called until all
346 	 * of its children terminate.
347 	 *
348 	 * Another issue brought up by inhertance is the process PID
349 	 * that calls the drm close() entry point may not be the same
350 	 * as the one who called open(). Per-process struct is allocated
351 	 * when a process first open() drm, and released when the process
352 	 * last close() drm. Since open()/close() may be not the same
353 	 * process, PID cannot be used for key to lookup per-process
354 	 * struct. So, we associate minor number with per-process struct
355 	 * during open()'ing, and find corresponding process struct
356 	 * via minor number when close() is called.
357 	 */
358 	newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
359 	mutex_enter(&dp->dev_lock);
360 	for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
361 	    newminor ++) {
362 		TAILQ_FOREACH(mp, &dp->minordevs, link) {
363 			if (mp->minor == newminor)
364 				break;
365 		}
366 		if (mp == NULL)
367 			goto gotminor;
368 	}
369 
370 	mutex_exit(&dp->dev_lock);
371 	return (EMFILE);
372 
373 gotminor:
374 	TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
375 	newp->minor = newminor;
376 	mutex_exit(&dp->dev_lock);
377 	err = drm_open(dp, newp, flag, otyp, credp);
378 	if (err) {
379 		mutex_enter(&dp->dev_lock);
380 		TAILQ_REMOVE(&dp->minordevs, newp, link);
381 		(void) kmem_free(mp, sizeof (drm_cminor_t));
382 		mutex_exit(&dp->dev_lock);
383 
384 		return (err);
385 	}
386 
387 	/* return a clone minor */
388 	newminor = newminor | (instance << NBITSMNODE);
389 	*devp = makedevice(getmajor(*devp), newminor);
390 	return (err);
391 }
392 
393 /*ARGSUSED*/
394 static int
395 drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
396 {
397 	drm_inst_state_t	*mstate;
398 	drm_device_t		*dp;
399 	minor_t		minor;
400 	int		ret;
401 
402 	mstate = drm_sup_devt_to_state(dev);
403 	if (mstate == NULL)
404 		return (EBADF);
405 
406 	minor = DEV2MINOR(dev);
407 	ASSERT(minor <= MAX_CLONE_MINOR);
408 	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
409 		return (0);
410 
411 	dp = mstate->mis_devp;
412 	if (dp == NULL) {
413 		DRM_ERROR("drm_sun_close: NULL soft state");
414 		return (ENXIO);
415 	}
416 
417 	ret = drm_close(dp, minor, flag, otyp, credp);
418 
419 	return (ret);
420 }
421 
/*
 * drm_sun_ioctl()
 *
 * Common ioctl(9E) entry point.  VIS_GETIDENTIFIER is answered
 * directly; gfx and agpmaster minors are forwarded to their respective
 * misc modules; everything else is dispatched through the core
 * drm_ioctls[] table or, for numbers at/above DRM_COMMAND_BASE, the
 * driver-private ioctl table, after checking the ioctl's
 * DRM_ROOT_ONLY/DRM_AUTH/DRM_MASTER flags against the caller's state.
 */
/*ARGSUSED*/
static int
drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	extern drm_ioctl_desc_t drm_ioctls[];

	drm_inst_state_t	*mstate;
	drm_device_t		*dp;
	drm_ioctl_desc_t	*ioctl;
	drm_ioctl_t		*func;
	drm_file_t		*fpriv;
	minor_t		minor;
	int		retval;
	int		nr;

	if (cmd == VIS_GETIDENTIFIER) {
		/*
		 * NOTE(review): on a successful copyout we fall through
		 * into the normal dispatch below rather than returning
		 * 0 — confirm this is intended.
		 */
		if (ddi_copyout(&text_ident, (void *)arg,
		    sizeof (struct vis_identifier), mode))
			return (EFAULT);
	}

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL) {
		return (EIO);
	}

	minor = DEV2MINOR(dev);
	ASSERT(minor <= MAX_CLONE_MINOR);
	switch (minor) {
	case GFX_MINOR:
		retval = gfxp_vgatext_ioctl(dev, cmd, arg,
		    mode, credp, rvalp, mstate->mis_gfxp);
		return (retval);

	case AGPMASTER_MINOR:
		retval = agpmaster_ioctl(dev, cmd, arg, mode,
		    credp, rvalp, mstate->mis_agpm);
		return (retval);

	case DRM_MINOR:
	default:	/* DRM cloning minor nodes */
		break;
	}

	dp = mstate->mis_devp;
	ASSERT(dp != NULL);

	/* assumes nr is within drm_ioctls[] bounds — TODO confirm */
	nr = DRM_IOCTL_NR(cmd);
	ioctl = &drm_ioctls[nr];
	atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);

	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		/*
		 * NOTE(review): '>' check — verify max_driver_ioctl is
		 * the maximum valid index, not a count (which would
		 * require '>=').
		 */
		if (nr > dp->driver->max_driver_ioctl) {
			DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
			    nr, dp->driver->max_driver_ioctl);
			return (EINVAL);
		}
		ioctl = &dp->driver->driver_ioctls[nr];
	}

	func = ioctl->func;
	if (func == NULL) {
		return (ENOTSUP);
	}

	/* per-process file state lookup is done under the device lock */
	mutex_enter(&dp->dev_lock);
	fpriv = drm_find_file_by_proc(dp, credp);
	mutex_exit(&dp->dev_lock);
	if (fpriv == NULL) {
		DRM_ERROR("drm_sun_ioctl : can't find authenticator");
		return (EACCES);
	}

	/* enforce the ioctl's privilege/authentication requirements */
	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
	    ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !fpriv->master))
		return (EACCES);

	retval = func(dp, arg, fpriv, mode);

	return (retval);
}
508 
/*
 * drm_sun_devmap()
 *
 * Common devmap(9E) entry point.  gfx minors are forwarded to
 * gfxp_vgatext_devmap(); agpmaster minors cannot be mapped.  For drm
 * minors the requested offset is matched against the device's map list
 * (first by map handle, then by physical offset), and the mapping is
 * set up according to the map type: PCI registers / frame buffer via
 * devmap_devmem_setup(), SHM / AGP / scatter-gather memory via
 * devmap_umem_setup().
 */
/*ARGSUSED*/
static int
drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
    size_t len, size_t *maplen, uint_t model)
{
	extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);

	drm_inst_state_t	*mstate;
	drm_device_t		*dp;
	ddi_umem_cookie_t	cookie;
	drm_local_map_t		*map;
	unsigned long	aperbase;
	u_offset_t		handle;
	offset_t		koff;
	caddr_t			kva;
	minor_t			minor;
	size_t			length;
	int			ret;

	/* access attributes for register/framebuffer mappings */
	static ddi_device_acc_attr_t dev_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
	};

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL)
		return (ENXIO);

	minor = DEV2MINOR(dev);
	switch (minor) {
	case GFX_MINOR:
		ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
		    mstate->mis_gfxp);
		return (ret);

	case AGPMASTER_MINOR:
		return (ENOTSUP);

	case DRM_MINOR:
		break;

	default:
		/* DRM cloning nodes */
		if (minor > MAX_CLONE_MINOR)
			return (EBADF);
		break;
	}


	dp = mstate->mis_devp;
	if (dp == NULL) {
		DRM_ERROR("drm_sun_devmap: NULL soft state");
		return (EINVAL);
	}

	/*
	 * We will solve 32-bit application on 64-bit kernel
	 * issue later, now, we just use low 32-bit
	 */
	handle = (u_offset_t)offset;
	handle &= 0xffffffff;

	/* first pass: match the offset against the map handles */
	mutex_enter(&dp->dev_lock);
	TAILQ_FOREACH(map, &dp->maplist, link) {
		if (handle ==
		    ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
			break;
	}

	/*
	 * Temporarily, because offset is phys_addr for register
	 * and framebuffer, is kernel virtual_addr for others
	 * Maybe we will use hash table to solve this issue later.
	 */
	if (map == NULL) {
		/* second pass: match against the map offsets */
		TAILQ_FOREACH(map, &dp->maplist, link) {
			if (handle == (map->offset & 0xffffffff))
				break;
		}
	}

	if (map == NULL) {
		u_offset_t	tmp;

		/* dump the current map list to aid debugging */
		mutex_exit(&dp->dev_lock);
		cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
		    offset, (int)len);
		cmn_err(CE_WARN, "Current mapping:\n");
		TAILQ_FOREACH(map, &dp->maplist, link) {
		tmp = (u_offset_t)((uintptr_t)map->handle) & 0xffffffff;
		cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx,type=%d,"
		    "offset=0x%lx), handle=%llx, tmp=%lld", map->handle,
		    map->size, map->type, map->offset, handle, tmp);
		}
		return (-1);
	}
	if (map->flags & _DRM_RESTRICTED) {
		mutex_exit(&dp->dev_lock);
		cmn_err(CE_WARN, "restricted map\n");
		return (-1);
	}

	mutex_exit(&dp->dev_lock);
	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		{
			int	regno;
			off_t	regoff;

			/* translate the physical offset to a BAR index */
			regno = drm_get_pci_index_reg(dp->dip,
			    map->offset, (uint_t)len, &regoff);
			if (regno < 0) {
				DRM_ERROR("devmap: failed to get register"
				    " offset=0x%llx, len=0x%x", handle, len);
				return (EINVAL);
			}

			ret = devmap_devmem_setup(dhp, dp->dip, NULL,
			    regno, (offset_t)regoff, len, PROT_ALL,
			    0, &dev_attr);
			if (ret != 0) {
				*maplen = 0;
				DRM_ERROR("devmap: failed, regno=%d,type=%d,"
				    " handle=0x%x, offset=0x%llx, len=0x%x",
				    regno, map->type, handle, offset, len);
				return (ret);
			}
			*maplen = len;
			return (ret);
		}

	case _DRM_SHM:
		/* shared-memory maps carry their own umem cookie */
		if (map->drm_umem_cookie == NULL)
			return (EINVAL);
		length = ptob(btopr(map->size));
		ret = devmap_umem_setup(dhp, dp->dip, NULL,
		    map->drm_umem_cookie, 0, length,
		    PROT_ALL, IOMEM_DATA_CACHED, NULL);
		if (ret != 0) {
			*maplen = 0;
			return (ret);
		}
		*maplen = length;

		return (DDI_SUCCESS);

	case _DRM_AGP:
		if (dp->agp == NULL) {
			cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap AGP"
			    "memory before AGP support is enabled");
			return (DDI_FAILURE);
		}

		/* map->offset is relative to the AGP aperture base */
		aperbase = dp->agp->base;
		koff = map->offset - aperbase;
		length = ptob(btopr(len));
		kva = map->dev_addr;
		cookie = gfxp_umem_cookie_init(kva, length);
		if (cookie == NULL) {
			cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
			return (DDI_FAILURE);
		}

		/* refcounted via drm_devmap_callbacks; uncached access */
		if ((ret = devmap_umem_setup(dhp, dp->dip,
		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr)) < 0) {
			gfxp_umem_cookie_destroy(cookie);
			cmn_err(CE_WARN, "devmap:failed, retval=%d", ret);
			return (DDI_FAILURE);
		}
		*maplen = length;
		break;

	case _DRM_SCATTER_GATHER:
		/* offset within the SG area, translated to kernel VA */
		koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
		kva = map->dev_addr + koff;
		length = ptob(btopr(len));
		if (length > map->size) {
			cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p,"
			    "mapsize=0x%lx,len=0x%lx", map->offset,
			    dp->sg->virtual, map->size, len);
			return (DDI_FAILURE);
		}
		cookie = gfxp_umem_cookie_init(kva, length);
		if (cookie == NULL) {
			cmn_err(CE_WARN, "devmap:failed to get umem_cookie");
			return (DDI_FAILURE);
		}
		ret = devmap_umem_setup(dhp, dp->dip,
		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
		if (ret != 0) {
			cmn_err(CE_WARN, "sun_devmap: umem_setup fail");
			gfxp_umem_cookie_destroy(cookie);
			return (DDI_FAILURE);
		}
		*maplen = length;
		break;

	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);

}
715 
716 /*ARGSUSED*/
717 static int
718 drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
719     offset_t offset, size_t len, void **new_priv)
720 {
721 	devmap_handle_t			*dhp;
722 	drm_inst_state_t		*statep;
723 	struct ddi_umem_cookie 	*cp;
724 
725 	statep = drm_sup_devt_to_state(dev);
726 	ASSERT(statep != NULL);
727 
728 	/*
729 	 * This driver only supports MAP_SHARED,
730 	 * and doesn't support MAP_PRIVATE
731 	 */
732 	if (flags & MAP_PRIVATE) {
733 		cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
734 		return (EINVAL);
735 	}
736 
737 	mutex_enter(&statep->dis_ctxlock);
738 	dhp = (devmap_handle_t *)dhc;
739 	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
740 	cp->cook_refcnt = 1;
741 	mutex_exit(&statep->dis_ctxlock);
742 	*new_priv = statep;
743 
744 	return (0);
745 }
746 
/*
 * drm_devmap_unmap()
 *
 * Devmap callback invoked when (part of) a mapping is torn down.  If
 * the segment is being split, new_dhp1/new_dhp2 describe the surviving
 * pieces; each takes an additional reference on the shared umem cookie
 * before the departing piece drops its own.  When the reference count
 * reaches zero the cookie is destroyed.
 */
/*ARGSUSED*/
static void
drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
    void **new_pvtp2)
{
	devmap_handle_t		*dhp;
	devmap_handle_t		*ndhp;
	drm_inst_state_t		*statep;
	struct ddi_umem_cookie	*cp;
	struct ddi_umem_cookie	*ncp;

	dhp = (devmap_handle_t *)dhc;
	/* pvtp is the instance state stored by drm_devmap_map()/dup() */
	statep = (drm_inst_state_t *)pvtp;

	mutex_enter(&statep->dis_ctxlock);
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	if (new_dhp1 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp1;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt ++;
		*new_pvtp1 = statep;
		/* the split pieces must share the original cookie */
		ASSERT(ncp == cp);
	}

	if (new_dhp2 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp2;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt ++;
		*new_pvtp2 = statep;
		ASSERT(ncp == cp);
	}

	/* drop the departing piece's reference; destroy on last unmap */
	cp->cook_refcnt --;
	if (cp->cook_refcnt == 0) {
		gfxp_umem_cookie_destroy(dhp->dh_cookie);
		dhp->dh_cookie = NULL;
	}
	mutex_exit(&statep->dis_ctxlock);
}
787 
788 
789 /*ARGSUSED*/
790 static int
791 drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
792     void **new_pvtp)
793 {
794 	devmap_handle_t			*dhp;
795 	drm_inst_state_t    *statep;
796 	struct ddi_umem_cookie *cp;
797 
798 	statep = (drm_inst_state_t *)pvtp;
799 	mutex_enter(&statep->dis_ctxlock);
800 	dhp = (devmap_handle_t *)dhc;
801 	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
802 	cp->cook_refcnt ++;
803 	mutex_exit(&statep->dis_ctxlock);
804 	*new_pvtp = statep;
805 
806 	return (0);
807 }
808 
809 int
810 drm_dev_to_instance(dev_t dev)
811 {
812 	return (DEV2INST(dev));
813 }
814 
815 /*
816  * drm_supp_alloc_drv_entry()
817  *
818  * Description:
819  *	Create a DRM entry and add it into the instance list (drm_inst_head).
820  *	Note that we don't allow a duplicated entry
821  */
822 static drm_inst_list_t *
823 drm_supp_alloc_drv_entry(dev_info_t *dip)
824 {
825 	drm_inst_list_t	**plist;
826 	drm_inst_list_t	*list;
827 	drm_inst_list_t	*entry;
828 
829 	/* protect the driver list */
830 	mutex_enter(&drm_inst_list_lock);
831 	plist = &drm_inst_head;
832 	list = *plist;
833 	while (list) {
834 		if (list->disl_state.mis_dip == dip) {
835 			mutex_exit(&drm_inst_list_lock);
836 			cmn_err(CE_WARN, "%s%d already registered",
837 			    ddi_driver_name(dip), ddi_get_instance(dip));
838 			return (NULL);
839 		}
840 		plist = &list->disl_next;
841 		list = list->disl_next;
842 	}
843 
844 	/* "dip" is not registered, create new one and add to list */
845 	entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
846 	*plist = entry;
847 	entry->disl_state.mis_dip = dip;
848 	mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
849 	mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
850 	mutex_exit(&drm_inst_list_lock);
851 
852 	return (entry);
853 
854 }	/* drm_supp_alloc_drv_entry */
855 
856 /*
857  * drm_supp_free_drv_entry()
858  */
859 static void
860 drm_supp_free_drv_entry(dev_info_t *dip)
861 {
862 	drm_inst_list_t		*list;
863 	drm_inst_list_t		**plist;
864 	drm_inst_state_t	*mstate;
865 
866 	/* protect the driver list */
867 	mutex_enter(&drm_inst_list_lock);
868 	plist = &drm_inst_head;
869 	list = *plist;
870 	while (list) {
871 		if (list->disl_state.mis_dip == dip) {
872 			*plist = list->disl_next;
873 			mstate = &list->disl_state;
874 			mutex_destroy(&mstate->mis_lock);
875 			mutex_destroy(&mstate->dis_ctxlock);
876 			kmem_free(list, sizeof (*list));
877 			mutex_exit(&drm_inst_list_lock);
878 			return;
879 		}
880 		plist = &list->disl_next;
881 		list = list->disl_next;
882 	}
883 	mutex_exit(&drm_inst_list_lock);
884 
885 }	/* drm_supp_free_drv_entry() */
886 
887 /*
888  * drm_sup_devt_to_state()
889  *
890  * description:
891  *	Get the soft state of DRM instance by device number
892  */
893 static drm_inst_state_t *
894 drm_sup_devt_to_state(dev_t dev)
895 {
896 	drm_inst_list_t	*list;
897 	drm_inst_state_t	*mstate;
898 	major_t	major = getmajor(dev);
899 	int		instance = DEV2INST(dev);
900 
901 	mutex_enter(&drm_inst_list_lock);
902 	list = drm_inst_head;
903 	while (list) {
904 		mstate = &list->disl_state;
905 		mutex_enter(&mstate->mis_lock);
906 
907 		if ((mstate->mis_major == major) &&
908 		    (ddi_get_instance(mstate->mis_dip) == instance)) {
909 			mutex_exit(&mstate->mis_lock);
910 			mutex_exit(&drm_inst_list_lock);
911 			return (mstate);
912 		}
913 
914 		list = list->disl_next;
915 		mutex_exit(&mstate->mis_lock);
916 	}
917 
918 	mutex_exit(&drm_inst_list_lock);
919 	return (NULL);
920 
921 }	/* drm_sup_devt_to_state() */
922 
923 int
924 drm_supp_get_irq(void *handle)
925 {
926 	drm_inst_list_t *list;
927 	drm_inst_state_t    *mstate;
928 	int		irq;
929 
930 	list = (drm_inst_list_t *)handle;
931 	mstate = &list->disl_state;
932 	ASSERT(mstate != NULL);
933 	irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
934 	return (irq);
935 }
936 
937 int
938 drm_supp_device_capability(void *handle, int capid)
939 {
940 	drm_inst_list_t *list;
941 	drm_inst_state_t    *mstate;
942 	uint8_t		cap = 0;
943 	uint16_t	caps_ptr;
944 
945 	list = (drm_inst_list_t *)handle;
946 	mstate = &list->disl_state;
947 	ASSERT(mstate != NULL);
948 
949 	/* has capabilities list ? */
950 	if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
951 	    PCI_CONF_CAP_MASK) == 0)
952 		return (NULL);
953 
954 	caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
955 	while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
956 		cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
957 		if ((cap & PCI_CONF_CAPID_MASK) == capid)
958 			return (cap);
959 		caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
960 		    caps_ptr + PCI_CAP_NEXT_PTR);
961 	}
962 
963 	return (0);
964 }
965