/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Common misc module interfaces of DRM under Solaris
 */

/*
 * This module calls into gfx and agpmaster misc modules respectively
 * for generic graphics operations and AGP master device support.
 */

#include "drm_sunmod.h"
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <vm/seg_kmem.h>

static struct modlmisc modlmisc = {
	&mod_miscops, "DRM common interfaces"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

static drm_inst_list_t *drm_inst_head;
static kmutex_t drm_inst_list_lock;

static int drm_sun_open(dev_t *, int, int, cred_t *);
static int drm_sun_close(dev_t, int, int, cred_t *);
static int drm_sun_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int drm_sun_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
    size_t *, uint_t);

/*
 * devmap callbacks for AGP and PCI GART
 */
static int drm_devmap_map(devmap_cookie_t, dev_t,
    uint_t, offset_t, size_t, void **);
static int drm_devmap_dup(devmap_cookie_t, void *,
    devmap_cookie_t, void **);
static void drm_devmap_unmap(devmap_cookie_t, void *,
    offset_t, size_t, devmap_cookie_t, void **, devmap_cookie_t, void **);

static drm_inst_list_t *drm_supp_alloc_drv_entry(dev_info_t *);
static drm_inst_state_t *drm_sup_devt_to_state(dev_t);
static void drm_supp_free_drv_entry(dev_info_t *);

static struct devmap_callback_ctl drm_devmap_callbacks = {
	DEVMAP_OPS_REV,			/* devmap_rev */
	drm_devmap_map,			/* devmap_map */
	NULL,				/* devmap_access */
	drm_devmap_dup,			/* devmap_dup */
	drm_devmap_unmap		/* devmap_unmap */
};

/*
 * Common device operations structure for all DRM drivers
 */
struct cb_ops drm_cb_ops = {
	drm_sun_open,			/* cb_open */
	drm_sun_close,			/* cb_close */
	nodev,				/* cb_strategy */
	nodev,				/* cb_print */
	nodev,				/* cb_dump */
	nodev,				/* cb_read */
	nodev,				/* cb_write */
	drm_sun_ioctl,			/* cb_ioctl */
	drm_sun_devmap,			/* cb_devmap */
	nodev,				/* cb_mmap */
	NULL,				/* cb_segmap */
	nochpoll,			/* cb_chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* cb_stream */
	D_NEW | D_MTSAFE | D_DEVMAP	/* cb_flag */
};
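
/*
 * Illustrative sketch only (not part of this module, names hypothetical):
 * a client DRM driver is expected to point the devo_cb_ops field of its
 * dev_ops at the common table above, for example:
 *
 *	static struct dev_ops xxdrm_dev_ops = {
 *		DEVO_REV, 0, xxdrm_getinfo, nulldev, nulldev,
 *		xxdrm_attach, xxdrm_detach, nodev,
 *		&drm_cb_ops, NULL, NULL
 *	};
 *
 * drm_supp_register() below also rewrites devo_cb_ops to &drm_cb_ops in
 * case a driver installs a different cb_ops table.
 */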

int
_init(void)
{
	int error;

	if ((error = mod_install(&modlinkage)) != 0) {
		return (error);
	}

	/* initialize the instance list lock */
	mutex_init(&drm_inst_list_lock, NULL, MUTEX_DRIVER, NULL);
	return (0);
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0)
		return (err);

	mutex_destroy(&drm_inst_list_lock);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

void *
drm_supp_register(dev_info_t *dip, drm_device_t *dp)
{
	int error;
	char buf[80];
	int instance = ddi_get_instance(dip);
	ddi_acc_handle_t pci_cfg_handle;
	agp_master_softc_t *agpm;
	drm_inst_state_t *mstate;
	drm_inst_list_t *entry;
	gfxp_vgatext_softc_ptr_t gfxp;
	struct dev_ops *devop;

	ASSERT(dip != NULL);

	entry = drm_supp_alloc_drv_entry(dip);
	if (entry == NULL) {
		cmn_err(CE_WARN, "drm_supp_register: failed to get softstate");
		return (NULL);
	}
	mstate = &entry->disl_state;

	/*
	 * DRM drivers are required to use common cb_ops
	 */
	devop = ddi_get_driver(dip);
	if (devop->devo_cb_ops != &drm_cb_ops) {
		devop->devo_cb_ops = &drm_cb_ops;
	}

	/* Generic graphics initialization */
	gfxp = gfxp_vgatext_softc_alloc();
	error = gfxp_vgatext_attach(dip, DDI_ATTACH, gfxp);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_register: failed to init gfx");
		goto exit1;
	}

	/* create a minor node for common graphics ops */
	(void) sprintf(buf, "%s%d", GFX_NAME, instance);
	error = ddi_create_minor_node(dip, buf, S_IFCHR,
	    INST2NODE0(instance), DDI_NT_DISPLAY, NULL);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_register: "
		    "failed to create minor node for gfx");
		goto exit2;
	}

	/* setup mapping for later PCI config space access */
	error = pci_config_setup(dip, &pci_cfg_handle);
	if (error != DDI_SUCCESS) {
		DRM_ERROR("drm_supp_register: "
		    "PCI configuration space setup failed");
		goto exit2;
	}

	/* AGP master attach */
	agpm = NULL;
	if (dp->driver->use_agp) {
		DRM_DEBUG("drm_supp_register: driver uses AGP\n");
		error = agpmaster_attach(dip, &agpm,
		    pci_cfg_handle, INST2NODE1(instance));
		if ((error != DDI_SUCCESS) && (dp->driver->require_agp)) {
			DRM_ERROR("drm_supp_register: "
			    "AGP master support not available");
			goto exit3;
		}
	}

	mutex_enter(&mstate->mis_lock);
	mstate->mis_major = ddi_driver_major(dip);
	mstate->mis_dip = dip;
	mstate->mis_gfxp = gfxp;
	mstate->mis_agpm = agpm;
	mstate->mis_cfg_hdl = pci_cfg_handle;
	mstate->mis_devp = dp;
	mutex_exit(&mstate->mis_lock);

	/* create minor node for DRM access */
	(void) sprintf(buf, "%s%d", DRM_DEVNODE, instance);
	if (ddi_create_minor_node(dip, buf, S_IFCHR,
	    INST2NODE2(instance), DDI_NT_DISPLAY_DRM, 0)) {
		DRM_ERROR("drm_supp_register: "
		    "failed to create minor node for drm");
		goto exit4;
	}

	return ((void *)mstate);

exit4:
	if ((dp->driver->use_agp) && agpm)
		agpmaster_detach(&agpm);
exit3:
	pci_config_teardown(&pci_cfg_handle);
exit2:
	(void) gfxp_vgatext_detach(dip, DDI_DETACH, gfxp);
exit1:
	gfxp_vgatext_softc_free(gfxp);
	drm_supp_free_drv_entry(dip);
	ddi_remove_minor_node(dip, NULL);

	return (NULL);
}
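
/*
 * Illustrative usage only (a sketch, names hypothetical): a client DRM
 * driver would typically call drm_supp_register() from its attach(9E)
 * entry point and keep the returned handle in its soft state, e.g.
 *
 *	statep->drm_handle = drm_supp_register(dip, statep->drm_dev);
 *	if (statep->drm_handle == NULL)
 *		return (DDI_FAILURE);
 *
 * The handle is later passed back to drm_supp_unregister() from detach(9E).
 */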

int
drm_supp_unregister(void *handle)
{
	drm_inst_list_t *list;
	drm_inst_state_t *mstate;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	mutex_enter(&mstate->mis_lock);

	/* AGP master detach */
	if (mstate->mis_agpm != NULL)
		agpmaster_detach(&mstate->mis_agpm);

	/* free PCI config access handle */
	if (mstate->mis_cfg_hdl)
		pci_config_teardown(&mstate->mis_cfg_hdl);

	/* graphics misc module detach */
	if (mstate->mis_gfxp) {
		(void) gfxp_vgatext_detach(mstate->mis_dip, DDI_DETACH,
		    mstate->mis_gfxp);
		gfxp_vgatext_softc_free(mstate->mis_gfxp);
	}

	mstate->mis_devp = NULL;

	/* remove all minor nodes */
	ddi_remove_minor_node(mstate->mis_dip, NULL);
	mutex_exit(&mstate->mis_lock);
	drm_supp_free_drv_entry(mstate->mis_dip);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
static int
drm_sun_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	drm_inst_state_t *mstate;
	drm_cminor_t *mp, *newp;
	drm_device_t *dp;
	minor_t minor;
	int newminor;
	int instance;
	int err;

	mstate = drm_sup_devt_to_state(*devp);
	/*
	 * Return ENXIO for deferred attach so that the system can
	 * attach us again.
	 */
	if (mstate == NULL)
		return (ENXIO);

	/*
	 * The least significant 15 bits are used for the minor number, and
	 * the mid 3 bits are used for the instance number. The minor numbers
	 * are used as follows:
	 *	0 -- gfx
	 *	1 -- agpmaster
	 *	2 -- drm
	 *	(3, MAX_CLONE_MINOR) -- drm minor node for clone open.
	 */
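	/*
	 * For illustration only (a sketch; the real DEV2MINOR/DEV2INST
	 * macros and the NBITSMNODE bit split live in drm_sunmod.h, so the
	 * exact form below is an assumption):
	 *	minor    = getminor(dev) & ((1 << NBITSMNODE) - 1);
	 *	instance = getminor(dev) >> NBITSMNODE;
	 */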
	minor = DEV2MINOR(*devp);
	instance = DEV2INST(*devp);
	ASSERT(minor <= MAX_CLONE_MINOR);

	/*
	 * No operations for VGA & AGP master devices, always return OK.
	 */
	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
		return (0);

	/*
	 * From here, we start to process drm
	 */

	dp = mstate->mis_devp;
	if (!dp)
		return (ENXIO);

	/*
	 * The drm driver implements a software lock to serialize access
	 * to graphics hardware at per-process granularity. Before
	 * operating on graphics hardware, all clients, including the kernel
	 * and applications, must acquire this lock via the DRM_IOCTL_LOCK
	 * ioctl, and release it via DRM_IOCTL_UNLOCK after finishing their
	 * operations. The drm driver grants r/w permission to the process
	 * which acquires this lock (the kernel is assumed to have
	 * process ID 0).
	 *
	 * A process might be terminated without releasing the drm lock; in
	 * this case, the drm driver is responsible for clearing the holding.
	 * To be informed of process exit, the drm driver uses clone open
	 * to guarantee that each call to open(9e) has one corresponding
	 * call to close(9e). In most cases, a process will close drm
	 * during process termination, so that the drm driver gets a
	 * chance to release the drm lock.
	 *
	 * In fact, a driver cannot know exactly when a process exits.
	 * Clone open doesn't address this issue completely: because of
	 * inheritance, child processes inherit file descriptors from
	 * their parent. As a result, if the parent exits before its
	 * children, the drm close(9e) entry point won't be called until all
	 * of its children terminate.
	 *
	 * Another issue brought up by inheritance is that the PID of the
	 * process calling the drm close() entry point may not be the same
	 * as the one which called open(). The per-process struct is
	 * allocated when a process first open()s drm, and released when the
	 * process last close()s drm. Since the open() and close() may come
	 * from different processes, the PID cannot be used as the key to
	 * look up the per-process struct. So, we associate a minor number
	 * with the per-process struct during open(), and find the
	 * corresponding process struct via that minor number when close()
	 * is called.
	 */
	newp = kmem_zalloc(sizeof (drm_cminor_t), KM_SLEEP);
	mutex_enter(&dp->dev_lock);
	for (newminor = DRM_MIN_CLONEMINOR; newminor < MAX_CLONE_MINOR;
	    newminor++) {
		TAILQ_FOREACH(mp, &dp->minordevs, link) {
			if (mp->minor == newminor)
				break;
		}
		if (mp == NULL)
			goto gotminor;
	}

	mutex_exit(&dp->dev_lock);
	kmem_free(newp, sizeof (drm_cminor_t));
	return (EMFILE);

gotminor:
	TAILQ_INSERT_TAIL(&dp->minordevs, newp, link);
	newp->minor = newminor;
	mutex_exit(&dp->dev_lock);
	err = drm_open(dp, newp, flag, otyp, credp);
	if (err) {
		mutex_enter(&dp->dev_lock);
		TAILQ_REMOVE(&dp->minordevs, newp, link);
		kmem_free(newp, sizeof (drm_cminor_t));
		mutex_exit(&dp->dev_lock);

		return (err);
	}

	/* return a clone minor */
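	/*
	 * Encoding sketch (illustrative values only): with NBITSMNODE low
	 * bits carrying the clone minor, instance 0 and clone minor 3 yield
	 * a device minor of 3, while instance 1 yields
	 * (1 << NBITSMNODE) | 3.
	 */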
	newminor = newminor | (instance << NBITSMNODE);
	*devp = makedevice(getmajor(*devp), newminor);
	return (err);
}

/*ARGSUSED*/
static int
drm_sun_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	drm_inst_state_t *mstate;
	drm_device_t *dp;
	minor_t minor;
	int ret;

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL)
		return (EBADF);

	minor = DEV2MINOR(dev);
	ASSERT(minor <= MAX_CLONE_MINOR);
	if ((minor == GFX_MINOR) || (minor == AGPMASTER_MINOR))
		return (0);

	dp = mstate->mis_devp;
	if (dp == NULL) {
		DRM_ERROR("drm_sun_close: NULL soft state");
		return (ENXIO);
	}

	ret = drm_close(dp, minor, flag, otyp, credp);

	return (ret);
}

/*ARGSUSED*/
static int
drm_sun_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	extern drm_ioctl_desc_t drm_ioctls[];

	drm_inst_state_t *mstate;
	drm_device_t *dp;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t *func;
	drm_file_t *fpriv;
	minor_t minor;
	int retval;
	int nr;

	if (cmd == VIS_GETIDENTIFIER) {
		if (ddi_copyout(&text_ident, (void *)arg,
		    sizeof (struct vis_identifier), mode))
			return (EFAULT);
	}

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL) {
		return (EIO);
	}

	minor = DEV2MINOR(dev);
	ASSERT(minor <= MAX_CLONE_MINOR);
	switch (minor) {
	case GFX_MINOR:
		retval = gfxp_vgatext_ioctl(dev, cmd, arg,
		    mode, credp, rvalp, mstate->mis_gfxp);
		return (retval);

	case AGPMASTER_MINOR:
		retval = agpmaster_ioctl(dev, cmd, arg, mode,
		    credp, rvalp, mstate->mis_agpm);
		return (retval);

	case DRM_MINOR:
	default:	/* DRM cloning minor nodes */
		break;
	}

	dp = mstate->mis_devp;
	ASSERT(dp != NULL);

	nr = DRM_IOCTL_NR(cmd);
	ioctl = &drm_ioctls[nr];
	atomic_inc_32(&dp->counts[_DRM_STAT_IOCTLS]);

	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr > dp->driver->max_driver_ioctl) {
			DRM_ERROR("Bad driver ioctl number, 0x%x (of 0x%x)",
			    nr, dp->driver->max_driver_ioctl);
			return (EINVAL);
		}
		ioctl = &dp->driver->driver_ioctls[nr];
	}
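
	/*
	 * Illustrative note (a sketch, hypothetical names): a client driver
	 * publishes its private ioctls through its driver_ioctls[] table,
	 * whose entries carry the same fields consumed below, e.g.
	 *	driver_ioctls[n].func  = xxdrm_getparam;
	 *	driver_ioctls[n].flags = DRM_AUTH;
	 */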

	func = ioctl->func;
	if (func == NULL) {
		return (ENOTSUP);
	}

	mutex_enter(&dp->dev_lock);
	fpriv = drm_find_file_by_proc(dp, credp);
	mutex_exit(&dp->dev_lock);
	if (fpriv == NULL) {
		DRM_ERROR("drm_sun_ioctl: can't find authenticator");
		return (EACCES);
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(credp)) ||
	    ((ioctl->flags & DRM_AUTH) && !fpriv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !fpriv->master))
		return (EACCES);

	fpriv->dev = dev;
	fpriv->credp = credp;

	retval = func(dp, arg, fpriv, mode);

	return (retval);
}

/*ARGSUSED*/
static int
drm_sun_devmap(dev_t dev, devmap_cookie_t dhp, offset_t offset,
    size_t len, size_t *maplen, uint_t model)
{
	extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);

	drm_inst_state_t *mstate;
	drm_device_t *dp;
	ddi_umem_cookie_t cookie;
	drm_local_map_t *map = NULL;
	unsigned long aperbase;
	u_offset_t handle;
	offset_t koff;
	caddr_t kva;
	minor_t minor;
	size_t length;
	int ret;

	static ddi_device_acc_attr_t dev_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC,
	};
	static ddi_device_acc_attr_t gem_dev_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_MERGING_OK_ACC
	};

	mstate = drm_sup_devt_to_state(dev);
	if (mstate == NULL)
		return (ENXIO);

	minor = DEV2MINOR(dev);
	switch (minor) {
	case GFX_MINOR:
		ret = gfxp_vgatext_devmap(dev, dhp, offset, len, maplen, model,
		    mstate->mis_gfxp);
		return (ret);

	case AGPMASTER_MINOR:
		return (ENOTSUP);

	case DRM_MINOR:
		break;

	default:
		/* DRM cloning nodes */
		if (minor > MAX_CLONE_MINOR)
			return (EBADF);
		break;
	}

	dp = mstate->mis_devp;
	if (dp == NULL) {
		DRM_ERROR("drm_sun_devmap: NULL soft state");
		return (EINVAL);
	}

	mutex_enter(&dp->dev_lock);

	if (dp->driver->use_gem == 1) {
		struct idr_list *entry;
		drm_cminor_t *mp;

		mp = drm_find_file_by_minor(dp, minor);
		if (!mp) {
			mutex_exit(&dp->dev_lock);
			DRM_ERROR("drm_sun_devmap: can't find authenticator");
			return (EACCES);
		}

		spin_lock(&dp->struct_mutex);
		idr_list_for_each(entry, &(mp->fpriv->object_idr)) {
			if ((uintptr_t)entry->obj == (u_offset_t)offset) {
				map = entry->obj->map;
				goto goon;
			}
		}
goon:
		spin_unlock(&dp->struct_mutex);
	}

	if (map == NULL) {
		/*
		 * We will solve the 32-bit application on 64-bit kernel
		 * issue later; for now, we just use the low 32 bits.
		 */
		handle = (u_offset_t)offset;
		handle &= 0xffffffff;

		TAILQ_FOREACH(map, &dp->maplist, link) {
			if (handle ==
			    ((u_offset_t)((uintptr_t)map->handle) & 0xffffffff))
				break;
		}

		/*
		 * Temporary workaround: the offset is a physical address for
		 * register and framebuffer maps, but a kernel virtual
		 * address for the others. Maybe we will use a hash table to
		 * solve this issue later.
		 */
		if (map == NULL) {
			TAILQ_FOREACH(map, &dp->maplist, link) {
				if (handle == (map->offset & 0xffffffff))
					break;
			}
		}
	}

	if (map == NULL) {
		u_offset_t tmp;

		mutex_exit(&dp->dev_lock);
		cmn_err(CE_WARN, "Can't find map, offset=0x%llx, len=%x\n",
		    offset, (int)len);
		cmn_err(CE_WARN, "Current mapping:\n");
		TAILQ_FOREACH(map, &dp->maplist, link) {
			tmp = (u_offset_t)((uintptr_t)map->handle) & 0xffffffff;
			cmn_err(CE_WARN, "map(handle=0x%p, size=0x%lx,type=%d,"
			    "offset=0x%lx), handle=%llx, tmp=%lld",
			    map->handle, map->size, map->type, map->offset,
			    handle, tmp);
		}
		return (-1);
	}
	if (map->flags & _DRM_RESTRICTED) {
		mutex_exit(&dp->dev_lock);
		cmn_err(CE_WARN, "restricted map\n");
		return (-1);
	}

	mutex_exit(&dp->dev_lock);
	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		{
			int regno;
			off_t regoff;

			regno = drm_get_pci_index_reg(dp->dip,
			    map->offset, (uint_t)len, &regoff);
			if (regno < 0) {
				DRM_ERROR("devmap: failed to get register"
				    " offset=0x%llx, len=0x%x", handle, len);
				return (EINVAL);
			}

			ret = devmap_devmem_setup(dhp, dp->dip, NULL,
			    regno, (offset_t)regoff, len, PROT_ALL,
			    0, &dev_attr);
			if (ret != 0) {
				*maplen = 0;
				DRM_ERROR("devmap: failed, regno=%d,type=%d,"
				    " handle=0x%x, offset=0x%llx, len=0x%x",
				    regno, map->type, handle, offset, len);
				return (ret);
			}
			*maplen = len;
			return (ret);
		}

	case _DRM_SHM:
		if (map->drm_umem_cookie == NULL)
			return (EINVAL);
		length = ptob(btopr(map->size));
		ret = devmap_umem_setup(dhp, dp->dip, NULL,
		    map->drm_umem_cookie, 0, length,
		    PROT_ALL, IOMEM_DATA_CACHED, NULL);
		if (ret != 0) {
			*maplen = 0;
			return (ret);
		}
		*maplen = length;

		return (DDI_SUCCESS);

	case _DRM_AGP:
		if (dp->agp == NULL) {
			cmn_err(CE_WARN, "drm_sun_devmap: attempted to mmap"
			    " AGP memory before AGP support is enabled");
			return (DDI_FAILURE);
		}

		aperbase = dp->agp->base;
		koff = map->offset - aperbase;
		length = ptob(btopr(len));
		kva = map->dev_addr;
		cookie = gfxp_umem_cookie_init(kva, length);
		if (cookie == NULL) {
			cmn_err(CE_WARN, "devmap: failed to get umem_cookie");
			return (DDI_FAILURE);
		}

		if ((ret = devmap_umem_setup(dhp, dp->dip,
		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr)) < 0) {
			gfxp_umem_cookie_destroy(cookie);
			cmn_err(CE_WARN, "devmap: failed, retval=%d", ret);
			return (DDI_FAILURE);
		}
		*maplen = length;
		break;

	case _DRM_SCATTER_GATHER:
		koff = map->offset - (unsigned long)(caddr_t)dp->sg->virtual;
		kva = map->dev_addr + koff;
		length = ptob(btopr(len));
		if (length > map->size) {
			cmn_err(CE_WARN, "offset=0x%lx, virtual=0x%p,"
			    "mapsize=0x%lx,len=0x%lx", map->offset,
			    dp->sg->virtual, map->size, len);
			return (DDI_FAILURE);
		}
		cookie = gfxp_umem_cookie_init(kva, length);
		if (cookie == NULL) {
			cmn_err(CE_WARN, "devmap: failed to get umem_cookie");
			return (DDI_FAILURE);
		}
		ret = devmap_umem_setup(dhp, dp->dip,
		    &drm_devmap_callbacks, cookie, 0, length, PROT_ALL,
		    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
		if (ret != 0) {
			cmn_err(CE_WARN, "sun_devmap: umem_setup fail");
			gfxp_umem_cookie_destroy(cookie);
			return (DDI_FAILURE);
		}
		*maplen = length;
		break;

	case _DRM_TTM:
		if (map->drm_umem_cookie == NULL)
			return (EINVAL);

		/* capture the return value so the failure message is valid */
		if ((ret = gfxp_devmap_umem_setup(dhp, dp->dip,
		    NULL, map->drm_umem_cookie, 0, map->size, PROT_ALL,
		    IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP,
		    &gem_dev_attr)) != 0) {
			cmn_err(CE_WARN, "devmap: failed, retval=%d", ret);
			return (DDI_FAILURE);
		}
		*maplen = map->size;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
drm_devmap_map(devmap_cookie_t dhc, dev_t dev, uint_t flags,
    offset_t offset, size_t len, void **new_priv)
{
	devmap_handle_t *dhp;
	drm_inst_state_t *statep;
	struct ddi_umem_cookie *cp;

	statep = drm_sup_devt_to_state(dev);
	ASSERT(statep != NULL);

	/*
	 * This driver only supports MAP_SHARED,
	 * and doesn't support MAP_PRIVATE
	 */
	if (flags & MAP_PRIVATE) {
		cmn_err(CE_WARN, "!DRM driver doesn't support MAP_PRIVATE");
		return (EINVAL);
	}

	mutex_enter(&statep->dis_ctxlock);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt = 1;
	mutex_exit(&statep->dis_ctxlock);
	*new_priv = statep;

	return (0);
}

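/*
 * Descriptive note (based on devmap_callback_ctl(9S) semantics): the unmap
 * callback is also invoked when a mapping is split, in which case
 * new_dhp1/new_dhp2 describe the surviving pieces. The cookie reference
 * count below is bumped once per surviving piece, and the umem cookie is
 * destroyed only when the last reference goes away.
 */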
/*ARGSUSED*/
static void
drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
    void **new_pvtp2)
{
	devmap_handle_t *dhp;
	devmap_handle_t *ndhp;
	drm_inst_state_t *statep;
	struct ddi_umem_cookie *cp;
	struct ddi_umem_cookie *ncp;

	dhp = (devmap_handle_t *)dhc;
	statep = (drm_inst_state_t *)pvtp;

	mutex_enter(&statep->dis_ctxlock);
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	if (new_dhp1 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp1;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp1 = statep;
		ASSERT(ncp == cp);
	}

	if (new_dhp2 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp2;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp2 = statep;
		ASSERT(ncp == cp);
	}

	cp->cook_refcnt--;
	if (cp->cook_refcnt == 0) {
		gfxp_umem_cookie_destroy(dhp->dh_cookie);
		dhp->dh_cookie = NULL;
	}
	mutex_exit(&statep->dis_ctxlock);
}


/*ARGSUSED*/
static int
drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
    void **new_pvtp)
{
	devmap_handle_t *dhp;
	drm_inst_state_t *statep;
	struct ddi_umem_cookie *cp;

	statep = (drm_inst_state_t *)pvtp;
	mutex_enter(&statep->dis_ctxlock);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt++;
	mutex_exit(&statep->dis_ctxlock);
	*new_pvtp = statep;

	return (0);
}

int
drm_dev_to_instance(dev_t dev)
{
	return (DEV2INST(dev));
}

/*
 * drm_supp_alloc_drv_entry()
 *
 * Description:
 *	Create a DRM entry and add it into the instance list (drm_inst_head).
 *	Note that we don't allow a duplicated entry
 */
static drm_inst_list_t *
drm_supp_alloc_drv_entry(dev_info_t *dip)
{
	drm_inst_list_t **plist;
	drm_inst_list_t *list;
	drm_inst_list_t *entry;

	/* protect the driver list */
	mutex_enter(&drm_inst_list_lock);
	plist = &drm_inst_head;
	list = *plist;
	while (list) {
		if (list->disl_state.mis_dip == dip) {
			mutex_exit(&drm_inst_list_lock);
			cmn_err(CE_WARN, "%s%d already registered",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			return (NULL);
		}
		plist = &list->disl_next;
		list = list->disl_next;
	}

	/* "dip" is not registered, create new one and add to list */
	entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
	*plist = entry;
	entry->disl_state.mis_dip = dip;
	mutex_init(&entry->disl_state.mis_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&entry->disl_state.dis_ctxlock, NULL, MUTEX_DRIVER, NULL);
	mutex_exit(&drm_inst_list_lock);

	return (entry);

} /* drm_supp_alloc_drv_entry */

/*
 * drm_supp_free_drv_entry()
 */
static void
drm_supp_free_drv_entry(dev_info_t *dip)
{
	drm_inst_list_t *list;
	drm_inst_list_t **plist;
	drm_inst_state_t *mstate;

	/* protect the driver list */
	mutex_enter(&drm_inst_list_lock);
	plist = &drm_inst_head;
	list = *plist;
	while (list) {
		if (list->disl_state.mis_dip == dip) {
			*plist = list->disl_next;
			mstate = &list->disl_state;
			mutex_destroy(&mstate->mis_lock);
			mutex_destroy(&mstate->dis_ctxlock);
			kmem_free(list, sizeof (*list));
			mutex_exit(&drm_inst_list_lock);
			return;
		}
		plist = &list->disl_next;
		list = list->disl_next;
	}
	mutex_exit(&drm_inst_list_lock);

} /* drm_supp_free_drv_entry() */

/*
 * drm_sup_devt_to_state()
 *
 * Description:
 *	Get the soft state of DRM instance by device number
 */
static drm_inst_state_t *
drm_sup_devt_to_state(dev_t dev)
{
	drm_inst_list_t *list;
	drm_inst_state_t *mstate;
	major_t major = getmajor(dev);
	int instance = DEV2INST(dev);

	mutex_enter(&drm_inst_list_lock);
	list = drm_inst_head;
	while (list) {
		mstate = &list->disl_state;
		mutex_enter(&mstate->mis_lock);

		if ((mstate->mis_major == major) &&
		    (ddi_get_instance(mstate->mis_dip) == instance)) {
			mutex_exit(&mstate->mis_lock);
			mutex_exit(&drm_inst_list_lock);
			return (mstate);
		}

		list = list->disl_next;
		mutex_exit(&mstate->mis_lock);
	}

	mutex_exit(&drm_inst_list_lock);
	return (NULL);

} /* drm_sup_devt_to_state() */

int
drm_supp_get_irq(void *handle)
{
	drm_inst_list_t *list;
	drm_inst_state_t *mstate;
	int irq;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	ASSERT(mstate != NULL);
	irq = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_ILINE);
	return (irq);
}

int
drm_supp_device_capability(void *handle, int capid)
{
	drm_inst_list_t *list;
	drm_inst_state_t *mstate;
	uint8_t cap = 0;
	uint16_t caps_ptr;

	list = (drm_inst_list_t *)handle;
	mstate = &list->disl_state;
	ASSERT(mstate != NULL);

	/* does the device have a capabilities list? */
	if ((pci_config_get16(mstate->mis_cfg_hdl, PCI_CONF_STAT) &
	    PCI_CONF_CAP_MASK) == 0)
		return (0);

	caps_ptr = pci_config_get8(mstate->mis_cfg_hdl, PCI_CONF_CAP_PTR);
	while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
		cap = pci_config_get32(mstate->mis_cfg_hdl, caps_ptr);
		if ((cap & PCI_CONF_CAPID_MASK) == capid)
			return (cap);
		caps_ptr = pci_config_get8(mstate->mis_cfg_hdl,
		    caps_ptr + PCI_CAP_NEXT_PTR);
	}

	return (0);
}
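
/*
 * Illustrative usage only (a sketch; PCI_CAP_ID_AGP is assumed to come
 * from <sys/pci.h> and statep->drm_handle is a hypothetical client field):
 * a client driver can probe for a capability such as AGP via
 *
 *	if (drm_supp_device_capability(statep->drm_handle, PCI_CAP_ID_AGP))
 *		... the device exposes an AGP capability ...
 *
 * A return value of 0 means the capability (or the capability list itself)
 * is absent.
 */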