/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * srn	Provide APM-like suspend/resume notification interfaces to Xorg
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>		/* driver flags and functions */
#include <sys/open.h>		/* OTYP_CHR definition */
#include <sys/stat.h>		/* S_IFCHR definition */
#include <sys/pathname.h>	/* name -> dev_info xlation */
#include <sys/kmem.h>		/* memory alloc stuff */
#include <sys/debug.h>
#include <sys/pm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/epm.h>
#include <sys/vfs.h>
#include <sys/mode.h>
#include <sys/mkdev.h>
#include <sys/promif.h>
#include <sys/consdev.h>
#include <sys/ddi_impldefs.h>
#include <sys/poll.h>
#include <sys/note.h>
#include <sys/taskq.h>
#include <sys/policy.h>
#include <sys/srn.h>

/*
 * The minor number is (instance << 8) + a clone minor in the range 1-255;
 * only one instance is ever allocated.
 */
#define	SRN_MINOR_TO_CLONE(minor) ((minor) & (SRN_MAX_CLONE - 1))
#define	SU		0x002
#define	SG		0x004
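
/*
 * Worked example of the encoding above, assuming SRN_MAX_CLONE is a power
 * of two (the mask form of SRN_MINOR_TO_CLONE requires that): with
 * instance 0, the clone chosen in srn_open() becomes the low byte of the
 * minor, so minor 3 maps back to clone 3.  SU and SG are the permission
 * classes checked by srn_perms(): SU is satisfied by secpolicy_power_mgmt(),
 * SG by membership in group 0.
 */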

extern kmutex_t	srn_clone_lock;	/* protects srn_clones array */
extern kcondvar_t srn_clones_cv[SRN_MAX_CLONE];
extern uint_t	srn_poll_cnt[SRN_MAX_CLONE];

/*
 * The soft state of the srn driver.  Since there will only be
 * one of these, just reference it through a static struct.
 */
static struct srnstate {
	dev_info_t	*srn_dip;		/* ptr to our dev_info node */
	int		srn_instance;		/* for ddi_get_instance() */
	uchar_t		srn_clones[SRN_MAX_CLONE]; /* unique opens	*/
	struct cred	*srn_cred[SRN_MAX_CLONE]; /* cred for each open	*/
	int		srn_type[SRN_MAX_CLONE]; /* type of handshake */
	int		srn_delivered[SRN_MAX_CLONE];
	srn_event_info_t srn_pending[SRN_MAX_CLONE];
	int		srn_fault[SRN_MAX_CLONE];
} srn = { NULL, -1};
typedef struct srnstate *srn_state_t;

kcondvar_t	srn_clones_cv[SRN_MAX_CLONE];
uint_t		srn_poll_cnt[SRN_MAX_CLONE];	/* count of events for poll */
int		srn_apm_count;
int		srn_autosx_count;
/* Number of seconds to wait for clients to ack a poll */
int		srn_timeout = 10;

struct pollhead	srn_pollhead[SRN_MAX_CLONE];
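
/*
 * Per-clone event flow, as implemented below: srn_notify() records the
 * event in srn_pending[], sets srn_poll_cnt[] and calls pollwakeup(); the
 * client fetches the event with SRN_IOC_NEXTEVENT and, for APM-style
 * clients, acks it with SRN_IOC_SUSPEND or SRN_IOC_RESUME, which signals
 * srn_clones_cv[] so the blocked kernel thread can continue.
 */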

static int	srn_open(dev_t *, int, int, cred_t *);
static int	srn_close(dev_t, int, int, cred_t *);
static int	srn_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int	srn_chpoll(dev_t, short, int, short *, struct pollhead **);

static struct cb_ops srn_cb_ops = {
	srn_open,	/* open */
	srn_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	srn_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	srn_chpoll,	/* poll */
	ddi_prop_op,	/* prop_op */
	NULL,		/* streamtab */
	D_NEW | D_MP	/* driver compatibility flag */
};

static int srn_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int srn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int srn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static void srn_notify(int type, int event);

static struct dev_ops srn_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	srn_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	srn_attach,		/* attach */
	srn_detach,		/* detach */
	nodev,			/* reset */
	&srn_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	"srn driver",
	&srn_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, 0
};

/* Local functions */

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

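/*
 * srn_attach() creates a single minor node named "srn"; user-level clients
 * open the corresponding device node (conventionally surfaced as /dev/srn,
 * though that alias comes from the system's device-link machinery rather
 * than from this driver).  Only one instance is ever allowed to attach.
 */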
static int
srn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		i;
	extern void (*srn_signal)(int, int);

	switch (cmd) {

	case DDI_ATTACH:
		if (srn.srn_instance != -1)	/* Only allow one instance */
			return (DDI_FAILURE);
		srn.srn_instance = ddi_get_instance(dip);
		if (ddi_create_minor_node(dip, "srn", S_IFCHR,
		    (srn.srn_instance << 8) + 0, DDI_PSEUDO, 0)
		    != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		srn.srn_dip = dip;	/* srn_init and getinfo depend on it */

		for (i = 0; i < SRN_MAX_CLONE; i++)
			cv_init(&srn_clones_cv[i], NULL, CV_DEFAULT, NULL);

		srn.srn_instance = ddi_get_instance(dip);
		mutex_enter(&srn_clone_lock);
		srn_signal = srn_notify;
		mutex_exit(&srn_clone_lock);
		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/* ARGSUSED */
static int
srn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i;
	extern int srn_inuse;
	extern void (*srn_signal)(int, int);

	switch (cmd) {
	case DDI_DETACH:

		mutex_enter(&srn_clone_lock);
		while (srn_inuse) {
			mutex_exit(&srn_clone_lock);
			delay(1);
			mutex_enter(&srn_clone_lock);
		}
		srn_signal = NULL;
		mutex_exit(&srn_clone_lock);

		for (i = 0; i < SRN_MAX_CLONE; i++)
			cv_destroy(&srn_clones_cv[i]);

		ddi_remove_minor_node(dip, NULL);
		srn.srn_instance = -1;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


#ifdef DEBUG
char *srn_cmd_string;
int srn_cmd;
#endif

/*
 * Returns true if permission granted by credentials
 * XXX
 */
static int
srn_perms(int perm, cred_t *cr)
{
	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
		return (1);
	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
		return (1);
	return (0);
}

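/*
 * chpoll(9E) entry point.  POLLIN | POLLRDNORM is reported once srn_notify()
 * has marked an undelivered event for this clone (srn_poll_cnt[clone] != 0);
 * otherwise the caller is parked on the clone's pollhead.
 */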
static int
srn_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
	struct pollhead **phpp)
{
	extern struct pollhead srn_pollhead[];
	int	clone;

	clone = SRN_MINOR_TO_CLONE(getminor(dev));
	if ((events & (POLLIN | POLLRDNORM)) && srn_poll_cnt[clone]) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
		if (!anyyet) {
			*phpp = &srn_pollhead[clone];
		}
	}
	return (0);
}

/*ARGSUSED*/
static int
srn_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t	dev;
	int	instance;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (srn.srn_instance == -1)
			return (DDI_FAILURE);
		*result = srn.srn_dip;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = getminor(dev) >> 8;
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


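/*
 * open(9E) entry point.  Each open gets a private clone minor; clone 0 is
 * never handed out, so the minor created in srn_attach() ((instance << 8)
 * + 0) always refers to the real node.  Every new client starts out as an
 * APM-style (blocking-ack) client; a later SRN_IOC_AUTOSX ioctl converts
 * it to the non-blocking variety.
 */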
/*ARGSUSED1*/
static int
srn_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	int		clone;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	mutex_enter(&srn_clone_lock);
	for (clone = 1; clone < SRN_MAX_CLONE; clone++)
		if (!srn.srn_clones[clone])
			break;

	if (clone == SRN_MAX_CLONE) {
		mutex_exit(&srn_clone_lock);
		return (ENXIO);
	}
	ASSERT(srn_apm_count >= 0);
	srn_apm_count++;
	srn.srn_type[clone] = SRN_TYPE_APM;

	*devp = makedevice(getmajor(*devp), (srn.srn_instance << 8) +
	    clone);
	srn.srn_clones[clone] = 1;
	srn.srn_cred[clone] = cr;
	crhold(cr);
	mutex_exit(&srn_clone_lock);
	PMD(PMD_SX, ("srn open OK\n"))
	return (0);
}

/*ARGSUSED1*/
static int
srn_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	int clone;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	clone = SRN_MINOR_TO_CLONE(getminor(dev));
	PMD(PMD_SX, ("srn_close: minor %x, clone %x\n", getminor(dev),
	    clone))
	mutex_enter(&srn_clone_lock);
	crfree(srn.srn_cred[clone]);
	srn.srn_cred[clone] = 0;
	srn_poll_cnt[clone] = 0;
	srn.srn_fault[clone] = 0;
	if (srn.srn_pending[clone].ae_type || srn.srn_delivered[clone]) {
		srn.srn_pending[clone].ae_type = 0;
		srn.srn_delivered[clone] = 0;
		cv_signal(&srn_clones_cv[clone]);
	}
	switch (srn.srn_type[clone]) {
	case SRN_TYPE_AUTOSX:
		ASSERT(srn_autosx_count);
		srn_autosx_count--;
		break;
	case SRN_TYPE_APM:
		ASSERT(srn_apm_count);
		srn_apm_count--;
		break;
	default:
		ASSERT(0);
		mutex_exit(&srn_clone_lock);
		return (EINVAL);
	}
	srn.srn_clones[clone] = 0;
	mutex_exit(&srn_clone_lock);
	return (0);
}

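/*
 * ioctl(9E) entry point.  The sketch below shows how a user-level client
 * (such as Xorg) might drive this interface; it is illustrative only, and
 * it assumes the conventional /dev/srn alias for the minor node created in
 * srn_attach().  EWOULDBLOCK from SRN_IOC_NEXTEVENT simply means no event
 * is pending; in a real client the acks would follow the client's own
 * suspend/resume work.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *	#include <sys/srn.h>
 *
 *	void
 *	srn_client_loop(void)
 *	{
 *		int fd = open("/dev/srn", O_RDWR);
 *		struct pollfd pfd = { fd, POLLIN, 0 };
 *		srn_event_info_t ev;
 *
 *		for (;;) {
 *			(void) poll(&pfd, 1, -1);
 *			if (ioctl(fd, SRN_IOC_NEXTEVENT, &ev) != 0)
 *				continue;
 *			if (ev.ae_type == SRN_SUSPEND_REQ)
 *				(void) ioctl(fd, SRN_IOC_SUSPEND, 0);
 *			else if (ev.ae_type == SRN_NORMAL_RESUME)
 *				(void) ioctl(fd, SRN_IOC_RESUME, 0);
 *		}
 *	}
 */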
/*ARGSUSED*/
static int
srn_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
{
	int clone = SRN_MINOR_TO_CLONE(getminor(dev));

	PMD(PMD_SX, ("ioctl: %x: begin\n", cmd))

	switch (cmd) {
	case SRN_IOC_NEXTEVENT:
	case SRN_IOC_SUSPEND:
	case SRN_IOC_RESUME:
	case SRN_IOC_AUTOSX:
		break;
	default:
		return (ENOTTY);
	}

	if (!srn_perms(SU | SG, srn.srn_cred[clone])) {
		return (EPERM);
	}
	switch (cmd) {
	case SRN_IOC_AUTOSX:
		PMD(PMD_SX, ("SRN_IOC_AUTOSX entered\n"))
		mutex_enter(&srn_clone_lock);
		if (!srn.srn_clones[clone]) {
			PMD(PMD_SX, (" ioctl !srn_clones--EINVAL\n"))
			mutex_exit(&srn_clone_lock);
			return (EINVAL);
		}
		if (srn.srn_pending[clone].ae_type) {
			PMD(PMD_SX, ("AUTOSX while pending--EBUSY\n"))
			mutex_exit(&srn_clone_lock);
			return (EBUSY);
		}
		if (srn.srn_type[clone] == SRN_TYPE_AUTOSX) {
			PMD(PMD_SX, ("AUTOSX already--EBUSY\n"))
			mutex_exit(&srn_clone_lock);
			return (EBUSY);
		}
		ASSERT(srn.srn_type[clone] == SRN_TYPE_APM);
		srn.srn_type[clone] = SRN_TYPE_AUTOSX;
		srn.srn_fault[clone] = 0;
		srn_apm_count--;
		ASSERT(srn_apm_count >= 0);
		ASSERT(srn_autosx_count >= 0);
		srn_autosx_count++;
		mutex_exit(&srn_clone_lock);
		PMD(PMD_SX, ("SRN_IOC_AUTOSX returns success\n"))
		return (0);

	case SRN_IOC_NEXTEVENT:
		/*
		 * Return the next suspend or resume event; there should be
		 * one, because we are only called after poll data completion
		 * has been signalled.  The kernel thread sleeping for the
		 * delivery is woken later, when the event is acked.
		 */
		PMD(PMD_SX, ("SRN_IOC_NEXTEVENT entered\n"))
		if (srn.srn_fault[clone]) {
			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d fault "
			    "cleared\n", clone))
			srn.srn_fault[clone] = 0;
		}
		mutex_enter(&srn_clone_lock);
		if (srn_poll_cnt[clone] == 0) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d "
			    "EWOULDBLOCK\n", clone))
			return (EWOULDBLOCK);
		}
		ASSERT(srn.srn_pending[clone].ae_type);
		if (ddi_copyout(&srn.srn_pending[clone], (void *)arg,
		    sizeof (srn_event_info_t), mode) != 0) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d EFAULT\n",
			    clone))
			return (EFAULT);
		}
		if (srn.srn_type[clone] == SRN_TYPE_APM)
			srn.srn_delivered[clone] =
			    srn.srn_pending[clone].ae_type;
		PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d delivered %x\n",
		    clone, srn.srn_pending[clone].ae_type))
		srn_poll_cnt[clone] = 0;
		mutex_exit(&srn_clone_lock);
		return (0);

	case SRN_IOC_SUSPEND:
		/* ack suspend */
		PMD(PMD_SX, ("SRN_IOC_SUSPEND entered clone %d\n", clone))
		if (srn.srn_fault[clone]) {
			PMD(PMD_SX, ("SRN_IOC_SUSPEND clone %d fault "
			    "cleared\n", clone))
			srn.srn_fault[clone] = 0;
		}
		mutex_enter(&srn_clone_lock);
		if (srn.srn_delivered[clone] != SRN_SUSPEND_REQ) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_SUSPEND EINVAL\n"))
			return (EINVAL);
		}
		srn.srn_delivered[clone] = 0;
		srn.srn_pending[clone].ae_type = 0;
		/* notify the kernel suspend thread to continue */
		PMD(PMD_SX, ("SRN_IOC_SUSPEND clone %d ok\n", clone))
		cv_signal(&srn_clones_cv[clone]);
		mutex_exit(&srn_clone_lock);
		return (0);

	case SRN_IOC_RESUME:
		/* ack resume */
		PMD(PMD_SX, ("SRN_IOC_RESUME entered clone %d\n", clone))
		if (srn.srn_fault[clone]) {
			PMD(PMD_SX, ("SRN_IOC_RESUME clone %d fault "
			    "cleared\n", clone))
			srn.srn_fault[clone] = 0;
		}
		mutex_enter(&srn_clone_lock);
		if (srn.srn_delivered[clone] != SRN_NORMAL_RESUME) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_RESUME EINVAL\n"))
			return (EINVAL);
		}
		srn.srn_delivered[clone] = 0;
		srn.srn_pending[clone].ae_type = 0;
		/* notify the kernel resume thread to continue */
		PMD(PMD_SX, ("SRN_IOC_RESUME ok for clone %d\n", clone))
		cv_signal(&srn_clones_cv[clone]);
		mutex_exit(&srn_clone_lock);
		return (0);

	default:
		PMD(PMD_SX, ("srn_ioctl unknown cmd EINVAL\n"))
		return (EINVAL);
	}
}
/*
 * A very simple handshake with the srn driver: only one outstanding event
 * at a time.  The OS delivers the event and, depending on its type, either
 * blocks waiting for the ack or drives on.
 */
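/*
 * APM-style clients that fail to ack within srn_timeout seconds are marked
 * faulted: srn_notify() stops waiting on them, and the fault is cleared the
 * next time the client issues one of the srn ioctls.
 */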
void
srn_notify(int type, int event)
{
	int clone, count;
	PMD(PMD_SX, ("srn_notify entered with type %d, event 0x%x\n",
	    type, event));
	ASSERT(mutex_owned(&srn_clone_lock));
	switch (type) {
	case SRN_TYPE_APM:
		if (srn_apm_count == 0) {
			PMD(PMD_SX, ("no apm types\n"))
			return;
		}
		count = srn_apm_count;
		break;
	case SRN_TYPE_AUTOSX:
		if (srn_autosx_count == 0) {
			PMD(PMD_SX, ("no autosx types\n"))
			return;
		}
		count = srn_autosx_count;
		break;
	default:
		ASSERT(0);
		return;		/* unknown type; nothing to deliver */
	}
	ASSERT(count > 0);
	PMD(PMD_SX, ("count %d\n", count))
	for (clone = 0; clone < SRN_MAX_CLONE; clone++) {
		if (srn.srn_type[clone] == type) {
#ifdef DEBUG
			if (type == SRN_TYPE_APM && !srn.srn_fault[clone]) {
				ASSERT(srn.srn_pending[clone].ae_type == 0);
				ASSERT(srn_poll_cnt[clone] == 0);
				ASSERT(srn.srn_delivered[clone] == 0);
			}
#endif
			srn.srn_pending[clone].ae_type = event;
			srn_poll_cnt[clone] = 1;
			PMD(PMD_SX, ("pollwake %d\n", clone))
			pollwakeup(&srn_pollhead[clone], (POLLRDNORM | POLLIN));
			count--;
			if (count == 0)
				break;
		}
	}
	if (type == SRN_TYPE_AUTOSX) {		/* we don't wait */
		PMD(PMD_SX, ("Not waiting for AUTOSX ack\n"))
		return;
	}
	ASSERT(type == SRN_TYPE_APM);
	/* otherwise wait for acks */
restart:
	/*
	 * We wait until all of the pending events are cleared.
	 * We have to start over every time we do a cv_wait because
	 * we give up the mutex and can be re-entered
	 */
	for (clone = 1; clone < SRN_MAX_CLONE; clone++) {
		if (srn.srn_clones[clone] == 0 ||
		    srn.srn_type[clone] != SRN_TYPE_APM)
			continue;
		if (srn.srn_pending[clone].ae_type && !srn.srn_fault[clone]) {
			PMD(PMD_SX, ("srn_notify waiting for ack for clone %d, "
			    "event %x\n", clone, event))
			if (cv_timedwait(&srn_clones_cv[clone],
			    &srn_clone_lock, ddi_get_lbolt() +
			    drv_usectohz(srn_timeout * 1000000)) == -1) {
				/*
				 * Client didn't respond; mark it as faulted
				 * and carry on as if it had acked normally.
				 */
				PMD(PMD_SX, ("srn_notify: clone %d did not "
				    "ack event %x\n", clone, event))
				cmn_err(CE_WARN, "srn_notify: clone %d did "
				    "not ack event %x\n", clone, event);
				srn.srn_fault[clone] = 1;
			}
			goto restart;
		}
	}
	PMD(PMD_SX, ("srn_notify done with %x\n", event))
}
599