xref: /illumos-gate/usr/src/uts/common/io/srn.c (revision 2f4149ea09454b3def6fe8245992f7e26571a1e8)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2017 Joyent, Inc.
 */


/*
 * srn	Provide apm-like interfaces to Xorg
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/conf.h>		/* driver flags and functions */
#include <sys/open.h>		/* OTYP_CHR definition */
#include <sys/stat.h>		/* S_IFCHR definition */
#include <sys/pathname.h>	/* name -> dev_info xlation */
#include <sys/kmem.h>		/* memory alloc stuff */
#include <sys/debug.h>
#include <sys/pm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/epm.h>
#include <sys/vfs.h>
#include <sys/mode.h>
#include <sys/mkdev.h>
#include <sys/promif.h>
#include <sys/consdev.h>
#include <sys/ddi_impldefs.h>
#include <sys/poll.h>
#include <sys/note.h>
#include <sys/taskq.h>
#include <sys/policy.h>
#include <sys/srn.h>

/*
 * The minor number is (instance << 8) + a clone minor in the range 1-255;
 * only one instance is ever allocated, so the clone number alone
 * identifies an open.
 */
#define	SRN_MINOR_TO_CLONE(minor) ((minor) & (SRN_MAX_CLONE - 1))
#define	SU		0x002
#define	SG		0x004
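
/*
 * Illustrative example (editor's note): if SRN_MAX_CLONE is 16 (an
 * assumption; see <sys/srn.h>) and the instance is 0, an open that lands
 * on clone 3 gets minor number (0 << 8) + 3 == 3, and
 * SRN_MINOR_TO_CLONE(3) recovers 3, since the macro masks with
 * (SRN_MAX_CLONE - 1).
 */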

extern kmutex_t	srn_clone_lock;	/* protects srn_clones array */
extern kcondvar_t srn_clones_cv[SRN_MAX_CLONE];
extern uint_t	srn_poll_cnt[SRN_MAX_CLONE];

/*
 * The soft state of the srn driver.  Since there will only be
 * one of these, just reference it through a static struct.
 */
static struct srnstate {
	dev_info_t	*srn_dip;		/* ptr to our dev_info node */
	int		srn_instance;		/* for ddi_get_instance() */
	uchar_t		srn_clones[SRN_MAX_CLONE]; /* unique opens	*/
	struct cred	*srn_cred[SRN_MAX_CLONE]; /* cred for each open	*/
	int		srn_type[SRN_MAX_CLONE]; /* type of handshake */
	int		srn_delivered[SRN_MAX_CLONE];
	srn_event_info_t srn_pending[SRN_MAX_CLONE];
	int		srn_fault[SRN_MAX_CLONE];
} srn = { NULL, -1};
typedef struct srnstate *srn_state_t;

kcondvar_t	srn_clones_cv[SRN_MAX_CLONE];
uint_t		srn_poll_cnt[SRN_MAX_CLONE];	/* count of events for poll */
int		srn_apm_count;
int		srn_autosx_count;
/* Number of seconds to wait for clients to ack a poll */
int		srn_timeout = 10;

struct pollhead	srn_pollhead[SRN_MAX_CLONE];

static int	srn_open(dev_t *, int, int, cred_t *);
static int	srn_close(dev_t, int, int, cred_t *);
static int	srn_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int	srn_chpoll(dev_t, short, int, short *, struct pollhead **);

static struct cb_ops srn_cb_ops = {
	srn_open,	/* open */
	srn_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	srn_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	srn_chpoll,	/* poll */
	ddi_prop_op,	/* prop_op */
	NULL,		/* streamtab */
	D_NEW | D_MP	/* driver compatibility flag */
};

static int srn_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int srn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int srn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static void srn_notify(int type, int event);

static struct dev_ops srn_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	srn_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	srn_attach,		/* attach */
	srn_detach,		/* detach */
	nodev,			/* reset */
	&srn_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	"srn driver",
	&srn_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, 0
};

/* Local functions */

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
srn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		i;
	extern void (*srn_signal)(int, int);

	switch (cmd) {

	case DDI_ATTACH:
		if (srn.srn_instance != -1)	/* Only allow one instance */
			return (DDI_FAILURE);
		srn.srn_instance = ddi_get_instance(dip);
		if (ddi_create_minor_node(dip, "srn", S_IFCHR,
		    (srn.srn_instance << 8) + 0, DDI_PSEUDO, 0)
		    != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		srn.srn_dip = dip;	/* srn_init and getinfo depend on it */

		for (i = 0; i < SRN_MAX_CLONE; i++)
			cv_init(&srn_clones_cv[i], NULL, CV_DEFAULT, NULL);

		srn.srn_instance = ddi_get_instance(dip);
		mutex_enter(&srn_clone_lock);
		srn_signal = srn_notify;
		mutex_exit(&srn_clone_lock);
		ddi_report_dev(dip);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
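
/*
 * Editor's note (illustrative sketch, not part of this file): srn_attach()
 * installs srn_notify() in the global srn_signal hook.  Based on the
 * ASSERT(mutex_owned()) in srn_notify() and the srn_inuse handling in
 * srn_detach(), the suspend/resume code elsewhere in the kernel is expected
 * to invoke the hook roughly as:
 *
 *	mutex_enter(&srn_clone_lock);
 *	if (srn_signal != NULL)
 *		(*srn_signal)(SRN_TYPE_APM, SRN_SUSPEND_REQ);
 *	mutex_exit(&srn_clone_lock);
 *
 * The exact caller is an assumption here; only the locking contract is
 * implied by this file.
 */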

/* ARGSUSED */
static int
srn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i;
	extern int srn_inuse;
	extern void (*srn_signal)(int, int);

	switch (cmd) {
	case DDI_DETACH:

		mutex_enter(&srn_clone_lock);
		while (srn_inuse) {
			mutex_exit(&srn_clone_lock);
			delay(1);
			mutex_enter(&srn_clone_lock);
		}
		srn_signal = NULL;
		mutex_exit(&srn_clone_lock);

		for (i = 0; i < SRN_MAX_CLONE; i++)
			cv_destroy(&srn_clones_cv[i]);

		ddi_remove_minor_node(dip, NULL);
		srn.srn_instance = -1;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


#ifdef DEBUG
char *srn_cmd_string;
int srn_cmd;
#endif

/*
 * Returns true if permission granted by credentials
 * XXX
 */
static int
srn_perms(int perm, cred_t *cr)
{
	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
		return (1);
	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
		return (1);
	return (0);
}

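/*
 * Poll entry point: report POLLIN | POLLRDNORM while an event is pending
 * for this clone; otherwise hand back the clone's pollhead so the caller
 * can wait for srn_notify() to post one.
 */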
static int
srn_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	extern struct pollhead srn_pollhead[];
	int	clone;

	clone = SRN_MINOR_TO_CLONE(getminor(dev));
	if ((events & (POLLIN | POLLRDNORM)) && srn_poll_cnt[clone]) {
		*reventsp |= (POLLIN | POLLRDNORM);
	} else {
		*reventsp = 0;
	}

	if ((*reventsp == 0 && !anyyet) || (events & POLLET)) {
		*phpp = &srn_pollhead[clone];
	}
	return (0);
}

/*ARGSUSED*/
static int
srn_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t	dev;
	int	instance;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (srn.srn_instance == -1)
			return (DDI_FAILURE);
		*result = srn.srn_dip;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = getminor(dev) >> 8;
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


/*ARGSUSED1*/
static int
srn_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	int		clone;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	mutex_enter(&srn_clone_lock);
	for (clone = 1; clone < SRN_MAX_CLONE; clone++)
		if (!srn.srn_clones[clone])
			break;

	if (clone == SRN_MAX_CLONE) {
		mutex_exit(&srn_clone_lock);
		return (ENXIO);
	}
	srn.srn_cred[clone] = cr;
	crhold(cr);
	ASSERT(srn_apm_count >= 0);
	srn_apm_count++;
	srn.srn_type[clone] = SRN_TYPE_APM;

	*devp = makedevice(getmajor(*devp), (srn.srn_instance << 8) +
	    clone);
	srn.srn_clones[clone] = 1;
	mutex_exit(&srn_clone_lock);
	PMD(PMD_SX, ("srn open OK\n"))
	return (0);
}

/*ARGSUSED1*/
static int
srn_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	int clone;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	clone = SRN_MINOR_TO_CLONE(getminor(dev));
	PMD(PMD_SX, ("srn_close: minor %x, clone %x\n", getminor(dev),
	    clone))
	mutex_enter(&srn_clone_lock);
	crfree(srn.srn_cred[clone]);
	srn.srn_cred[clone] = NULL;
	srn_poll_cnt[clone] = 0;
	srn.srn_fault[clone] = 0;
	if (srn.srn_pending[clone].ae_type || srn.srn_delivered[clone]) {
		srn.srn_pending[clone].ae_type = 0;
		srn.srn_delivered[clone] = 0;
		cv_signal(&srn_clones_cv[clone]);
	}
	switch (srn.srn_type[clone]) {
	case SRN_TYPE_AUTOSX:
		ASSERT(srn_autosx_count);
		srn_autosx_count--;
		break;
	case SRN_TYPE_APM:
		ASSERT(srn_apm_count);
		srn_apm_count--;
		break;
	default:
		ASSERT(0);
		/* don't leave the clone lock held on this error path */
		mutex_exit(&srn_clone_lock);
		return (EINVAL);
	}
	srn.srn_clones[clone] = 0;
	mutex_exit(&srn_clone_lock);
	return (0);
}

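/*
 * Ioctl interface used by the user-space client (e.g. Xorg):
 * SRN_IOC_AUTOSX switches a clone from the APM-style handshake (acks
 * required) to the autosx handshake (no ack required), SRN_IOC_NEXTEVENT
 * fetches the pending event, and SRN_IOC_SUSPEND/SRN_IOC_RESUME
 * acknowledge delivery of a suspend or resume event.
 */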
/*ARGSUSED*/
static int
srn_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
{
	int clone = SRN_MINOR_TO_CLONE(getminor(dev));

	PMD(PMD_SX, ("ioctl: %x: begin\n", cmd))

	switch (cmd) {
	case SRN_IOC_NEXTEVENT:
	case SRN_IOC_SUSPEND:
	case SRN_IOC_RESUME:
	case SRN_IOC_AUTOSX:
		break;
	default:
		return (ENOTTY);
	}

	if (!srn_perms(SU | SG, srn.srn_cred[clone])) {
		return (EPERM);
	}
	switch (cmd) {
	case SRN_IOC_AUTOSX:
		PMD(PMD_SX, ("SRN_IOC_AUTOSX entered\n"))
		mutex_enter(&srn_clone_lock);
		if (!srn.srn_clones[clone]) {
			PMD(PMD_SX, (" ioctl !srn_clones--EINVAL\n"))
			mutex_exit(&srn_clone_lock);
			return (EINVAL);
		}
		if (srn.srn_pending[clone].ae_type) {
			PMD(PMD_SX, ("AUTOSX while pending--EBUSY\n"))
			mutex_exit(&srn_clone_lock);
			return (EBUSY);
		}
		if (srn.srn_type[clone] == SRN_TYPE_AUTOSX) {
			PMD(PMD_SX, ("AUTOSX already--EBUSY\n"))
			mutex_exit(&srn_clone_lock);
			return (EBUSY);
		}
		ASSERT(srn.srn_type[clone] == SRN_TYPE_APM);
		srn.srn_type[clone] = SRN_TYPE_AUTOSX;
		srn.srn_fault[clone] = 0;
		srn_apm_count--;
		ASSERT(srn_apm_count >= 0);
		ASSERT(srn_autosx_count >= 0);
		srn_autosx_count++;
		mutex_exit(&srn_clone_lock);
		PMD(PMD_SX, ("SRN_IOC_AUTOSX returns success\n"))
		return (0);

	case SRN_IOC_NEXTEVENT:
		/*
		 * Return the next suspend or resume event.  There should be
		 * one pending, since we are only called after poll has
		 * reported data ready; the kernel thread sleeping on the
		 * delivery is woken later, when the client acks the event.
		 */
		PMD(PMD_SX, ("SRN_IOC_NEXTEVENT entered\n"))
		if (srn.srn_fault[clone]) {
			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d fault "
			    "cleared\n", clone))
			srn.srn_fault[clone] = 0;
		}
		mutex_enter(&srn_clone_lock);
		if (srn_poll_cnt[clone] == 0) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d "
			    "EWOULDBLOCK\n", clone))
			return (EWOULDBLOCK);
		}
		ASSERT(srn.srn_pending[clone].ae_type);
		if (ddi_copyout(&srn.srn_pending[clone], (void *)arg,
		    sizeof (srn_event_info_t), mode) != 0) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d EFAULT\n",
			    clone))
			return (EFAULT);
		}
		if (srn.srn_type[clone] == SRN_TYPE_APM)
			srn.srn_delivered[clone] =
			    srn.srn_pending[clone].ae_type;
		PMD(PMD_SX, ("SRN_IOC_NEXTEVENT clone %d delivered %x\n",
		    clone, srn.srn_pending[clone].ae_type))
		srn_poll_cnt[clone] = 0;
		mutex_exit(&srn_clone_lock);
		return (0);

	case SRN_IOC_SUSPEND:
		/* ack suspend */
		PMD(PMD_SX, ("SRN_IOC_SUSPEND entered clone %d\n", clone))
		if (srn.srn_fault[clone]) {
			PMD(PMD_SX, ("SRN_IOC_SUSPEND clone %d fault "
			    "cleared\n", clone))
			srn.srn_fault[clone] = 0;
		}
		mutex_enter(&srn_clone_lock);
		if (srn.srn_delivered[clone] != SRN_SUSPEND_REQ) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_SUSPEND EINVAL\n"))
			return (EINVAL);
		}
		srn.srn_delivered[clone] = 0;
		srn.srn_pending[clone].ae_type = 0;
		/* notify the kernel suspend thread to continue */
		PMD(PMD_SX, ("SRN_IOC_SUSPEND clone %d ok\n", clone))
		cv_signal(&srn_clones_cv[clone]);
		mutex_exit(&srn_clone_lock);
		return (0);

	case SRN_IOC_RESUME:
		/* ack resume */
		PMD(PMD_SX, ("SRN_IOC_RESUME entered clone %d\n", clone))
		if (srn.srn_fault[clone]) {
			PMD(PMD_SX, ("SRN_IOC_RESUME clone %d fault "
			    "cleared\n", clone))
			srn.srn_fault[clone] = 0;
		}
		mutex_enter(&srn_clone_lock);
		if (srn.srn_delivered[clone] != SRN_NORMAL_RESUME) {
			mutex_exit(&srn_clone_lock);
			PMD(PMD_SX, ("SRN_IOC_RESUME EINVAL\n"))
			return (EINVAL);
		}
		srn.srn_delivered[clone] = 0;
		srn.srn_pending[clone].ae_type = 0;
		/* notify the kernel resume thread to continue */
		PMD(PMD_SX, ("SRN_IOC_RESUME ok for clone %d\n", clone))
		cv_signal(&srn_clones_cv[clone]);
		mutex_exit(&srn_clone_lock);
		return (0);

	default:
		PMD(PMD_SX, ("srn_ioctl unknown cmd EINVAL\n"))
		return (EINVAL);
	}
}
/*
 * A very simple handshake with the srn driver: only one outstanding
 * event at a time.  The OS delivers the event and, depending on its
 * type, either blocks waiting for the ack or continues without one.
 */
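
/*
 * Illustrative sketch (editor's note, not part of the driver): a minimal
 * user-space client loop for this handshake.  The device path and error
 * handling below are assumptions; the ioctls and srn_event_info_t come
 * from <sys/srn.h>.
 *
 *	int fd = open("/dev/srn", O_RDWR);
 *	struct pollfd pfd = { fd, POLLIN, 0 };
 *	srn_event_info_t ev;
 *
 *	for (;;) {
 *		(void) poll(&pfd, 1, -1);
 *		if (ioctl(fd, SRN_IOC_NEXTEVENT, &ev) != 0)
 *			continue;		(EWOULDBLOCK, EFAULT, ...)
 *		if (ev.ae_type == SRN_SUSPEND_REQ)
 *			(void) ioctl(fd, SRN_IOC_SUSPEND, 0);
 *		else if (ev.ae_type == SRN_NORMAL_RESUME)
 *			(void) ioctl(fd, SRN_IOC_RESUME, 0);
 *	}
 *
 * Acking with SRN_IOC_SUSPEND/SRN_IOC_RESUME is what releases the kernel
 * thread blocked in srn_notify() below.
 */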
void
srn_notify(int type, int event)
{
	int clone, count;

	PMD(PMD_SX, ("srn_notify entered with type %d, event 0x%x\n",
	    type, event));
	ASSERT(mutex_owned(&srn_clone_lock));
	switch (type) {
	case SRN_TYPE_APM:
		if (srn_apm_count == 0) {
			PMD(PMD_SX, ("no apm types\n"))
			return;
		}
		count = srn_apm_count;
		break;
	case SRN_TYPE_AUTOSX:
		if (srn_autosx_count == 0) {
			PMD(PMD_SX, ("no autosx types\n"))
			return;
		}
		count = srn_autosx_count;
		break;
	default:
		ASSERT(0);
		break;
	}
	ASSERT(count > 0);
	PMD(PMD_SX, ("count %d\n", count))
	for (clone = 0; clone < SRN_MAX_CLONE; clone++) {
		if (srn.srn_type[clone] == type) {
#ifdef DEBUG
			if (type == SRN_TYPE_APM && !srn.srn_fault[clone]) {
				ASSERT(srn.srn_pending[clone].ae_type == 0);
				ASSERT(srn_poll_cnt[clone] == 0);
				ASSERT(srn.srn_delivered[clone] == 0);
			}
#endif
			srn.srn_pending[clone].ae_type = event;
			srn_poll_cnt[clone] = 1;
			PMD(PMD_SX, ("pollwake %d\n", clone))
			pollwakeup(&srn_pollhead[clone], (POLLRDNORM | POLLIN));
			count--;
			if (count == 0)
				break;
		}
	}
	if (type == SRN_TYPE_AUTOSX) {		/* we don't wait */
		PMD(PMD_SX, ("Not waiting for AUTOSX ack\n"))
		return;
	}
	ASSERT(type == SRN_TYPE_APM);
	/* otherwise wait for acks */
restart:
	/*
	 * We wait until all of the pending events are acknowledged.
	 * We have to start over after every cv_timedwait() because we
	 * give up the mutex and can be re-entered in the meantime.
	 */
	for (clone = 1; clone < SRN_MAX_CLONE; clone++) {
		if (srn.srn_clones[clone] == 0 ||
		    srn.srn_type[clone] != SRN_TYPE_APM)
			continue;
		if (srn.srn_pending[clone].ae_type && !srn.srn_fault[clone]) {
			PMD(PMD_SX, ("srn_notify waiting for ack for clone %d, "
			    "event %x\n", clone, event))
			if (cv_timedwait(&srn_clones_cv[clone],
			    &srn_clone_lock, ddi_get_lbolt() +
			    drv_usectohz(srn_timeout * 1000000)) == -1) {
				/*
				 * Client didn't respond, mark it as faulted
				 * and continue as if a regular signal.
				 */
				PMD(PMD_SX, ("srn_notify: clone %d did not "
				    "ack event %x\n", clone, event))
				cmn_err(CE_WARN, "srn_notify: clone %d did "
				    "not ack event %x\n", clone, event);
				srn.srn_fault[clone] = 1;
			}
			goto restart;
		}
	}
	PMD(PMD_SX, ("srn_notify done with %x\n", event))
}