xref: /titanic_44/usr/src/uts/sun/io/ttymux/ttymux.c (revision 193974072f41a843678abf5f61979c748687e66b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 
27 /*
28  * DESCRIPTION
29  *
30  * ttymux - Multiplexer driver for multiplexing termio compliant streams onto
31  * a single upper stream.
32  *
33  * The ADD2FRONT macro can be used to specify the order in which a console
34  * device is put in the queue of multiplexed physical serial devices,
35  * during the association and disassociation of a console interface.
36  * When this macro is defined, the device is placed in front of the queue,
37  * otherwise by default it is placed at the end.
38  * Console I/O happens to each of the physical devices in the order of
39  * their position in this queue.
40  */
41 
42 #include <sys/types.h>
43 #include <sys/file.h>
44 #include <sys/stream.h>
45 #include <sys/strsubr.h>
46 #include <sys/strlog.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/debug.h>
50 #include <sys/kbio.h>
51 #include <sys/devops.h>
52 #include <sys/errno.h>
53 #include <sys/stat.h>
54 #include <sys/kmem.h>
55 #include <sys/ddi.h>
56 #include <sys/consdev.h>
57 #include <sys/tty.h>
58 #include <sys/ptyvar.h>
59 #include <sys/termio.h>
60 #include <sys/fcntl.h>
61 #include <sys/mkdev.h>
62 #include <sys/ser_sync.h>
63 #include <sys/esunddi.h>
64 #include <sys/policy.h>
65 
66 #include <sys/ttymux.h>
67 #include "ttymux_impl.h"
68 
69 /*
70  * Extern declarations
71  */
72 extern mblk_t *mkiocb(uint_t);
73 extern int nulldev();
74 extern uintptr_t space_fetch(char *key);
75 
76 extern int sm_ioctl_cmd(sm_uqi_t *, mblk_t *);
77 extern int ttymux_abort_ioctl(mblk_t *);
78 extern int ttymux_device_fini(sm_lqi_t *);
79 extern int ttymux_device_init(sm_lqi_t *);
80 
81 /*
82  * Exported interfaces
83  */
84 int sm_disassociate(int, sm_lqi_t *, ulong_t);
85 int sm_associate(int, sm_lqi_t *, ulong_t, uint_t, char *);
86 
87 /*
88  * Variables defined here and visible only internally
89  */
90 sm_ss_t		*sm_ssp = 0;
91 static int	sm_instance = 0;
92 static int	smctlunit;
93 
94 static uint_t	sm_default_trflag = 0;
95 uint_t		sm_max_units = 6;
96 uint_t		sm_minor_cnt = 0;
97 static uint_t	sm_refuse_opens = 0;
98 
99 /*
100  * Local definitions.
101  */
102 
103 /* force these flags to be unset on console devices */
104 static ulong_t	sm_cmask = (ulong_t)(CRTSXOFF|CRTSCTS);
105 
106 /*
107  * SECTION
108  * Implementation Section:
109  */
110 void
111 sm_debug(char *msg, ...)
112 {
113 	va_list	args;
114 	char	buf[256];
115 	int	sz;
116 
117 	va_start(args, msg);
118 	sz = vsnprintf(buf, sizeof (buf), msg, args);
119 	va_end(args);
120 
121 	if (sz < 0)
122 		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 1,
123 		    SL_TRACE, "vsnprintf parse error\n");
124 	else if (sz >= sizeof (buf)) {
125 		char *b;
126 		size_t	len = sz + 1;
127 
128 		b = kmem_alloc(len, KM_SLEEP);
129 		va_start(args, msg);
130 		sz = vsnprintf(b, len, msg, args);
131 		va_end(args);
132 		if (sz > 0)
133 			(void) strlog(ddi_driver_major(sm_ssp->sm_dip),
134 			    sm_instance, 1, SL_TRACE, b);
135 		kmem_free(b, len);
136 	} else {
137 
138 		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance,
139 		    1, SL_TRACE, buf);
140 	}
141 }
142 
143 void
144 sm_log(char *msg, ...)
145 {
146 	va_list	args;
147 	char	buf[128];
148 	int	sz;
149 
150 	va_start(args, msg);
151 	sz = vsnprintf(buf, sizeof (buf), msg, args);
152 	va_end(args);
153 
154 	if (sz < 0)
155 		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 1,
156 		    SL_TRACE, "vsnprintf parse error\n");
157 	else if (sz >= sizeof (buf)) {
158 		char *b;
159 		size_t	len = sz + 1;
160 
161 		b = kmem_alloc(len, KM_SLEEP);
162 		va_start(args, msg);
163 		sz = vsnprintf(b, len, msg, args);
164 		va_end(args);
165 		if (sz > 0)
166 			(void) strlog(ddi_driver_major(sm_ssp->sm_dip),
167 			    sm_instance, 1, SL_NOTE, b);
168 		kmem_free(b, len);
169 	} else {
170 
171 		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance,
172 		    1, SL_NOTE, buf);
173 	}
174 }
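
/*
 * Illustrative sketch (not part of this driver, names are hypothetical):
 * the "format into a fixed buffer, then retry with a kmem_alloc'ed buffer
 * of the right size" pattern that sm_debug() and sm_log() above implement.
 *
 *	char	stackbuf[128];
 *	char	*buf = stackbuf;
 *	size_t	len = sizeof (stackbuf);
 *	va_list	ap;
 *	int	sz;
 *
 *	va_start(ap, fmt);
 *	sz = vsnprintf(buf, len, fmt, ap);
 *	va_end(ap);
 *	if (sz >= (int)len) {			// output was truncated
 *		len = sz + 1;
 *		buf = kmem_alloc(len, KM_SLEEP);
 *		va_start(ap, fmt);
 *		(void) vsnprintf(buf, len, fmt, ap);
 *		va_end(ap);
 *	}
 *	// ... hand buf to strlog(9F), as above ...
 *	if (buf != stackbuf)
 *		kmem_free(buf, len);
 */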
175 
176 /*
177  * Should only be called if the caller can guarantee that the vnode
178  * and/or the stream won't disappear while finding the dip.
179  * This routine is only called during an I_PLINK request so it's safe.
180  * The routine obtains the dev_t for a linked se stream.
181  */
182 static void
183 sm_setdip(queue_t *q, sm_lqi_t *lqi)
184 {
185 	lqi->sm_dev = q && STREAM(q) ? STREAM(q)->sd_vnode->v_rdev : NODEV;
186 }
187 
188 /*
189  * Called from driver close, state change reports and I_PUNLINK ioctl.
190  * A lower stream has been unlinked - clean up the state associated with it.
191  */
192 void
193 sm_lqifree(sm_lqi_t *lqi)
194 {
195 	int mu_owned;
196 	sm_lqi_t **pplqi;
197 
198 	ASSERT(mutex_owned(lqi->sm_umutex));
199 	ASSERT(SM_RQ(lqi) != 0);
200 
201 	/*
202 	 * Clear all state associated with this lower queue except
203 	 * the identity of the queues themselves and the link id which
204 	 * can only be cleared by issuing a streams I_PUNLINK ioctl.
205 	 *
206 	 * The association of a lower queue is a two step process:
207 	 * 1. initialise the lower q data structure on I_PLINK
208 	 * 2. associate an upper q with the lower q on SM_CMD_ASSOCIATE.
209 	 *
210 	 * If step 2 has occurred then
211 	 * remove this lower queue info from the logical unit.
212 	 */
213 	if (lqi->sm_uqi) {
214 		sm_dbg('Y', ("lqifree unit %d, ", lqi->sm_uqi->sm_lunit));
215 		if ((mu_owned = mutex_owned(lqi->sm_uqi->sm_umutex)) == 0)
216 			LOCK_UNIT(lqi->sm_uqi);
217 
218 		pplqi = &lqi->sm_uqi->sm_lqs;
219 		while (*pplqi != lqi) {
220 			ASSERT(*pplqi);
221 			pplqi = &((*pplqi)->sm_nlqi);
222 		}
223 		*pplqi = lqi->sm_nlqi;
224 		lqi->sm_uqi->sm_nlqs--;
225 
226 		if (mu_owned == 0)
227 			UNLOCK_UNIT(lqi->sm_uqi);
228 
229 		lqi->sm_uqi = 0;
230 	}
231 }
232 
233 /*
234  * Given a q return the associated lower queue data structure or NULL.
235  * Return the data locked.
236  */
237 static sm_lqi_t *
238 get_lqi_byq(queue_t *q)
239 {
240 	int i;
241 	sm_lqi_t *lqi, *flqi = 0;
242 
243 	for (i = 0; i < MAX_LQS; i++) {
244 		lqi = &sm_ssp->sm_lqs[i];
245 		LOCK_UNIT(lqi);
246 		if (flqi == 0 && lqi->sm_linkid == 0) /* assumes muxids != 0 */
247 			flqi = lqi;
248 		else if (SM_RQ(lqi) == q || SM_WQ(lqi) == q) {
249 			if (flqi)
250 				UNLOCK_UNIT(flqi);
251 			return (lqi);
252 		}
253 		else
254 			UNLOCK_UNIT(lqi);
255 	}
256 	return (flqi);
257 }
258 
259 /*
260  * Given a streams link identifier return the associated lower queue data
261  * structure or NULL.
262  */
263 sm_lqi_t *
264 get_lqi_byid(int linkid)
265 {
266 	int i;
267 	sm_lqi_t *lqi;
268 
269 	if (linkid == 0)
270 		return (NULL);
271 	for (i = 0; i < MAX_LQS; i++) {
272 		lqi = &sm_ssp->sm_lqs[i];
273 		if (lqi->sm_linkid == linkid)
274 			return (lqi);
275 	}
276 	return (NULL);
277 }
278 
279 /*
280  * Given a dev_t for a lower stream return the associated lower queue data
281  * structure or NULL.
282  */
283 sm_lqi_t *
284 get_lqi_bydevt(dev_t dev)
285 {
286 	int i;
287 	sm_lqi_t *lqi;
288 
289 	if (dev == NODEV)
290 		return (NULL);
291 
292 	for (i = 0; i < MAX_LQS; i++) {
293 		lqi = &sm_ssp->sm_lqs[i];
294 		if (lqi->sm_dev == dev)
295 			return (lqi);
296 	}
297 	return (NULL);
298 }
299 
300 /*
301  * Determine whether the input flag is set on at least
302  * howmany queues.
303  */
304 static int
305 sm_is_flag_set(sm_uqi_t *uqi, uint_t flag, uint_t howmany)
306 {
307 	sm_lqi_t *lqi;
308 
309 	if (howmany == 0)
310 		return (0);
311 
312 	for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) {
313 		if (lqi->sm_flags & flag)
314 			if (--howmany == 0)
315 				return (1);
316 	}
317 	return (0);
318 }
319 
320 /*
321  * Return 1 if all queues associated with a given upper stream are unusable.
322  */
323 static int
324 sm_uwq_error(sm_uqi_t *uqi)
325 {
326 	return (sm_is_flag_set(uqi, (WERROR_MODE|HANGUP_MODE), uqi->sm_nlqs));
327 }
328 
329 /*
330  * How many of the queues associated with a given upper stream
331  * - do not - have the given flags set.
332  */
333 static int
334 sm_q_count(sm_uqi_t *uqi, uint_t flag)
335 {
336 	sm_lqi_t *lqi;
337 	int count = 0;
338 
339 	for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) {
340 		if ((lqi->sm_flags & flag) == 0)
341 			count++;
342 	}
343 	return (count);
344 }
345 
346 /*
347  * How many of the queues associated with a given upper stream
348  * - do not - have the given flags set.
349  */
350 static int
351 sm_qs_without(sm_uqi_t *uqi, uint_t flag, uint_t ioflag)
352 {
353 	sm_lqi_t *lqi;
354 	int count = 0;
355 
356 	for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) {
357 		if ((lqi->sm_flags & flag) == 0 &&
358 		    (lqi->sm_ioflag & ioflag) == 0)
359 			count++;
360 	}
361 	return (count);
362 }
363 
364 /*
365  * How many usable queues are associated with a given upper stream
366  */
367 static int
368 sm_good_qs(sm_uqi_t *uqi)
369 {
370 	return (sm_q_count(uqi, (WERROR_MODE|HANGUP_MODE)));
371 }
372 
373 static int
374 sm_cnt_oqs(sm_uqi_t *uqi)
375 {
376 	return (sm_qs_without(uqi, (WERROR_MODE|HANGUP_MODE),
377 	    (uint_t)FOROUTPUT));
378 }
379 
380 /*
381  * Send an ioctl downstream and remember that it was sent so that
382  * its response can be caught on the way back up.
383  */
384 static void
385 sm_issue_ioctl(void *arg)
386 {
387 	sm_lqi_t *lqi = arg;
388 	uint_t cmdflag = 0;
389 	queue_t *q = SM_WQ(lqi);
390 	int iocmd, size;
391 
392 	LOCK_UNIT(lqi);
393 
394 	lqi->sm_bid = 0;
395 	if ((lqi->sm_flags & (WERROR_MODE|HANGUP_MODE)) == 0 &&
396 	    (lqi->sm_flags & (WANT_CDSTAT|WANT_TCSET))) {
397 		mblk_t *pioc;
398 
399 		if (lqi->sm_flags & WANT_TCSET) {
400 			lqi->sm_flags &= ~WANT_TCSET;
401 			iocmd = TCSETS;
402 			cmdflag = WANT_TCSET;
403 		} else if (lqi->sm_flags & WANT_SC) {
404 			lqi->sm_flags &= ~WANT_SC;
405 			iocmd = TIOCGSOFTCAR;
406 			cmdflag = WANT_SC;
407 		} else if (lqi->sm_flags & WANT_CD) {
408 			lqi->sm_flags &= ~WANT_CD;
409 			iocmd = TIOCMGET;
410 		} else if (lqi->sm_flags & WANT_CL) {
411 			lqi->sm_flags &= ~WANT_CL;
412 			iocmd = TCGETS;
413 			cmdflag = WANT_CL;
414 		} else {
415 			UNLOCK_UNIT(lqi);
416 			return;
417 		}
418 
419 		if (pioc = mkiocb(iocmd)) {
420 			if (cmdflag == WANT_TCSET) {
421 				pioc->b_cont =
422 				    sm_allocb(sizeof (struct termios),
423 				    BPRI_MED);
424 				if (pioc->b_cont == 0) {
425 					freemsg(pioc);
426 					pioc = 0;
427 				} else {
428 					struct termios *tc = (struct termios *)
429 					    pioc->b_cont->b_wptr;
430 
431 					bzero((caddr_t)tc,
432 					    sizeof (struct termios));
433 					tc->c_cflag = lqi->sm_ttycommon->
434 					    t_cflag;
435 					pioc->b_cont->b_rptr =
436 					    pioc->b_cont->b_wptr;
437 					pioc->b_cont->b_wptr +=
438 					    sizeof (struct termios);
439 				}
440 				size = sizeof (struct iocblk) +
441 				    sizeof (struct termios);
442 			}
443 			else
444 				size = sizeof (struct iocblk);
445 		}
446 		else
447 			size = sizeof (struct iocblk);
448 
449 		if (pioc != 0) {
450 
451 			lqi->sm_piocid = ((struct iocblk *)pioc->b_rptr)->
452 			    ioc_id;
453 			lqi->sm_flags |= SM_IOCPENDING;
454 
455 			/* lqi->sm_flags |= cmdflag; */
456 			UNLOCK_UNIT(lqi);
457 			(void) putq(q, pioc);
458 		} else {
459 			UNLOCK_UNIT(lqi);
460 			lqi->sm_bid = qbufcall(WR(q), size, BPRI_MED,
461 			    sm_issue_ioctl, lqi);
462 		}
463 	}
464 	else
465 		UNLOCK_UNIT(lqi);
466 }
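
/*
 * Illustrative sketch (assumptions only: "wq" and "desired_cflag" are
 * hypothetical): how an internal M_IOCTL such as the TCSETS issued by
 * sm_issue_ioctl() above is built with mkiocb(9F), with the payload
 * travelling in b_cont.
 *
 *	mblk_t *pioc, *data;
 *
 *	if ((pioc = mkiocb(TCSETS)) == NULL)
 *		return;			// retry later (e.g. via qbufcall)
 *	if ((data = allocb(sizeof (struct termios), BPRI_MED)) == NULL) {
 *		freemsg(pioc);
 *		return;
 *	}
 *	bzero(data->b_wptr, sizeof (struct termios));
 *	((struct termios *)data->b_wptr)->c_cflag = desired_cflag;
 *	data->b_wptr += sizeof (struct termios);
 *	pioc->b_cont = data;
 *	// save ((struct iocblk *)pioc->b_rptr)->ioc_id so the M_IOCACK or
 *	// M_IOCNAK coming back up can be matched to this request
 *	(void) putq(wq, pioc);
 */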
467 
468 /*
469  * Associate one of the driver's minor nodes with a serial device.
470  */
471 int
472 sm_associate(int unit, sm_lqi_t *plqi, ulong_t tag, uint_t ioflag, char *dp)
473 {
474 	sm_uqi_t *uqi = NULL;
475 	int rval = 0;
476 
477 	sm_dbg('Y', ("sm_associate(%d, %d, %d): ",
478 	    (plqi) ? plqi->sm_linkid : 0, unit, ioflag));
479 	/*
480 	 * Check the data is valid.
481 	 * Associate a lower queue with a logical unit.
482 	 */
483 
484 	if (unit < 0 || unit >= NLUNITS || plqi == 0 ||
485 	    (uqi = get_uqi(sm_ssp, unit)) == 0) {
486 		sm_dbg('@', (" invalid: lqi=0x%p lui=0x%p:", plqi, uqi));
487 		rval = EINVAL;
488 	} else {
489 		if ((ioflag & FORIO) == 0)
490 			ioflag = FORIO;
491 
492 		LOCK_UNIT(plqi);
493 
494 		if (plqi->sm_uqi) {
495 			if (plqi->sm_uqi->sm_lunit == unit) {
496 				if ((ioflag & (uint_t)FORIO) != 0)
497 					plqi->sm_ioflag =
498 					    (ioflag & (uint_t)FORIO);
499 				rval = 0;
500 			} else {
501 				sm_dbg('@', ("already associated with unit %d:",
502 				    plqi->sm_uqi->sm_lunit));
503 				rval = EINVAL;
504 			}
505 		} else {
506 
507 			LOCK_UNIT(uqi);
508 
509 			if ((ioflag & (uint_t)FORIO) != 0)
510 				plqi->sm_ioflag = (ioflag & (uint_t)FORIO);
511 
512 			plqi->sm_ttycommon->t_cflag = uqi->sm_ttycommon->
513 			    t_cflag;
514 			plqi->sm_ttycommon->t_flags = uqi->sm_ttycommon->
515 			    t_flags;
516 			plqi->sm_uqi = uqi;
517 			plqi->sm_mbits = 0;
518 			plqi->sm_tag = tag;
519 
520 			if (*dp == '/')
521 				(void) strncpy(plqi->sm_path, dp, MAXPATHLEN);
522 			else
523 				*(plqi->sm_path) = '\0';
524 
525 			plqi->sm_flags |= WANT_TCSET;
526 #ifdef ADD2FRONT
527 			plqi->sm_nlqi = uqi->sm_lqs;
528 			uqi->sm_lqs = plqi;
529 #else
530 			plqi->sm_nlqi = 0;
531 			if (uqi->sm_lqs) {
532 				sm_lqi_t *lq;
533 				for (lq = uqi->sm_lqs; lq->sm_nlqi;
534 				    lq = lq->sm_nlqi) {
535 				}
536 				lq->sm_nlqi = plqi;
537 			} else
538 				uqi->sm_lqs = plqi;
539 #endif
540 			uqi->sm_nlqs++;
541 
542 			(void) ttymux_device_init(plqi);
543 
544 			UNLOCK_UNIT(uqi);
545 			rval = 0;
546 			/*
547 			 * Everything looks good so it's now ok to enable lower
548 			 * queue processing.
549 			 * Note the lower queue should be enabled as soon as
550 			 * I_PLINK returns (used in sm_get_ttymodes etc).
551 			 * Schedule ioctls to obtain the terminal settings.
552 			 */
553 
554 			if ((uqi->sm_flags & FULLY_OPEN) || uqi->sm_waitq)
555 				plqi->sm_uqflags |= SM_UQVALID;
556 
557 			qenable(SM_RQ(plqi));
558 			if (plqi->sm_flags & (WANT_CDSTAT|WANT_TCSET)) {
559 				/*
560 				 * Bypass the lower half of the driver (hence
561 				 * no qwriter) and apply the current termio
562 				 * settings on the lower stream.
563 				 */
564 				UNLOCK_UNIT(plqi);
565 				if (plqi->sm_bid) {
566 					qunbufcall(SM_WQ(plqi), plqi->sm_bid);
567 					plqi->sm_bid = 0;
568 				}
569 				/*
570 				 * Only set cflags on the lower q if we know
571 				 * the settings on any other lower queue.
572 				 */
573 				sm_issue_ioctl(plqi);
574 				LOCK_UNIT(plqi);
575 
576 			}
577 		}
578 
579 		UNLOCK_UNIT(plqi);
580 	}
581 	sm_dbg('Y', ("sm_associate: rval=%d.\n", rval));
582 	return (rval);
583 }
584 
585 /*
586  * Break an association between one of the driver's minor nodes and
587  * a serial device.
588  */
589 int
590 sm_disassociate(int unit, sm_lqi_t *plqi, ulong_t tag)
591 {
592 	sm_uqi_t *uqi = NULL;
593 	int rval = 0;
594 
595 	sm_dbg('Y', ("sm_disassociate: link %d, unit %d: ",
596 	    (plqi) ? plqi->sm_linkid : 0, unit));
597 	/*
598 	 * Check the data is valid.
599 	 * Disassociate a lower queue with a logical unit.
600 	 */
601 	if (unit < 0 || unit >= NLUNITS || plqi == 0 ||
602 	    (uqi = get_uqi(sm_ssp, unit)) == 0) {
603 		sm_dbg('@', ("invalid: lqi=0x%p lui=0x%p", plqi, uqi));
604 		rval = EINVAL;
605 	} else {
606 		LOCK_UNIT(plqi);
607 
608 		if (plqi->sm_uqi == NULL) {
609 			sm_dbg('@', ("unit not associated"));
610 			rval = EINVAL;
611 		} else if (plqi->sm_uqi->sm_lunit != unit) {
612 			sm_dbg('@', ("unit and linkid not related (unit %d)",
613 			    plqi->sm_uqi->sm_lunit));
614 			rval = EINVAL;
615 		} else if (plqi->sm_tag != tag) {
616 			sm_dbg('@',
617 			    ("Invalid tag for TTYMUX_DISASSOC ioctl\n"));
618 			rval = EPERM;
619 		} else {
620 			sm_dbg('Y', ("disassociating "));
621 
622 			(void) ttymux_device_fini(plqi);
623 
624 			/*
625 			 * Indicate that carrier status is no
626 			 * longer required and that the upper
627 			 * queue should not be used by plqi
628 			 */
629 			plqi->sm_flags &= ~(WANT_CDSTAT|WANT_TCSET);
630 			plqi->sm_uqflags &= ~(SM_UQVALID|SM_OBPCNDEV);
631 			plqi->sm_ioflag = 0u;
632 
633 			sm_lqifree(plqi);
634 			rval = 0;
635 		}
636 		UNLOCK_UNIT(plqi);
637 	}
638 	sm_dbg('Y', (" rval=%d.\n", rval));
639 	return (rval);
640 
641 }
642 
643 /*
644  * Streams helper routines;
645  */
646 
647 /*
648  * Schedule a qbufcall for an upper queue.
649  * Must be called within the perimeter of the parameter q.
650  * fn must reenable the q.
651  * Called:
652  *	 whenever a message must be placed on multiple queues and allocb fails;
653  */
654 static void
655 sm_sched_uqcb(queue_t *q, int memreq, int pri, void (*fn)())
656 {
657 	sm_uqi_t	*uqi = q->q_ptr;
658 
659 	if (uqi->sm_ttybid != 0)
660 		qunbufcall(q, uqi->sm_ttybid);
661 
662 	noenable(q);
663 
664 	uqi->sm_ttybid = qbufcall(q, memreq, pri, fn, uqi);
665 }
666 
667 /*
668  * qbufcall routine to restart the queues when memory is available.
669  */
670 static void
671 sm_reenable_q(sm_uqi_t *uqi)
672 {
673 	queue_t *wq = SM_WQ(uqi);
674 
675 	if ((uqi->sm_flags & SM_STOPPED) == 0) {
676 		enableok(wq);
677 		qenable(wq);
678 	}
679 }
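
/*
 * Illustrative sketch (not driver code, "retry_cb" is hypothetical): the
 * allocation-recovery idiom behind sm_sched_uqcb()/sm_reenable_q().  On a
 * dup/alloc failure the upper write queue is disabled, the message is kept
 * on the queue, and a qbufcall(9F) callback later re-enables the queue so
 * the service routine retries the send.
 *
 *	// in the put/service routine, on allocation failure:
 *	noenable(wq);
 *	id = qbufcall(wq, bytes_needed, BPRI_MED, retry_cb, wq);
 *	(void) putq(wq, mp);
 *
 *	// qbufcall callback, run when memory may be available again:
 *	static void
 *	retry_cb(void *arg)
 *	{
 *		queue_t *wq = arg;
 *
 *		enableok(wq);
 *		qenable(wq);
 *	}
 */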
680 
681 /*
682  * Place a message on the write queue of each stream associated with
683  * the given upper stream.
684  */
685 static void
686 sm_senddown(sm_uqi_t *uqi)
687 {
688 	sm_lqi_t *lqi;
689 
690 	for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
691 		if (lqi->sm_mp != 0) {
692 			putnext(SM_WQ(lqi), lqi->sm_mp);
693 			lqi->sm_mp = 0;
694 		}
695 	}
696 }
697 
698 /*
699  * For each lower device that should receive a write message duplicate
700  * the message block.
701  */
702 static int
703 sm_dupmsg(sm_uqi_t *uqi, mblk_t *mp)
704 {
705 	sm_lqi_t	*lqi;
706 	mblk_t	*origmp = mp;
707 
708 	for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
709 		lqi->sm_mp = 0;
710 		if (lqi->sm_flags & WERROR_MODE) {
711 			continue;
712 		}
713 		if ((lqi->sm_ioflag & (uint_t)FOROUTPUT) == 0) {
714 			if (DB_TYPE(mp) == M_DATA)
715 				continue;
716 		}
717 		if (lqi->sm_nlqi == 0) {
718 			lqi->sm_mp = mp;
719 			origmp = NULL;
720 		} else if ((lqi->sm_mp = sm_copymsg(mp)) == 0) {
721 			sm_lqi_t *flqi;
722 
723 			for (flqi = uqi->sm_lqs; flqi != lqi;
724 			    flqi = flqi->sm_nlqi) {
725 				if (flqi->sm_mp) {
726 					/* must have been sm_copymsg */
727 					sm_freemsg(flqi->sm_mp);
728 					flqi->sm_mp = 0;
729 				}
730 			}
731 			return (sm_cnt_oqs(uqi) * msgdsize(mp));
732 		}
733 	}
734 	if (origmp != NULL)
735 		freemsg(origmp);
736 	return (0);
737 }
738 
739 /*
740  * Return 1 if all associated lower devices have room for another message
741  * otherwise return 0.
742  */
743 static int
744 sm_cansenddown(sm_uqi_t *uqi)
745 {
746 
747 	register sm_lqi_t	*lqi;
748 
749 	if (uqi->sm_lqs == 0)
750 		return (0);
751 
752 	for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
753 		if ((lqi->sm_flags & WERROR_MODE) == 0 &&
754 		    canputnext(SM_WQ(lqi)) == 0)
755 			return (0);
756 	}
757 	return (1);
758 }
759 
760 /*
761  * Put a message down all associated lower queues.
762  * Return 1 if the q function was called.
763  */
764 static int
765 sm_putqs(queue_t *q, mblk_t *mp, int (*qfn)())
766 {
767 	register sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr;
768 	register int memreq;
769 	int pri = (DB_TYPE(mp) < QPCTL) ? BPRI_MED : BPRI_HI;
770 	int rval = 0;
771 
772 	if (uqi->sm_lqs == 0 || (uqi->sm_flags & WERROR_MODE)) {
773 
774 		sm_dbg('Q', ("sm_putqs: freeing (0x%p 0x%p).\n", uqi->sm_lqs,
775 		    uqi->sm_flags));
776 		freemsg(mp);
777 	} else if (pri != BPRI_HI && sm_cansenddown(uqi) == 0) {
778 		/* a lower q is flow controlled */
779 		(void) qfn(q, mp);
780 		rval = 1;
781 	} else if ((memreq = sm_dupmsg(uqi, mp)) == 0) {
782 
783 		sm_senddown(uqi);
784 
785 	} else {
786 		sm_log("sm_putqs: msg 0x%x - can't alloc %d bytes (pri %d).\n",
787 		    DB_TYPE(mp), memreq, pri);
788 		sm_sched_uqcb(q, memreq, pri, sm_reenable_q);
789 
790 		(void) qfn(q, mp);
791 		rval = 1;
792 
793 	}
794 
795 	return (rval);
796 }
797 
798 /*
799  * Service streams link and unlink requests.
800  */
801 static void
802 sm_link_req(queue_t *wq, mblk_t *mp)
803 {
804 	struct linkblk *linkp;
805 	int rval;
806 	int cmd;
807 	sm_lqi_t *plqi;
808 
809 	ASSERT(DB_TYPE(mp) == M_IOCTL);
810 
811 	cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd;
812 	switch (cmd) {
813 
814 	case I_LINK:
815 	case I_PLINK:
816 		sm_dbg('G', ("sm_link_req: M_IOCTL %x (I_PLINK).\n", cmd));
817 
818 		linkp = (struct linkblk *)mp->b_cont->b_rptr;
819 
820 		/*
821 		 * 1.	Sanity check the link block.
822 		 * 2.	Validate that the queue is not already linked
823 		 *		(and resources available).
824 		 * 3.	Validate that the lower queue is not associated with
825 		 *		a logical unit.
826 		 * 4.	Remember that this lower queue is linked to the driver.
827 		 */
828 		if ((linkp == NULL) || (MBLKL(mp) < sizeof (*linkp)) ||
829 		    linkp->l_qbot == NULL) {
830 			sm_dbg('I', ("sm_link_req: invalid link block.\n"));
831 			rval = EINVAL;
832 		} else if ((plqi = get_lqi_byq(linkp->l_qbot)) == 0) {
833 			sm_dbg('I', ("sm_link_req: out of resources.\n"));
834 			rval = EBUSY; /* out of resources */
835 		} else if (plqi->sm_uqi) {
836 			UNLOCK_UNIT(plqi); /* was acquired by get_lqi_byq */
837 			sm_dbg('I', ("sm_link_req: already associated.\n"));
838 			rval = EBUSY; /* already linked */
839 		} else {
840 			SM_WQ(plqi) = linkp->l_qbot;
841 			SM_RQ(plqi)	= OTHERQ(linkp->l_qbot);
842 
843 			linkp->l_qbot->q_ptr =
844 			    OTHERQ(linkp->l_qbot)->q_ptr = plqi;
845 			plqi->sm_linkid = linkp->l_index;
846 			UNLOCK_UNIT(plqi); /* was acquired by get_lqi_byq */
847 
848 			sm_dbg('H', ("sm_link_req: linkid = %d.\n",
849 			    linkp->l_index));
850 
851 			sm_setdip(linkp->l_qbot, plqi);
852 			plqi->sm_ttycommon->t_flags = 0;
853 			plqi->sm_ttycommon->t_cflag = 0;
854 			plqi->sm_mbits = 0;
855 			(void) ttymux_device_init(plqi);
856 			rval = 0;
857 		}
858 
859 		break;
860 
861 	case I_UNLINK:
862 	case I_PUNLINK:
863 		sm_dbg('G', ("sm_link_req: M_IOCTL (I_PUNLINK).\n"));
864 
865 		linkp = (struct linkblk *)mp->b_cont->b_rptr;
866 
867 		if ((linkp == NULL) ||
868 		    (MBLKL(mp) < sizeof (*linkp)) ||
869 		    linkp->l_qbot == NULL) {
870 			rval = EINVAL;
871 		} else if ((plqi = get_lqi_byid(linkp->l_index)) == 0) {
872 			rval = EINVAL;
873 		} else {
874 			sm_uqi_t *uqi;
875 			int werrmode;
876 
877 			/*
878 			 * Mark the lower q as invalid.
879 			 */
880 			sm_dbg('G', ("I_PUNLINK: freeing link %d\n",
881 			    linkp->l_index));
882 
883 			if (plqi->sm_bid) {
884 				qunbufcall(SM_RQ(plqi), plqi->sm_bid);
885 				plqi->sm_bid = 0;
886 			}
887 			if (plqi->sm_ttybid) {
888 				qunbufcall(SM_RQ(plqi), plqi->sm_ttybid);
889 				plqi->sm_ttybid = 0;
890 			}
891 
892 			uqi = plqi->sm_uqi;
893 
894 
895 			(void) ttymux_device_fini(plqi);
896 
897 			if (uqi)
898 				(void) sm_disassociate(uqi->sm_lunit,
899 				    plqi, plqi->sm_tag);
900 
901 			LOCK_UNIT(plqi);
902 
903 			plqi->sm_piocid = 0;
904 
905 			werrmode = (plqi->sm_flags & (WERROR_MODE|HANGUP_MODE))
906 			    ? 1 : 0;
907 
908 			plqi->sm_mbits = 0;
909 			plqi->sm_flags = 0;
910 
911 			ttycommon_close(plqi->sm_ttycommon);
912 			/* SM_RQ(plqi) = SM_WQ(plqi) = 0; */
913 			plqi->sm_ttycommon->t_flags = 0;
914 			plqi->sm_ttycommon->t_cflag = 0;
915 			plqi->sm_ttycommon->t_iflag = 0;
916 			plqi->sm_linkid = 0;
917 			plqi->sm_dev = NODEV;
918 			plqi->sm_hadkadbchar = 0;
919 			plqi->sm_nachar = sm_ssp->sm_abs;
920 
921 			UNLOCK_UNIT(plqi);
922 			if (uqi &&
923 			    werrmode &&
924 			    (uqi->sm_flags & FULLY_OPEN) &&
925 			    sm_uwq_error(uqi) &&
926 			    putnextctl(SM_RQ(uqi), M_HANGUP) == 0) {
927 				sm_log("sm_link_req: putnextctl(M_HANGUP)"
928 				    " failed.\n");
929 			}
930 
931 			rval = 0;
932 		}
933 
934 		break;
935 	default:
936 		rval = EINVAL;
937 	}
938 	if (rval != 0)
939 		miocnak(wq, mp, 0, rval);
940 	else
941 		miocack(wq, mp, 0, 0);
942 }
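
/*
 * Illustrative user-level sketch (not part of this file; the device paths
 * are examples only): the M_IOCTLs handled by sm_link_req() arrive when a
 * serial stream is (persistently) linked under the multiplexer.
 *
 *	#include <stropts.h>
 *	#include <fcntl.h>
 *
 *	int mux = open("/dev/ttymux-ctl-node", O_RDWR);   // hypothetical path
 *	int ser = open("/dev/term/a", O_RDWR | O_NDELAY);
 *	int muxid = ioctl(mux, I_PLINK, ser);	// -> sm_link_req(), I_PLINK
 *	...
 *	(void) ioctl(mux, I_PUNLINK, muxid);	// -> sm_link_req(), I_PUNLINK
 *
 * The id returned by I_PLINK is the l_index recorded in sm_linkid above; a
 * later TTYMUX_ASSOC ioctl ties the linked stream to a logical unit.
 */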
943 
944 static int
945 sm_getiocinfo(mblk_t *mp, struct sm_iocinfo *info)
946 {
947 	switch (DB_TYPE(mp)) {
948 	case M_COPYOUT:
949 		info->sm_id = ((struct copyreq *)mp->b_rptr)->cq_id;
950 		info->sm_cmd = ((struct copyreq *)mp->b_rptr)->cq_cmd;
951 		info->sm_data = (((struct copyreq *)mp->b_rptr)->cq_size &&
952 		    mp->b_cont) ? (void *)mp->b_cont->b_rptr : 0;
953 		break;
954 	case M_COPYIN:
955 		info->sm_id = ((struct copyresp *)mp->b_rptr)->cp_id;
956 		info->sm_cmd = ((struct copyresp *)mp->b_rptr)->cp_cmd;
957 		info->sm_data = 0;
958 		break;
959 	case M_IOCACK:
960 		info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id;
961 		info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd;
962 		/* the se driver has a bug so we cannot use ioc_count */
963 		info->sm_data = (((struct iocblk *)mp->b_rptr)->
964 		    ioc_error == 0 && mp->b_cont) ?
965 		    (void *)mp->b_cont->b_rptr : 0;
966 		break;
967 	case M_IOCNAK:
968 		info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id;
969 		info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd;
970 		info->sm_data = 0;
971 		break;
972 	case M_IOCDATA:
973 		info->sm_id = ((struct copyresp *)mp->b_rptr)->cp_id;
974 		info->sm_cmd = ((struct copyresp *)mp->b_rptr)->cp_cmd;
975 		info->sm_data = (((struct copyresp *)mp->b_rptr)->
976 		    cp_rval == 0 && mp->b_cont) ?
977 		    (void *)mp->b_cont->b_rptr : 0;
978 		break;
979 	case M_IOCTL:
980 		info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id;
981 		info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd;
982 		info->sm_data = 0;
983 		break;
984 	default:
985 		return (EINVAL);
986 	}
987 	return (0);
988 }
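
/*
 * Reference sketch (standard STREAMS transparent-ioctl exchange, not driver
 * code): why sm_getiocinfo() must understand so many message types.  For a
 * transparent ioctl the conversation with the stream head is roughly:
 *
 *	M_IOCTL   (struct iocblk,   ioc_id)  down  user calls ioctl()
 *	M_COPYIN  (struct copyreq,  cq_id)   up    driver requests user data
 *	M_IOCDATA (struct copyresp, cp_id)   down  stream head supplies data
 *	M_COPYOUT (struct copyreq,  cq_id)   up    driver returns results
 *	M_IOCDATA (struct copyresp, cp_id)   down  stream head confirms copy
 *	M_IOCACK/M_IOCNAK (struct iocblk)    up    driver completes the ioctl
 *
 * Every stage carries the same ioctl id, which is what the sm_iocinfo
 * extraction above normalizes so that responses can be matched to requests.
 */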
989 
990 /*
991  * Record the termio settings that have been set on the upper stream
992  */
993 static int
994 sm_update_ttyinfo(mblk_t *mp, sm_uqi_t *uqi)
995 {
996 	int err;
997 	struct sm_iocinfo info;
998 
999 	if ((err = sm_getiocinfo(mp, &info)) != 0)
1000 		return (err);
1001 
1002 	switch (info.sm_cmd) {
1003 	case TIOCSPPS:
1004 	case TIOCGPPS:
1005 	case TIOCGPPSEV:
1006 		return (ENOTSUP);
1007 	case TIOCGWINSZ:
1008 	case TIOCSWINSZ:
1009 		break;
1010 	case TCSBRK:
1011 	case TIOCSBRK:
1012 	case TIOCCBRK:
1013 		break;
1014 	case TCSETSF:
1015 		uqi->sm_flags |= FLUSHR_PEND;
1016 		sm_dbg('I', ("TCSETSF: FLUSH is pending\n"));
1017 		/*FALLTHROUGH*/
1018 	case TCSETSW:
1019 	case TCSETS:
1020 	case TCGETS:
1021 		if (info.sm_data != 0) {
1022 			((struct termios *)info.sm_data)->c_cflag &=
1023 			    (tcflag_t)(~uqi->sm_cmask);
1024 			uqi->sm_ttycommon->t_cflag =
1025 			    ((struct termios *)info.sm_data)->c_cflag;
1026 		}
1027 		break;
1028 	case TCSETAF:
1029 		sm_dbg('I', ("TCSETAF: FLUSH is pending\n"));
1030 		uqi->sm_flags |= FLUSHR_PEND;
1031 		/*FALLTHROUGH*/
1032 	case TCSETAW:
1033 	case TCSETA:
1034 	case TCGETA:
1035 		if (info.sm_data != 0) {
1036 			((struct termio *)info.sm_data)->c_cflag &=
1037 			    (tcflag_t)(~uqi->sm_cmask);
1038 			uqi->sm_ttycommon->t_cflag =
1039 			    (tcflag_t)((struct termio *)info.sm_data)->c_cflag;
1040 		}
1041 		break;
1042 	case TIOCSSOFTCAR:
1043 	case TIOCGSOFTCAR:
1044 		if (info.sm_data != 0) {
1045 			if (*(int *)info.sm_data == 1)
1046 				uqi->sm_ttycommon->t_flags |= TS_SOFTCAR;
1047 			else
1048 				uqi->sm_ttycommon->t_flags &= ~TS_SOFTCAR;
1049 		}
1050 		break;
1051 	case TIOCMSET:
1052 	case TIOCMGET:
1053 		if (info.sm_data != 0)
1054 			uqi->sm_mbits = *(int *)info.sm_data;
1055 		break;
1056 	case TIOCMBIS:
1057 		if (info.sm_data != 0)
1058 			uqi->sm_mbits |= *(int *)info.sm_data;
1059 		break;
1060 	case TIOCMBIC:
1061 		if (info.sm_data != 0)
1062 			uqi->sm_mbits &= ~(*(int *)info.sm_data);
1063 		break;
1064 	default:
1065 		return (EINVAL);
1066 		/* NOTREACHED */
1067 	} /* end switch cmd */
1068 
1069 	if ((uqi->sm_mbits & TIOCM_CD) ||
1070 	    (uqi->sm_ttycommon->t_flags & TS_SOFTCAR) ||
1071 	    (uqi->sm_ttycommon->t_cflag & CLOCAL))
1072 		uqi->sm_flags |= SM_CARON;
1073 	else
1074 		uqi->sm_flags &= ~SM_CARON;
1075 
1076 	return (0);
1077 }
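
/*
 * Illustrative sketch (not driver code): the c_cflag masking applied in the
 * TCSETS/TCSETA cases above.  Bits in the unit's cflag mask (cf. sm_cmask,
 * CRTSXOFF|CRTSCTS, near the top of this file) are forced off before the
 * settings are recorded, e.g.:
 *
 *	struct termios t;
 *	tcflag_t cmask = CRTSXOFF | CRTSCTS;
 *
 *	t.c_cflag = B9600 | CS8 | CREAD | CRTSCTS;
 *	t.c_cflag &= (tcflag_t)~cmask;
 *	// CRTSCTS is discarded; B9600|CS8|CREAD remain in effect
 */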
1078 
1079 /*
1080  * SECTION
1081  * STREAM's interface to the OS.
1082  * Routines directly callable from the OS.
1083  */
1084 
1085 /*
1086  * Processes high priority messages coming from modules above the
1087  * multiplexor.
1088  * Return 1 if the queue was disabled.
1089  */
1090 static int
1091 sm_hp_uwput(queue_t *wq, mblk_t *mp)
1092 {
1093 	sm_uqi_t	*uqi = (sm_uqi_t *)(wq->q_ptr);
1094 	int	rval = 0;
1095 	sm_lqi_t	*plqi;
1096 	int	msgtype = DB_TYPE(mp);
1097 
1098 	switch (msgtype) {
1099 
1100 	case M_FLUSH:
1101 		/*
1102 		 * How to flush the bottom half:
1103 		 * putctl1(SM_WQ(plqi), *mp->b_rptr)
1104 		 * will work on the bottom half, but if FLUSHR is set,
1105 		 * when is the right time to flush the upper read queue?
1106 		 *
1107 		 * Could set uqi->sm_flags & WANT_FLUSH but then what happens
1108 		 * if FLUSHR is set and the driver sends up a FLUSHR
1109 		 * before it handles the current FLUSHR request
1110 		 * (if only there was an id for the message that could
1111 		 * be matched when it comes back up from the driver).
1112 		 *
1113 		 * Thus I'm going by the book - the bottom half acts like
1114 		 * a stream head and turns around FLUSHW back down to
1115 		 * the driver (see lrput). The upper half acts like a
1116 		 * driver and turns around FLUSHR:
1117 		 */
1118 
1119 		sm_dbg('I', ("sm_hp_uwput: FLUSH request 0x%x\n", *mp->b_rptr));
1120 		/* flush the upper write queue */
1121 		if (*mp->b_rptr & FLUSHW)
1122 			flushq(wq, FLUSHDATA);
1123 
1124 		/*
1125 		 * flush each associated lower write queue
1126 		 * and pass down the driver (ignore the FLUSHR and deal with
1127 		 * and pass it down to the driver (ignore the FLUSHR and deal with
1128 		 * it when it comes back up the read side).
1129 		for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) {
1130 			if ((plqi->sm_flags & WERROR_MODE) == 0 &&
1131 			    SM_WQ(plqi)) {
1132 				sm_dbg('I', ("flush lq 0x%p\n", SM_WQ(plqi)));
1133 				if (*mp->b_rptr & FLUSHW)
1134 					flushq(SM_WQ(plqi), FLUSHDATA);
1135 				(void) putnextctl1(SM_WQ(plqi), M_FLUSH,
1136 				    *mp->b_rptr);
1137 			}
1138 		}
1139 		break;
1140 
1141 	case M_STARTI:
1142 		for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) {
1143 			plqi->sm_flags &= ~SM_ISTOPPED;
1144 			if ((plqi->sm_flags & WERROR_MODE) == 0)
1145 				(void) putnextctl(SM_WQ(plqi), msgtype);
1146 		}
1147 		break;
1148 
1149 	case M_STOPI:
1150 		for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) {
1151 			plqi->sm_flags |= SM_ISTOPPED;
1152 			if ((plqi->sm_flags & WERROR_MODE) == 0)
1153 				(void) putnextctl(SM_WQ(plqi), msgtype);
1154 		}
1155 		break;
1156 
1157 	case M_STOP:	/* must never be queued */
1158 		uqi->sm_flags |= SM_STOPPED;
1159 		noenable(wq);
1160 		for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi)
1161 			if ((plqi->sm_flags & WERROR_MODE) == 0)
1162 				(void) putnextctl(SM_WQ(plqi), msgtype);
1163 
1164 		rval = 1;
1165 		break;
1166 
1167 	case M_START:	/* never be queued */
1168 		uqi->sm_flags &= ~SM_STOPPED;
1169 		enableok(wq);
1170 		qenable(wq);
1171 		for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi)
1172 			if ((plqi->sm_flags & WERROR_MODE) == 0)
1173 				(void) putnextctl(SM_WQ(plqi), msgtype);
1174 
1175 		break;
1176 
1177 	case M_PCSIG:
1178 	case M_COPYOUT:
1179 	case M_COPYIN:
1180 	case M_IOCACK:
1181 	case M_IOCNAK:
1182 		/* Wrong direction for message */
1183 		break;
1184 	case M_READ:
1185 		break;
1186 	case M_PCPROTO:
1187 	case M_PCRSE:
1188 	default:
1189 		sm_dbg('I', ("sm_hp_uwput: default case %d.\n", msgtype));
1190 		break;
1191 	} /* end switch on high pri message type */
1192 
1193 	freemsg(mp);
1194 	return (rval);
1195 }
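
/*
 * Reference sketch (the standard STREAMS flush idiom referred to in the
 * M_FLUSH case above, not driver code): a driver - or, here, the upper half
 * acting as one - flushes its write side on FLUSHW and turns FLUSHR around
 * back up the read side:
 *
 *	if (*mp->b_rptr & FLUSHW)
 *		flushq(wq, FLUSHDATA);
 *	if (*mp->b_rptr & FLUSHR) {
 *		flushq(RD(wq), FLUSHDATA);
 *		*mp->b_rptr &= ~FLUSHW;		// avoid a flush loop
 *		qreply(wq, mp);
 *	} else {
 *		freemsg(mp);
 *	}
 */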
1196 
1197 static int
1198 sm_default_uwioctl(queue_t *wq, mblk_t *mp, int (*qfn)())
1199 {
1200 	int	err;
1201 	struct iocblk	*iobp;
1202 	sm_uqi_t	*uqi;
1203 
1204 	uqi = (sm_uqi_t *)(wq->q_ptr);
1205 	iobp = (struct iocblk *)mp->b_rptr;
1206 
1207 	switch (iobp->ioc_cmd) {
1208 	case TIOCEXCL:
1209 	case TIOCNXCL:
1210 	case TIOCSTI:
1211 		/*
1212 		 * The three ioctl types we support do not require any
1213 		 * additional allocation and should not return a pending
1214 		 * ioctl state. For this reason it is safe for us to ignore
1215 		 * the return value from ttycommon_ioctl().
1216 		 * Additionally, we translate any error response from
1217 		 * ttycommon_ioctl() into EINVAL.
1218 		 */
1219 		(void) ttycommon_ioctl(uqi->sm_ttycommon, wq, mp, &err);
1220 		if (err < 0)
1221 			miocnak(wq, mp, 0, EINVAL);
1222 		else
1223 			miocack(wq, mp, 0, 0);
1224 		return (0);
1225 	default:
1226 		break;
1227 	}
1228 	if ((err = sm_update_ttyinfo(mp, uqi)) != 0) {
1229 		miocnak(wq, mp, 0, err);
1230 		return (0);
1231 	}
1232 
1233 	/*
1234 	 * If uqi->sm_siocdata.sm_iocid is still set just overwrite it, since
1235 	 * the stream head will have timed the earlier ioctl out.
1236 	 */
1237 	uqi->sm_siocdata.sm_iocid = iobp->ioc_id;
1238 	uqi->sm_siocdata.sm_acked = 0;
1239 	uqi->sm_siocdata.sm_nacks = sm_good_qs(uqi);
1240 	uqi->sm_siocdata.sm_acnt = 0;
1241 	uqi->sm_siocdata.sm_policy = uqi->sm_policy;
1242 	uqi->sm_siocdata.sm_flags = 0;
1243 	sm_dbg('Z', (" want %d acks for id %d.\n",
1244 	    uqi->sm_siocdata.sm_nacks, iobp->ioc_id));
1245 
1246 	return (sm_putqs(wq, mp, qfn));
1247 }
1248 
1249 /*
1250  *
1251  * sm_uwput - put function for an upper STREAM write.
1252  */
1253 static int
1254 sm_uwput(queue_t *wq, mblk_t *mp)
1255 {
1256 	sm_uqi_t		*uqi;
1257 	uchar_t		msgtype;
1258 	int		cmd;
1259 	struct iocblk	*iobp;
1260 
1261 	uqi = (sm_uqi_t *)(wq->q_ptr);
1262 	msgtype = DB_TYPE(mp);
1263 
1264 	ASSERT(uqi != 0 && sm_ssp != 0);
1265 
1266 	if (msgtype >= QPCTL && msgtype != M_IOCDATA) {
1267 		(void) sm_hp_uwput(wq, mp);
1268 		return (0);
1269 	}
1270 
1271 	switch (DB_TYPE(mp)) {
1272 	case M_DATA:
1273 	case M_DELAY:
1274 	case M_BREAK:
1275 	default:
1276 		(void) sm_putqs(wq, mp, putq);
1277 		break;
1278 
1279 	case M_CTL:
1280 		if (((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_CANONQUERY) {
1281 			(void) putnextctl1(OTHERQ(wq), M_CTL, MC_NOCANON);
1282 		}
1283 		freemsg(mp);
1284 		break;
1285 	case M_IOCDATA: /* not handled as high pri because may need to putbq */
1286 		sm_dbg('M', ("sm_uwput(M_IOCDATA)\n"));
1287 		/*FALLTHROUGH*/
1288 	case M_IOCTL:
1289 		cmd = (msgtype == M_IOCDATA) ?
1290 		    ((struct copyresp *)mp->b_rptr)->cp_cmd :
1291 		    ((struct iocblk *)mp->b_rptr)->ioc_cmd;
1292 
1293 		iobp = (struct iocblk *)mp->b_rptr;
1294 		iobp->ioc_rval = 0;
1295 
1296 		sm_dbg('M', ("sm_uwput(M_IOCTL:%d)\n", cmd));
1297 
1298 		switch (cmd) {
1299 
1300 		case CONSGETABORTENABLE:
1301 			iobp->ioc_error = ttymux_abort_ioctl(mp);
1302 			DB_TYPE(mp) = iobp->ioc_error ? M_IOCNAK : M_IOCACK;
1303 			qreply(wq, mp);
1304 			break;
1305 		case CONSSETABORTENABLE:
1306 			iobp->ioc_error =
1307 			    secpolicy_sys_config(iobp->ioc_cr, B_FALSE) != 0 ?
1308 			    EPERM : ttymux_abort_ioctl(mp);
1309 			DB_TYPE(mp) = iobp->ioc_error ? M_IOCNAK : M_IOCACK;
1310 			qreply(wq, mp);
1311 			break;
1312 		case TTYMUX_SETABORT:
1313 			if (secpolicy_sys_config(iobp->ioc_cr, B_FALSE) != 0) {
1314 				iobp->ioc_error = EPERM;
1315 				DB_TYPE(mp) = M_IOCNAK;
1316 				qreply(wq, mp);
1317 				break;
1318 			}
1319 			/*FALLTHROUGH*/
1320 		case TTYMUX_GETABORT:
1321 		case TTYMUX_GETABORTSTR:
1322 		case TTYMUX_ASSOC:
1323 		case TTYMUX_DISASSOC:
1324 		case TTYMUX_SETCTL:
1325 		case TTYMUX_GETLINK:
1326 		case TTYMUX_CONSDEV:
1327 		case TTYMUX_GETCTL:
1328 		case TTYMUX_LIST:
1329 			(void) sm_ioctl_cmd(uqi, mp);
1330 			qreply(wq, mp);
1331 			break;
1332 		case I_LINK:
1333 		case I_PLINK:
1334 		case I_UNLINK:
1335 		case I_PUNLINK:
1336 			qwriter(wq, mp, sm_link_req, PERIM_OUTER);
1337 			break;
1338 		case TCSETSW:
1339 		case TCSETSF:
1340 		case TCSETAW:
1341 		case TCSETAF:
1342 		case TCSBRK:
1343 			if (wq->q_first) {
1344 				sm_dbg('A', ("sm_uwput: TCSET-> on srv q.\n"));
1345 				/* keep message order intact */
1346 				(void) putq(wq, mp);
1347 				break;
1348 			}
1349 			/*FALLTHROUGH*/
1350 		default:
1351 			(void) sm_default_uwioctl(wq, mp, putq);
1352 			break;
1353 		}
1354 
1355 		break; /* M_IOCTL */
1356 
1357 	} /* end switch on message type */
1358 
1359 	return (0);
1360 }
1361 
1362 /*
1363  * sm_uwsrv - service function for an upper STREAM write.
1364  * 'sm_uwsrv' takes a q parameter.	The q parameter specifies the queue
1365  * which is to be serviced.	This function reads the messages which are on
1366  * this service queue and passes them to the appropriate lower driver queue.
1367  */
1368 static int
1369 sm_uwsrv(queue_t *q)
1370 {
1371 	mblk_t	*mp;
1372 	sm_uqi_t	*uqi = (sm_uqi_t *)(q->q_ptr);
1373 	int		msgtype;
1374 
1375 	ASSERT(q == SM_WQ(uqi));
1376 
1377 	/*
1378 	 * Empty the queue unless explicitly stopped.
1379 	 */
1380 	while (mp = getq(q)) {
1381 		msgtype = DB_TYPE(mp);
1382 
1383 		if (msgtype >= QPCTL && msgtype != M_IOCDATA)
1384 			if (sm_hp_uwput(q, mp)) {
1385 				sm_dbg('T', ("sm_uwsrv: flowcontrolled.\n"));
1386 				break; /* indicates that the is disabled */
1387 				break; /* indicates that the queue is disabled */
1388 			else
1389 				continue;
1390 
1391 		if (uqi->sm_flags & SM_STOPPED) {
1392 			(void) putbq(q, mp);
1393 			sm_dbg('T', ("sm_uwsrv: SM_STOPPED.\n"));
1394 			break;
1395 		}
1396 
1397 		/*
1398 		 * Read any ttycommon data that may
1399 		 * change (TS_SOFTCAR, CREAD, etc.).
1400 		 */
1401 		switch (DB_TYPE(mp)) {
1402 		case M_IOCTL:
1403 		case M_IOCDATA:
1404 			if (sm_default_uwioctl(q, mp, putbq))
1405 				return (0);
1406 			break;
1407 
1408 		default:
1409 			if (sm_putqs(q, mp, putbq))
1410 				return (0);
1411 		}
1412 	}
1413 	return (0);
1414 }
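
/*
 * Reference sketch (canonical STREAMS pattern, not driver code): the
 * getq()/putbq() loop that sm_uwsrv() above adapts for fan-out to several
 * lower queues.  A conventional single-destination write service routine:
 *
 *	static int
 *	xx_wsrv(queue_t *q)
 *	{
 *		mblk_t *mp;
 *
 *		while ((mp = getq(q)) != NULL) {
 *			if (DB_TYPE(mp) < QPCTL && !canputnext(q)) {
 *				(void) putbq(q, mp);	// still flow controlled
 *				break;			// back-enabled later
 *			}
 *			putnext(q, mp);
 *		}
 *		return (0);
 *	}
 */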
1415 
1416 /*
1417  * Lower write side service routine used for backenabling upstream
1418  * flow control.
1419  */
1420 static int
1421 sm_lwsrv(queue_t *q)
1422 {
1423 	sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr;
1424 	queue_t *uwq;
1425 
1426 	LOCK_UNIT(lqi);
1427 	if (lqi->sm_uqflags & SM_UQVALID) {
1428 		/*
1429 		 * It's safe to lock uqi since lwsrv runs asynchronously
1430 		 * with the upper write routines so this cannot be an
1431 		 * upper half thread. While holding the lqi lock and
1432 		 * if SM_UQVALID is set we are guaranteed that
1433 		 * lqi->sm_uqi will be valid.
1434 		 */
1435 		sm_dbg('I', ("sm_lwsrv: re-enabling upper queue.\n"));
1436 
1437 		uwq = SM_WQ(lqi->sm_uqi);
1438 		UNLOCK_UNIT(lqi);
1439 		qenable(uwq);
1440 	} else  {
1441 		UNLOCK_UNIT(lqi);
1442 	}
1443 	return (0);
1444 }
1445 
1446 /*
1447  * Upper read queue ioctl response handler for messages
1448  * passed from the lower half of the driver.
1449  */
1450 static int
1451 sm_uriocack(queue_t *rq, mblk_t *mp)
1452 {
1453 	sm_uqi_t		*uqi = (sm_uqi_t *)rq->q_ptr;
1454 	int		err, flag;
1455 	sm_iocdata_t	*iodp;
1456 	struct sm_iocinfo	info;
1457 
1458 	if ((err = sm_getiocinfo(mp, &info)) != 0) {
1459 		sm_dbg('I', ("Unknown ioctl response\n"));
1460 		return (err);
1461 	}
1462 
1463 	if (info.sm_id == uqi->sm_piocdata.sm_iocid) {
1464 		iodp = &uqi->sm_piocdata;
1465 	} else if (info.sm_id == uqi->sm_siocdata.sm_iocid) {
1466 		iodp = &uqi->sm_siocdata;
1467 	} else {
1468 		sm_log("Unexpected ioctl response\n");
1469 		sm_dbg('I', ("Unexpected ioctl response (id %d)\n",
1470 		    info.sm_id));
1471 
1472 		/*
1473 		 * If the response is sent up it will result in
1474 		 * duplicate ioctl responses. The ioctl has probably been
1475 		 * timed out by the stream head so dispose of the response
1476 		 * (since it has arrived too late).
1477 		 */
1478 		goto out;
1479 	}
1480 
1481 	flag = SM_COPYIN;
1482 
1483 	switch (DB_TYPE(mp)) {
1484 	case M_COPYOUT:
1485 		flag = SM_COPYOUT;
1486 		/*FALLTHRU*/
1487 	case M_COPYIN:
1488 		if (iodp->sm_flags & flag)
1489 			goto out;
1490 		iodp->sm_flags |= flag;
1491 
1492 		break;
1493 	case M_IOCACK:
1494 		iodp->sm_ackcnt += 1;
1495 		iodp->sm_acnt += 1;
1496 		if (iodp->sm_policy == FIRSTACK) {
1497 			if (iodp->sm_acnt == iodp->sm_nacks)
1498 				iodp->sm_iocid = 0;
1499 			if (iodp->sm_acnt == 1)
1500 				iodp->sm_acked = 1;
1501 			else
1502 				goto out;
1503 		} else {
1504 			if (iodp->sm_acnt == iodp->sm_nacks) {
1505 				iodp->sm_iocid = 0;
1506 				iodp->sm_acked = 1;
1507 			} else
1508 				goto out;
1509 		}
1510 		break;
1511 	case M_IOCNAK:
1512 		iodp->sm_nakcnt += 1;
1513 		iodp->sm_acnt += 1;
1514 		if (iodp->sm_acnt == iodp->sm_nacks) {
1515 			iodp->sm_iocid = 0;
1516 			if (iodp->sm_acked == 0) {
1517 				iodp->sm_acked = 1;
1518 				break;
1519 			}
1520 		}
1521 		goto out;
1522 	default:
1523 		goto out;
1524 	}
1525 
1526 	/*
1527 	 * Merge the tty settings of each of the associated lower streams.
1528 	 */
1529 	if (info.sm_data)
1530 		(void) sm_update_ttyinfo(mp, uqi);
1531 
1532 	if (iodp == &uqi->sm_piocdata) {
1533 		if (iodp->sm_iocid == 0) {
1534 			uqi->sm_flags &= ~SM_IOCPENDING;
1535 		}
1536 	} else {
1537 		sm_dbg('I', ("sm_uriocack: forwarding response for %d.\n",
1538 		    info.sm_id));
1539 		putnext(rq, mp);
1540 		return (0);
1541 	}
1542 out:
1543 	sm_dbg('I', ("sm_uriocack: freeing response for %d.\n", info.sm_id));
1544 	freemsg(mp);
1545 	return (0);
1546 }
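
/*
 * Worked example (illustrative): with three associated lower streams,
 * sm_nacks is 3.  Under the default policy sm_uriocack() above absorbs
 * responses until all three have been counted and only then completes the
 * ioctl; under FIRSTACK the first M_IOCACK is forwarded as the answer and
 * the remaining responses are dropped in the "out:" path, so the stream
 * head never sees duplicate acknowledgements.
 */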
1547 
1548 /*
1549  * Transfer a message from the lower read side of the multiplexer onto
1550  * the associated upper stream.
1551  */
1552 static int
1553 sm_ursendup(queue_t *q, mblk_t *mp)
1554 {
1555 	sm_uqi_t	*uqi = (sm_uqi_t *)q->q_ptr;
1556 
1557 	if (!canputnext(q) && DB_TYPE(mp) < QPCTL) {
1558 		sm_dbg('I', ("sm_ursendup: flow controlled.\n"));
1559 		return (1);
1560 	}
1561 
1562 	switch (DB_TYPE(mp)) {
1563 	case M_COPYIN:
1564 	case M_COPYOUT:
1565 	case M_IOCACK:
1566 	case M_IOCNAK:
1567 		(void) sm_uriocack(q, mp);
1568 		break;
1569 	case M_HANGUP:
1570 		if (sm_uwq_error(uqi)) {
1571 			/* there are no usable lower q's */
1572 			uqi->sm_flags &= ~SM_CARON;
1573 			putnext(q, mp);
1574 		} else {
1575 			/* there are still usable q's - don't send up */
1576 			freemsg(mp);
1577 		}
1578 		break;
1579 	case M_ERROR:
1580 		if (sm_uwq_error(uqi)) {
1581 			/* there are no usable lower q's */
1582 			uqi->sm_flags &= ~SM_CARON;
1583 			putnext(q, mp);
1584 		} else if (*mp->b_rptr == NOERROR) {
1585 			/* the error has cleared */
1586 			uqi->sm_flags &= ~ERROR_MODE;
1587 			putnext(q, mp);
1588 		} else {
1589 			/* there are still usable q's - don't send up */
1590 			freemsg(mp);
1591 		}
1592 		break;
1593 	case M_FLUSH:
1594 		flushq(q, FLUSHDATA);
1595 		putnext(q, mp);	/* time to use FLUSHR_PEND flag */
1596 		break;
1597 	case M_CTL:
1598 		/* wrong direction - must have come from sm_close */
1599 		uqi->sm_flags |= SM_CLOSE;
1600 		sm_dbg('I', ("sm_ursrv: had SM_CLOSE.\n"));
1601 		freemsg(mp);
1602 		break;
1603 	case M_UNHANGUP:
1604 		/* just pass them all up - they're harmless */
1605 		uqi->sm_flags |= SM_CARON;
1606 		/* FALLTHROUGH */
1607 	default:
1608 		putnext(q, mp);
1609 		break;
1610 	}
1611 
1612 	return (0);
1613 }
1614 
1615 /*
1616  * sm_urput - put function for a lower STREAM read.
1617  */
1618 static int
1619 sm_urput(queue_t *q, mblk_t *mp)
1620 {
1621 	if (sm_ursendup(q, mp) != 0)
1622 		(void) putq(q, mp);
1623 
1624 	return (0);
1625 }
1626 
1627 /*
1628  * Upper read side service routine.
1629  * Read side needs to be fast so only check for duplicate M_IOCTL acks.
1630  */
1631 static int
1632 sm_ursrv(queue_t *q)
1633 {
1634 	sm_uqi_t	*uqi = (sm_uqi_t *)q->q_ptr;
1635 	mblk_t	*mp;
1636 	int	flags = uqi->sm_flags;
1637 
1638 	while ((mp = getq(q))) {
1639 		if (sm_ursendup(q, mp) != 0) {
1640 			sm_dbg('I', ("sm_ursrv: flow controlled.\n"));
1641 			(void) putbq(q, mp);
1642 			uqi->sm_flags |= WANT_RENB;
1643 			break;
1644 		}
1645 	}
1646 
1647 	/*
1648 	 * If the q service was called because it was no longer
1649 	 * flow controlled then enable each of the driver queues.
1650 	 */
1651 	if ((flags & WANT_RENB) && !(uqi->sm_flags & WANT_RENB)) {
1652 		sm_lqi_t *lqi;
1653 		queue_t *drq; /* read q of linked driver */
1654 
1655 		uqi->sm_flags &= ~WANT_RENB;
1656 		for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
1657 			drq = SM_RQ(lqi)->q_next;
1658 			if (drq && drq->q_first != 0)
1659 				qenable(drq);
1660 		}
1661 	}
1662 
1663 	return (0);
1664 }
1665 
1666 /*
1667  * Check a message sent from a linked device for abort requests and
1668  * for flow control.
1669  */
1670 static int
1671 sm_lrmsg_check(queue_t *q, mblk_t *mp)
1672 {
1673 	sm_lqi_t	*lqi	= (sm_lqi_t *)q->q_ptr;
1674 
1675 	switch (DB_TYPE(mp)) {
1676 	case M_DATA:
1677 		LOCK_UNIT(lqi);
1678 		/*
1679 		 * check for abort - only allow abort on I/O consoles
1680 		 * known to OBP -
1681 		 * fix it when we do polled io
1682 		 */
1683 		if ((lqi->sm_ioflag & (uint_t)FORINPUT) == 0) {
1684 			freemsg(mp);
1685 			UNLOCK_UNIT(lqi);
1686 			return (1);
1687 		}
1688 		if ((lqi->sm_uqflags & SM_OBPCNDEV) &&
1689 		    lqi->sm_ctrla_abort_on &&
1690 		    abort_enable == KIOCABORTALTERNATE) {
1691 
1692 			uchar_t		*rxc;
1693 			boolean_t	aborted = B_FALSE;
1694 
1695 			for (rxc = mp->b_rptr;
1696 			    rxc != mp->b_wptr;
1697 			    rxc++)
1698 
1699 				if (*rxc == *lqi->sm_nachar) {
1700 					lqi->sm_nachar++;
1701 					if (*lqi->sm_nachar == '\0') {
1702 						abort_sequence_enter(
1703 						    (char *)NULL);
1704 						lqi->sm_nachar = sm_ssp->sm_abs;
1705 						aborted = B_TRUE;
1706 					}
1707 				} else
1708 					lqi->sm_nachar = (*rxc == *sm_ssp->
1709 					    sm_abs) ?
1710 					    sm_ssp->
1711 					    sm_abs + 1 :
1712 					    sm_ssp->sm_abs;
1713 
1714 			if (aborted) {
1715 				freemsg(mp);
1716 				UNLOCK_UNIT(lqi);
1717 				return (1);
1718 			}
1719 		}
1720 		UNLOCK_UNIT(lqi);
1721 		break;
1722 	case M_BREAK:	/* we'll eventually see this as a flush */
1723 		LOCK_UNIT(lqi);
1724 		/*
1725 		 * Only allow abort on OBP devices. When polled I/O is
1726 		 * supported allow abort on any console device.
1727 		 * Parity errors are reported upstream as breaks so
1728 		 * ensure that there is no data in the message before
1729 		 * deciding whether to abort.
1730 		 */
1731 		if ((lqi->sm_uqflags & SM_OBPCNDEV) && /* console stream */
1732 		    (mp->b_wptr - mp->b_rptr == 0 &&
1733 		    msgdsize(mp) == 0)) {	/* not due to parity */
1734 
1735 			if (lqi->sm_break_abort_on &&
1736 			    abort_enable != KIOCABORTALTERNATE)
1737 				abort_sequence_enter((char *)NULL);
1738 
1739 			freemsg(mp);
1740 			UNLOCK_UNIT(lqi);
1741 			return (1);
1742 		} else {
1743 			UNLOCK_UNIT(lqi);
1744 		}
1745 		break;
1746 	default:
1747 		break;
1748 	}
1749 
1750 	if (DB_TYPE(mp) >= QPCTL)
1751 		return (0);
1752 
1753 	LOCK_UNIT(lqi); /* lock out the upper half */
1754 	if ((lqi->sm_uqflags & SM_UQVALID) && SM_RQ(lqi->sm_uqi)) {
1755 		UNLOCK_UNIT(lqi);
1756 		if (!canput(SM_RQ(lqi->sm_uqi))) {
1757 			sm_dbg('I', ("sm_lrmsg_check: flow controlled.\n"));
1758 			(void) putq(q, mp);
1759 			return (1);
1760 		}
1761 	} else {
1762 		UNLOCK_UNIT(lqi);
1763 	}
1764 
1765 	return (0);
1766 }
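
/*
 * Illustrative sketch (not driver code, helper name is hypothetical): the
 * incremental abort-sequence match performed in the M_DATA case above.
 * sm_nachar advances along the abort string as matching bytes arrive and,
 * on a mismatch, falls back to the start of the string (or to its second
 * character when the mismatching byte equals the first character):
 *
 *	static boolean_t
 *	abort_match(char **cursor, char *abs, uchar_t c)
 *	{
 *		if (c == **cursor) {
 *			if (*++*cursor == '\0') {	// whole sequence seen
 *				*cursor = abs;
 *				return (B_TRUE);	// enter the debugger
 *			}
 *		} else {
 *			*cursor = (c == *abs) ? abs + 1 : abs;
 *		}
 *		return (B_FALSE);
 *	}
 */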
1767 
1768 /*
1769  * sm_sendup - deliver a message to the upper read side of the multiplexer
1770  */
1771 static int
1772 sm_sendup(queue_t *q, mblk_t *mp)
1773 {
1774 	sm_lqi_t	*lqi	= (sm_lqi_t *)q->q_ptr;
1775 
1776 	if (sm_ssp == NULL) {
1777 		freemsg(mp);
1778 		return (0);
1779 	}
1780 
1781 	/*
1782 	 * Check for CD status change messages from driver.
1783 	 * (Remark: this is an se driver thread running at soft interrupt
1784 	 * priority and the waiters are in user context).
1785 	 */
1786 	switch (DB_TYPE(mp)) {
1787 	case M_DATA:
1788 	case M_BREAK:	/* we'll eventually see this as a flush */
1789 		break;
1790 
1791 	/* high priority messages */
1792 	case M_IOCACK:
1793 	case M_IOCNAK:
1794 		if ((lqi->sm_flags & SM_IOCPENDING) && lqi->sm_piocid ==
1795 		    ((struct iocblk *)mp->b_rptr)->ioc_id) {
1796 			freemsg(mp);
1797 			lqi->sm_flags &= ~SM_IOCPENDING;
1798 			sm_issue_ioctl(lqi);
1799 			return (0);
1800 		}
1801 		break;
1802 	case M_UNHANGUP:
1803 		/*
1804 		 * If the driver can send an M_UNHANGUP it must be able to
1805 		 * accept messages from above (ie clear WERROR_MODE if set).
1806 		 */
1807 		sm_dbg('E', ("lrput: M_UNHANGUP\n"));
1808 		lqi->sm_mbits |= TIOCM_CD;
1809 		lqi->sm_flags &= ~(WERROR_MODE|HANGUP_MODE);
1810 
1811 		break;
1812 
1813 	case M_HANGUP:
1814 		sm_dbg('E', ("lrput: MHANGUP\n"));
1815 		lqi->sm_mbits &= ~TIOCM_CD;
1816 		lqi->sm_flags |= (WERROR_MODE|HANGUP_MODE);
1817 		break;
1818 
1819 	case M_ERROR:
1820 
1821 		sm_dbg('E', ("lrput: MERROR\n"));
1822 		/*
1823 		 * Tell the driver below to flush its read/write queue when a
1824 		 * read/write error is reported (error codes in the first two bytes).
1825 		 */
1826 		if ((mp->b_wptr - mp->b_rptr) == 2) {
1827 			uchar_t	rw = 0;
1828 
1829 			if (*mp->b_rptr == NOERROR) {
1830 				/* not in error anymore */
1831 				lqi->sm_flags &= ~ERROR_MODE;
1832 				lqi->sm_flags |= WANT_CD;
1833 			} else {
1834 				if (*mp->b_rptr != 0) {
1835 					/* read error */
1836 					rw |= FLUSHR;
1837 					lqi->sm_flags |= RERROR_MODE;
1838 				}
1839 				mp->b_rptr++;
1840 				if (*mp->b_rptr != 0) {
1841 					/* write error */
1842 					rw |= FLUSHW;
1843 					lqi->sm_flags |= WERROR_MODE;
1844 				}
1845 
1846 				mp->b_rptr--;
1847 				/* has next driver done qprocsoff */
1848 				if (rw && OTHERQ(q)->q_next != NULL) {
1849 					(void) putnextctl1(OTHERQ(q), M_FLUSH,
1850 					    rw);
1851 				}
1852 			}
1853 		} else if (*mp->b_rptr != 0 && OTHERQ(q)->q_next != NULL) {
1854 			sm_dbg('E', ("lrput: old style MERROR (?)\n"));
1855 
1856 			lqi->sm_flags |= (RERROR_MODE | WERROR_MODE);
1857 			(void) putnextctl1(OTHERQ(q), M_FLUSH, FLUSHRW);
1858 		}
1859 		break;
1860 
1861 	case M_PCSIG:
1862 	case M_SIG:
1863 		break;
1864 	case M_COPYOUT:
1865 	case M_COPYIN:
1866 		break;
1867 	case M_FLUSH:
1868 		/* flush the read queue and pass on up */
1869 		flushq(q, FLUSHDATA);
1870 		break;
1871 	default:
1872 		break;
1873 	}
1874 
1875 	LOCK_UNIT(lqi); /* lock out the upper half */
1876 	if (lqi->sm_uqflags & SM_UQVALID && SM_RQ(lqi->sm_uqi)) {
1877 		UNLOCK_UNIT(lqi);
1878 		(void) putq(SM_RQ(lqi->sm_uqi), mp);
1879 		return (0);
1880 	} else {
1881 		sm_dbg('I', ("sm_sendup: uq not valid\n"));
1882 		freemsg(mp);
1883 	}
1884 	UNLOCK_UNIT(lqi);
1885 
1886 	return (0);
1887 }
1888 
1889 /*
1890  * sm_lrput - put function for a lower STREAM read.
1891  */
1892 static int
1893 sm_lrput(queue_t *q, mblk_t *mp)
1894 {
1895 	if (sm_lrmsg_check(q, mp) == 0)
1896 		(void) sm_sendup(q, mp);
1897 	return (0);
1898 }
1899 
1900 /*
1901  * sm_lrsrv - service function for the lower read STREAM.
1902  */
1903 static int
1904 sm_lrsrv(queue_t *q)
1905 {
1906 	mblk_t	*mp;
1907 
1908 	sm_dbg('I', ("sm_lrsrv: not controlled.\n"));
1909 	while (mp = getq(q))
1910 		(void) sm_sendup(q, mp);
1911 
1912 	return (0);
1913 }
1914 
1915 /*
1916  * Check whether a thread is allowed to open the requested device.
1917  */
1918 static int
1919 sm_ok_to_open(sm_uqi_t *uqi, int protocol, cred_t *credp, int *abort_waiters)
1920 {
1921 	int rval = 0;
1922 	int proto;
1923 
1924 	*abort_waiters = 0;
1925 
1926 	switch (protocol) {
1927 		case ASYNC_DEVICE: /* Standard async protocol */
1928 		if ((uqi->sm_protocol == NULL_PROTOCOL) ||
1929 		    (uqi->sm_protocol == ASYN_PROTOCOL)) {
1930 			/*
1931 			 * Lock out other incompatible protocol requests.
1932 			 */
1933 			proto = ASYN_PROTOCOL;
1934 			rval = 0;
1935 		} else
1936 			rval = EBUSY;
1937 		break;
1938 
1939 		case OUTLINE:	/* Outdial protocol */
1940 		if ((uqi->sm_protocol == NULL_PROTOCOL) ||
1941 		    (uqi->sm_protocol == OUTD_PROTOCOL)) {
1942 			proto = OUTD_PROTOCOL;
1943 			rval = 0;
1944 		} else if (uqi->sm_protocol == ASYN_PROTOCOL) {
1945 			/*
1946 			 * check for dialout request on a line that is already
1947 			 * open for dial in:
1948 			 * kick off any thread that is waiting to fully open
1949 			 */
1950 			if (uqi->sm_flags & FULLY_OPEN)
1951 				rval = EBUSY;
1952 			else {
1953 				proto = OUTD_PROTOCOL;
1954 				*abort_waiters = 1;
1955 			}
1956 		} else
1957 			rval = EBUSY;
1958 		break;
1959 		default:
1960 			rval = ENOTSUP;
1961 	}
1962 
1963 	if (rval == 0 &&
1964 	    (uqi->sm_ttycommon->t_flags & TS_XCLUDE) &&
1965 	    secpolicy_excl_open(credp) != 0) {
1966 
1967 		if (uqi->sm_flags & FULLY_OPEN) {
1968 			rval = EBUSY; /* exclusive device already open */
1969 		} else {
1970 			/* NB TS_XCLUDE can't be set during open so NOTREACHED */
1971 			/* force any waiters to yield TS_XCLUDE */
1972 			*abort_waiters = 1;
1973 		}
1974 	}
1975 
1976 	if (rval == 0)
1977 		uqi->sm_protocol = proto;
1978 
1979 	sm_dbg('A', ("ok_to_open (0x%p, %d) proto=%d rval %d (wabort=%d)",
1980 	    uqi, protocol, uqi->sm_protocol, rval, *abort_waiters));
1981 
1982 	return (rval);
1983 }
1984 
1985 /* wait for memory to become available whilst performing a qwait */
1986 /*ARGSUSED*/
1987 static void dummy_callback(void *arg)
1988 {}
1989 
1990 /* ARGSUSED */
1991 static int
1992 sm_dump_msg(queue_t *q, mblk_t *mp)
1993 {
1994 	freemsg(mp);
1995 	return (0);
1996 }
1997 
1998 /*
1999  * Wait for a message to arrive - must be called with exclusive
2000  * access at the outer perimiter.
2001  */
2002 static int
2003 sm_qwait_sig(sm_uqi_t *uqi, queue_t *q)
2004 {
2005 	int err;
2006 
2007 	sm_dbg('C', ("sm_qwait_sig: waiting.\n"));
2008 
2009 	uqi->sm_waitq = q;
2010 	uqi->sm_nwaiters++;	/* required by the close routine */
2011 	err = qwait_sig(q);
2012 	if (--uqi->sm_nwaiters == 0)
2013 		uqi->sm_waitq = 0;
2014 
2015 	if (err == 0)
2016 		err = EINTR;
2017 	else if (q->q_ptr == 0) /* can happen if there are multiple waiters */
2018 		err = -1;
2019 	else if (uqi->sm_flags & SM_CLOSE) {
2020 		uqi->sm_flags &= ~SM_CLOSE;
2021 		err = 1;	/* a different protocol has closed its stream */
2022 	}
2023 	else
2024 		err = 0;	/* was worth waiting for */
2025 
2026 	sm_dbg('C', ("sm_qwait_sig: rval %d\n", err));
2027 	return (err);
2028 }
2029 
2030 /*
2031  * Defer the opening of one of the driver's devices until the state of each
2032  * associated lower stream is known.
2033  */
2034 static int
2035 sm_defer_open(sm_uqi_t *uqi, queue_t *q)
2036 {
2037 	uint_t cmdflags = WANT_CDSTAT;
2038 	int err, nqs;
2039 
2040 	while ((nqs = sm_good_qs(uqi)) == 0) {
2041 		sm_dbg('C', ("sm_defer_open: no good qs\n"));
2042 		if (err = sm_qwait_sig(uqi, q))
2043 			return (err);
2044 	}
2045 
2046 	while ((uqi->sm_flags & SM_CARON) == 0) {
2047 		int iocmd;
2048 		mblk_t *pioc;
2049 
2050 		sm_dbg('C', ("sm_defer_open: flags 0x%x cmdflags 0x%x\n",
2051 		    uqi->sm_flags, cmdflags));
2052 		if (cmdflags == 0) {
2053 			if (err = sm_qwait_sig(uqi, q))
2054 				return (err);
2055 			continue;	/* waiting for an M_UNHANGUP */
2056 		} else if (cmdflags & WANT_SC) {
2057 			cmdflags &= ~WANT_SC;
2058 			iocmd = TIOCGSOFTCAR;
2059 		} else if (cmdflags & WANT_CD) {
2060 			cmdflags &= ~WANT_CD;
2061 			iocmd = TIOCMGET;
2062 		} else if (cmdflags & WANT_CL) {
2063 			cmdflags &= ~WANT_CL;
2064 			iocmd = TCGETS;
2065 		}
2066 
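		/*
		 * No internal ioctl is outstanding: build one (waiting
		 * for buffers if necessary) and queue it downstream so
		 * that the carrier/soft-carrier state of the lower
		 * streams can be collected.
		 */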
2067 		if (uqi->sm_piocdata.sm_iocid == 0) {
2068 			while ((pioc = mkiocb(iocmd)) == 0) {
2069 				bufcall_id_t id =
2070 				    qbufcall(q, sizeof (struct iocblk),
2071 				    BPRI_MED, dummy_callback, 0);
2072 				if (err = sm_qwait_sig(uqi, q)) {
2073 					/* wait for the bufcall */
2074 					qunbufcall(q, id);
2075 					return (err);
2076 				}
2077 				qunbufcall(q, id);
2078 			}
2079 
2080 			uqi->sm_flags |= SM_IOCPENDING;
2081 
2082 			uqi->sm_piocdata.sm_iocid =
2083 			    ((struct iocblk *)pioc->b_rptr)->ioc_id;
2084 			uqi->sm_piocdata.sm_acked = 0;
2085 			uqi->sm_piocdata.sm_nacks = nqs;
2086 			uqi->sm_piocdata.sm_acnt = 0;
2087 			uqi->sm_piocdata.sm_ackcnt = 0;
2088 			uqi->sm_piocdata.sm_nakcnt = 0;
2089 			uqi->sm_piocdata.sm_policy = uqi->sm_policy;
2090 			uqi->sm_piocdata.sm_flags = SM_INTERNALIOC;
2091 			if (sm_putqs(WR(q), pioc, sm_dump_msg) != 0) {
2092 				uqi->sm_piocdata.sm_iocid = 0;
2093 				sm_log("sm_defer_open: bad putqs\n");
2094 				return (-1);
2095 			}
2096 		}
2097 
2098 		sm_dbg('C', ("sm_defer_open: flags 0x%x\n", uqi->sm_flags));
2099 		while ((uqi->sm_flags & SM_CARON) == 0 &&
2100 		    (uqi->sm_flags & SM_IOCPENDING) != 0)
2101 			if (err = sm_qwait_sig(uqi, q))
2102 				return (err);
2103 
2104 		sm_dbg('C', ("defer_open: uq flags 0x%x.\n", uqi->sm_flags));
2105 	}
2106 	sm_dbg('C', ("defer_open: return 0.\n"));
2107 	return (0);
2108 }
2109 
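/*
 * Multiplexer device open routine.
 * Determine the logical unit and protocol from the minor number,
 * enforce exclusive-open and protocol compatibility rules and, unless
 * this is the console or a non-blocking open, defer completion until
 * carrier status is known (see sm_defer_open).
 */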
2110 static int
2111 sm_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
2112 {
2113 	int		ftstat;
2114 	int		unit;
2115 	int		protocol;
2116 	sm_uqi_t		*uqi;
2117 	int		abort_waiters;
2118 
2119 	if (sm_ssp == NULL)
2120 		return (ENXIO);
2121 	/*
2122 	 * sflag = 0 => streams device.
2123 	 */
2124 	if (sflag != 0 || DEV_TO_UNIT(*devp) >= NLUNITS) {
2125 		sm_dbg('C', ("open: sflag=%d or bad dev_t.\n", sflag));
2126 		return (ENXIO);
2127 	}
2128 
2129 	unit = DEV_TO_UNIT(*devp);
2130 	protocol = DEV_TO_PROTOBITS(*devp);
2131 
2132 	uqi = get_uqi(sm_ssp, unit);
2133 
2134 	sm_dbg('C', ("open(0x%p, %d, 0x%x) :- unit=%d, proto=%d, uqi=0x%p\n",
2135 	    rq, *devp, flag, unit, protocol, uqi));
2136 
2137 	if (uqi == 0)
2138 		return (ENXIO);
2139 
2140 	if (sm_refuse_opens && unit > smctlunit && uqi->sm_nlqs == 0)
2141 		return (ENXIO);
2142 
2143 	if (uqi->sm_flags & EXCL_OPEN && (flag & FEXCL)) {
2144 		return (EBUSY); /* device in use */
2145 	}
2146 
2147 	if ((flag & FEXCL)) {
2148 		if (secpolicy_excl_open(credp) != 0)
2149 			return (EPERM);
2150 
2151 		if ((uqi->sm_flags & FULLY_OPEN) || uqi->sm_nwaiters > 0)
2152 			return (EBUSY); /* device in use */
2153 
2154 		uqi->sm_flags |= EXCL_OPEN;
2155 	}
2156 
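	/*
	 * No protocol is currently bound to this unit: initialise the
	 * termio settings from the "ttymodes" property (zeroing them if
	 * the property is unavailable) and record whether this unit
	 * carries the console streams.
	 */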
2157 	if (uqi->sm_protocol == NULL_PROTOCOL) {
2158 		struct termios *termiosp;
2159 		int len;
2160 
2161 		if (ddi_getlongprop(DDI_DEV_T_ANY, ddi_root_node(),
2162 		    DDI_PROP_NOTPROM, "ttymodes", (caddr_t)&termiosp, &len)
2163 		    == DDI_PROP_SUCCESS &&
2164 		    (len == sizeof (struct termios))) {
2165 
2166 			sm_dbg('C', ("open: c_cflag=0x%x\n",
2167 			    termiosp->c_cflag));
2168 
2169 			uqi->sm_ttycommon->t_iflag = termiosp->c_iflag;
2170 			uqi->sm_ttycommon->t_cflag = termiosp->c_cflag;
2171 			uqi->sm_ttycommon->t_stopc = termiosp->c_cc[VSTOP];
2172 			uqi->sm_ttycommon->t_startc = termiosp->c_cc[VSTART];
2173 
2174 			/*
2175 			 * IGNBRK,BRKINT,INPCK,IXON,IXANY,IXOFF - drivers
2176 			 * PARMRK,IGNPAR,ISTRIP - how to report parity
2177 			 * INLCR,IGNCR,ICRNL,IUCLC - ldterm (sophisticated I/O)
2178 			 * IXON, IXANY, IXOFF - flow control input
2179 			 * CBAUD,CSIZE,CS5-8,CSTOPB,PARENB,PARODD,HUPCL,
2180 			 * RCV1EN,XMT1EN,LOBLK,XCLUDE,CRTSXOFF,CRTSCTS,
2181 			 * CIBAUD,PAREXT,CBAUDEXT,CIBAUDEXT,CREAD,CLOCAL
2182 			 */
2183 
2184 			kmem_free(termiosp, len);
2185 		}
2186 		else
2187 			bzero((caddr_t)uqi->sm_ttycommon,
2188 			    sizeof (uqi->sm_ttycommon));
2189 
2190 		if (*devp == rconsdev) {
2191 			uqi->sm_cmask = sm_cmask;
2192 			uqi->sm_ttycommon->t_flags |= TS_SOFTCAR;
2193 		} else {
2194 			uqi->sm_ttycommon->t_flags &= ~TS_SOFTCAR;
2195 		}
2196 
2197 		/*
2198 		 * Clear the default CLOCAL and TS_SOFTCAR flags since
2199 		 * they must correspond to the settings on the real devices.
2200 		 */
2201 
2202 		uqi->sm_ttycommon->t_cflag &= ~(uqi->sm_cmask|CLOCAL);
2203 		uqi->sm_mbits = 0;
2204 		uqi->sm_policy = FIRSTACK;
2205 		if (unit == 0 && sm_ssp->sm_ms == 0)
2206 			sm_ssp->sm_ms = (sm_mux_state_t *)
2207 			    space_fetch(TTYMUXPTR);
2208 		if (sm_ssp->sm_ms) {
2209 			if (sm_ssp->sm_ms->sm_cons_stdin.sm_dev == *devp ||
2210 			    sm_ssp->sm_ms->sm_cons_stdout.sm_dev == *devp)
2211 				sm_ssp->sm_lconsole = uqi;
2212 		}
2213 	}
2214 
2215 	/*
2216 	 * Does this thread need to wait?
2217 	 */
2218 
2219 	sm_dbg('C', ("sm_open: %d %d 0x%p 0x%x\n",
2220 	    !(flag & (FNDELAY|FNONBLOCK)), !(protocol == OUTLINE), uqi->sm_lqs,
2221 	    uqi->sm_flags));
2222 
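	/*
	 * Retry point - a blocked open comes back here when
	 * sm_defer_open() reports that the unit's state has changed
	 * (e.g. a stream using a different protocol has closed).
	 */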
2223 tryopen:
2224 
2225 	abort_waiters = 0;
2226 	if (ftstat = sm_ok_to_open(uqi, protocol, credp, &abort_waiters)) {
2227 		sm_dbg('C', ("open failed stat=%d.\n", ftstat));
2228 
2229 		if ((uqi->sm_flags & FULLY_OPEN) == 0 && uqi->sm_nwaiters == 0)
2230 			uqi->sm_protocol = NULL_PROTOCOL;
2231 		if (flag & FEXCL)
2232 			uqi->sm_flags &= ~EXCL_OPEN;
2233 		return (ftstat);
2234 	}
2235 
2236 	if (abort_waiters) {
2237 		uqi->sm_dev = *devp;
2238 		/* different device wants to use the unit */
2239 		SM_RQ(uqi) = rq;
2240 		SM_WQ(uqi) = WR(rq);
2241 	}
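	/*
	 * First reference to this stream - bind the queue pair to the
	 * unit, start queue processing and mark each linked lower queue
	 * as having a valid upper queue.
	 */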
2242 	if (rq->q_ptr == 0) {
2243 		sm_lqi_t *lqi;
2244 
2245 		uqi->sm_dev = *devp;
2246 		rq->q_ptr = WR(rq)->q_ptr = uqi;
2247 		SM_RQ(uqi) = rq;
2248 		SM_WQ(uqi) = WR(rq);
2249 		qprocson(rq);
2250 		for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
2251 			LOCK_UNIT(lqi);
2252 			lqi->sm_uqflags |= SM_UQVALID;
2253 			UNLOCK_UNIT(lqi);
2254 		}
2255 
2256 		sm_dbg('C', ("sm_open: SM_UQVALID set on lqs.\n"));
2257 	}
2258 
2259 	if (*devp != rconsdev && BLOCKING(uqi, protocol, flag)) {
2260 
2261 		uqi->sm_flags |= WANT_CDSTAT;
2262 
2263 		do {
2264 			/*
2265 			 * Wait for notifications of changes in the CLOCAL
2266 			 * and TS_SOFTCAR flags and a TIOCM_CD flag of a
2267 			 * TIOCMGET request (come in on the write side queue).
2268 			 */
2269 
2270 			if ((ftstat = sm_defer_open(uqi, rq)) != EINTR) {
2271 				if (ftstat) {
2272 					goto tryopen;
2273 				} else {
2274 					continue;
2275 				}
2276 			}
2277 
2278 			if (uqi->sm_nwaiters == 0) {	/* clean up */
2279 				/*
2280 				 * only opens using an asynchronous
2281 				 * protocol reach here, so checking
2282 				 * nwaiters == 0 is sufficient to
2283 				 * ensure that no other thread
2284 				 * is waiting on this logical unit
2285 				 */
2286 				if ((uqi->sm_flags & FULLY_OPEN) == 0) {
2287 
2288 					sm_lqi_t *lqi;
2289 
2290 					uqi->sm_dev = NODEV;
2291 					sm_dbg('C', ("sm_open FULLY_OPEN=0\n"));
2292 					for (lqi = uqi->sm_lqs; lqi != 0;
2293 					    lqi = lqi->sm_nlqi) {
2294 						LOCK_UNIT(lqi);
2295 						lqi->sm_uqflags &= ~SM_UQVALID;
2296 						UNLOCK_UNIT(lqi);
2297 					}
2298 
2299 					qprocsoff(rq);
2300 					rq->q_ptr = WR(rq)->q_ptr = 0;
2301 					SM_RQ(uqi) = 0;
2302 					SM_WQ(uqi) = 0;
2303 				}
2304 			}
2305 			if ((uqi->sm_flags & FULLY_OPEN) == 0 &&
2306 			    uqi->sm_nwaiters == 0)
2307 				uqi->sm_protocol = NULL_PROTOCOL;
2308 			if (flag & FEXCL)
2309 				uqi->sm_flags &= ~EXCL_OPEN;
2310 			sm_dbg('C', ("sm_open: done (ret %d).\n", ftstat));
2311 			return (ftstat);
2312 		} while (BLOCKING(uqi, protocol, flag));
2313 	}
2314 
2315 	uqi->sm_flags |= FULLY_OPEN;
2316 
2317 	sm_dbg('C', ("sm_open done (ret %d).\n", ftstat));
2318 	return (ftstat);
2319 }
2320 
2321 /*
2322  * Multiplexer device close routine.
2323  */
2324 /*ARGSUSED*/
2325 static int
2326 sm_close(queue_t *rq, int flag, cred_t *credp)
2327 {
2328 	sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr;
2329 	sm_lqi_t *lqi;
2330 
2331 	if (sm_ssp == NULL)
2332 		return (ENXIO);
2333 
2334 	if (uqi == NULL) {
2335 		sm_dbg('C', ("close: WARN:- q 0x%p already closed.\n", rq));
2336 		return (ENXIO);
2337 	}
2338 
2339 	sm_dbg('C', ("close: uqi=0x%p unit=%d q=0x%p)\n", uqi, uqi->sm_lunit,
2340 	    rq));
2341 
2342 	if (SM_RQ(uqi) != rq)
2343 		sm_dbg('C', ("sm_close: rq != current uqi queue\n"));
2344 
2345 	if (uqi->sm_ttybid) {
2346 		qunbufcall(SM_RQ(uqi), uqi->sm_ttybid);
2347 		uqi->sm_ttybid = 0;
2348 	}
2349 
2350 	/*
2351 	 * Tell all the linked queues that the upper queue has gone.
2352 	 * Note that close will never get called on a stream while there is a
2353 	 * thread blocked trying to open the same stream.
2354 	 * If there is a blocked open on a different stream but on
2355 	 * the same logical unit it will reset the lower queue flags.
2356 	 */
2357 	for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
2358 		LOCK_UNIT(lqi);
2359 		lqi->sm_uqflags &= ~SM_UQVALID;
2360 		UNLOCK_UNIT(lqi);
2361 	}
2362 
2363 	/*
2364 	 * Turn off the STREAMs queue processing for this queue.
2365 	 */
2366 	qprocsoff(rq);
2367 
2368 	/*
2369 	 * Similarly, we will never get here if there is a thread trying to
2370 	 * open this stream.
2371 	 */
2372 	LOCK_UNIT(uqi);
2373 	if (uqi->sm_waitq == 0)
2374 		uqi->sm_flags = (uqi->sm_flags & SM_OBPCNDEV) ? SM_OBPCNDEV :
2375 		    0U;
2376 
2377 	uqi->sm_dev = NODEV;
2378 	uqi->sm_protocol = NULL_PROTOCOL;
2379 	ttycommon_close(uqi->sm_ttycommon);
2380 	/* it just frees any pending ioctl */
2381 
2382 	uqi->sm_ttycommon->t_cflag = 0;
2383 	uqi->sm_ttycommon->t_flags = 0;
2384 
2385 	/*
2386 	 * Reset the queue pointers to NULL.
2387 	 * If a thread is qwaiting in the open routine it will recheck
2388 	 * the q_ptr.
2389 	 */
2390 	rq->q_ptr = NULL;
2391 	WR(rq)->q_ptr = NULL;
2392 	UNLOCK_UNIT(uqi);
2393 
2394 	if (sm_ssp->sm_lconsole == uqi) {
2395 		/* this will never be the outdial device closing */
2396 		sm_ssp->sm_lconsole = 0;
2397 	}
2398 	/*
2399 	 * If there is another thread waiting for this close then unblock
2400 	 * the thread by putting a message on its read queue.
2401 	 */
2402 	if (uqi->sm_waitq) {
2403 		sm_dbg('C', ("close(0x%p): doing putctl on 0x%p\n",
2404 		    rq, uqi->sm_waitq));
2405 		if (rq == uqi->sm_waitq)
2406 			sm_log("close: waitq and closeq are same q\n");
2407 		(void) putctl(uqi->sm_waitq, M_CTL);
2408 	}
2409 
2410 	uqi->sm_flags &= ~(EXCL_OPEN | FULLY_OPEN);
2411 	sm_dbg('C', ("close: returning ok.\n"));
2412 	return (0);
2413 }
2414 
2415 /*
2416  * Initialise the software abort sequence for use when one of the
2417  * driver's nodes provides the system console.
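 * The sequence is taken from the "abort-str" property when present,
 * with "^x" escapes translated to control characters (e.g. "\r~^b");
 * otherwise it defaults to carriage-return, tilde, control-B.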
2418  */
2419 static void
2420 sm_set_abort()
2421 {
2422 	char ds[4] = { '\r', '~', CNTRL('b'), '\0' };
2423 	char as[SM_MAX_ABSLEN];
2424 	int len = SM_MAX_ABSLEN;
2425 
2426 	if (ddi_prop_op(DDI_DEV_T_ANY, sm_ssp->sm_dip, PROP_LEN_AND_VAL_BUF, 0,
2427 	    "abort-str", as, &len) != DDI_PROP_SUCCESS ||
2428 	    (len = strlen(as)) < SM_MIN_ABSLEN) {
2429 		(void) strcpy(as, ds);
2430 		len = strlen(as);
2431 	} else {
2432 		char *s;
2433 		int i;
2434 
2435 		for (s = as, i = 0; i < len-1; i++, s++) {
2436 			if (as[i] == '^' && as[i+1] >= 'a' && as[i+1] <= 'z') {
2437 				*s = as[i+1] - 'a' + 1;
2438 				i++;
2439 			} else {
2440 				*s = as[i];
2441 			}
2442 		}
2443 		*s++ = as[i];
2444 		*s = '\0';
2445 		len = strlen(as);
2446 	}
2447 
2448 	if (len < SM_MIN_ABSLEN)
2449 		(void) strcpy(sm_ssp->sm_abs, ds);
2450 	else
2451 		(void) strcpy(sm_ssp->sm_abs, as);
2452 }
2453 
2454 /*
2455  *
2456  * sm_attach - initialisation routine per driver instance.
2457  */
2458 static int
2459 sm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2460 {
2461 	int unit;
2462 	char name[32];
2463 	sm_uqi_t *uqi;
2464 	sm_lqi_t *lqip;
2465 
2466 	/*
2467 	 * Is this an attach?
2468 	 */
2469 	if (cmd != DDI_ATTACH) {
2470 		return (DDI_FAILURE);
2471 	}
2472 
2473 	/*
2474 	 * Validate the instance number (sm is a single instance driver).
2475 	 */
2476 	if (sm_ssp) {	/* only one instance allowed */
2477 		return (DDI_FAILURE);
2478 	}
2479 
2480 	sm_instance = ddi_get_instance(dip);
2481 
2482 	/*
2483 	 * Create the default minor node which will become the console.
2484 	 * (create it with three different names):
2485 	 *	con which appears in the /dev filesystem;
2486 	 *	input which matches the prom /multiplexer:input node;
2487 	 *	output which matches the prom /multiplexer:output node.
2488 	 * Create a minor node for control operations.
2489 	 */
2490 	if (ddi_create_minor_node(dip, "con", S_IFCHR, 0,
2491 	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
2492 	    ddi_create_minor_node(dip, "input", S_IFCHR, 0,
2493 	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
2494 	    ddi_create_minor_node(dip, "output", S_IFCHR, 0,
2495 	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
2496 	    ddi_create_minor_node(dip, "ctl", S_IFCHR, 1,
2497 	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
2498 
2499 		cmn_err(CE_WARN, "sm_attach: create minors failed.\n");
2500 		ddi_remove_minor_node(dip, NULL);
2501 		return (DDI_FAILURE);
2502 	}
2503 
2504 	smctlunit = 1;
2505 
2506 	/*
2507 	 * Allocate private state for this instance.
2508 	 */
2509 	sm_ssp = (sm_ss_t *)kmem_zalloc(sizeof (sm_ss_t), KM_SLEEP);
2510 
2511 	/*
2512 	 * Initialise per instance data.
2513 	 */
2514 	sm_ssp->sm_dip = dip;
2515 
2516 	/*
2517 	 * Get required debug level.
2518 	 */
2519 	sm_ssp->sm_trflag = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2520 	    DDI_PROP_DONTPASS, "sm-trlv", sm_default_trflag);
2521 
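	/*
	 * Remaining tunables: the maximum number of units, the number
	 * of additional minor nodes and the open/abort policies (all
	 * overridable via driver properties, e.g. a ttymux.conf line
	 * such as "sm-minor-cnt=2;" - illustrative only).
	 */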
2522 	sm_max_units = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2523 	    DDI_PROP_DONTPASS, "sm-max-units", sm_max_units);
2524 	sm_minor_cnt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2525 	    DDI_PROP_DONTPASS, "sm-minor-cnt", 0);
2526 
2527 	sm_refuse_opens = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2528 	    DDI_PROP_DONTPASS, "sm-refuse-opens", sm_refuse_opens);
2529 
2530 	sm_ssp->sm_ctrla_abort_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2531 	    DDI_PROP_DONTPASS, "sm-ctrla-abort-on", 1);
2532 	sm_ssp->sm_break_abort_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2533 	    DDI_PROP_DONTPASS, "sm-break-abort-on", 0);
2534 
2535 	sm_set_abort();
2536 
2537 	sm_ssp->sm_lqs = (sm_lqi_t *)kmem_zalloc(sizeof (sm_lqi_t) * MAX_LQS,
2538 	    KM_SLEEP);
2539 	sm_ssp->sm_uqs = (sm_uqi_t *)kmem_zalloc(sizeof (sm_uqi_t) * NLUNITS,
2540 	    KM_SLEEP);
2541 
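	/*
	 * Create the optional dial-in ("sm<x>") and dial-out
	 * ("sm<x>,cu") minor nodes for the extra units requested via
	 * the sm-minor-cnt property.
	 */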
2542 	for (unit = 2; unit < NLUNITS && unit < sm_minor_cnt + 2; unit++) {
2543 
2544 		if (snprintf(name, sizeof (name), "sm%c", 'a' + unit-2) >
2545 		    sizeof (name)) {
2546 			cmn_err(CE_WARN,
2547 			    "sm_attach: create device for unit %d failed.\n",
2548 			    unit);
2549 		} else if (ddi_create_minor_node(dip, name, S_IFCHR,
2550 		    unit, DDI_NT_SERIAL, NULL) != DDI_SUCCESS) {
2551 			ddi_remove_minor_node(dip, NULL);
2552 			return (DDI_FAILURE);
2553 		}
2554 
2555 		if (snprintf(name, sizeof (name), "sm%c,cu", 'a' + unit-2) >
2556 		    sizeof (name)) {
2557 			cmn_err(CE_WARN,
2558 			    "sm_attach: create cu device for unit %d failed.\n",
2559 			    unit);
2560 			continue;
2561 		} else if (ddi_create_minor_node(dip, name, S_IFCHR,
2562 		    unit|OUTLINE, DDI_NT_SERIAL_DO, NULL) != DDI_SUCCESS) {
2563 			ddi_remove_minor_node(dip, NULL);
2564 			return (DDI_FAILURE);
2565 		}
2566 	}
2567 
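	/* Initialise the per-unit state of each upper (logical) stream. */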
2568 	for (unit = 0; unit < NLUNITS; unit++) {
2569 
2570 		uqi = get_uqi(sm_ssp, unit);
2571 		uqi->sm_lqs = 0;
2572 		uqi->sm_dev = NODEV;
2573 		uqi->sm_nlqs = 0;
2574 		uqi->sm_lunit = unit;
2575 		uqi->sm_protocol = NULL_PROTOCOL;
2576 		mutex_init(uqi->sm_umutex, NULL, MUTEX_DRIVER, NULL);
2577 		cv_init(uqi->sm_ucv, NULL, CV_DRIVER, NULL);
2578 		mutex_init(&uqi->sm_ttycommon->t_excl, NULL,
2579 		    MUTEX_DRIVER, NULL);
2580 	}
2581 
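	/* Initialise the per-unit state of each lower (linked) stream. */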
2582 	for (unit = 0; unit < MAX_LQS; unit++) {
2583 		lqip = get_lqi(sm_ssp, unit);
2584 		lqip->sm_unit = unit;
2585 		lqip->sm_hadkadbchar = 0;
2586 		lqip->sm_nachar = sm_ssp->sm_abs;
2587 		lqip->sm_ioflag = FORIO;
2588 		lqip->sm_ctrla_abort_on = sm_ssp->sm_ctrla_abort_on;
2589 		lqip->sm_break_abort_on = sm_ssp->sm_break_abort_on;
2590 		mutex_init(lqip->sm_umutex, NULL, MUTEX_DRIVER, NULL);
2591 		cv_init(lqip->sm_ucv, NULL, CV_DRIVER, NULL);
2592 		mutex_init(&lqip->sm_ttycommon->t_excl, NULL,
2593 		    MUTEX_DRIVER, NULL);
2594 	}
2595 
2596 	return (DDI_SUCCESS);
2597 }
2598 
2599 /*
2600  *
2601  * sm_detach - detach routine per driver instance.
2602  */
2603 static int
2604 sm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2605 {
2606 	sm_uqi_t		*lu;
2607 	sm_lqi_t		*pu;
2608 	int		unit;
2609 
2610 	/*
2611 	 * Is this a detach request for instance 0 (single instance driver)?
2612 	 */
2613 	if (cmd != DDI_DETACH)
2614 		return (DDI_FAILURE);
2615 
2616 	if (sm_ssp == NULL)
2617 		return (DDI_FAILURE);
2618 
2619 	sm_dbg('V', ("detach ..."));
2620 
2621 
2622 	/*
2623 	 * Check that all the upper and lower queues are closed.
2624 	 */
2625 
2626 	for (unit = 0; unit < NLUNITS; unit++) {
2627 		lu = &sm_ssp->sm_uqs[unit];
2628 		if (lu && lu->sm_protocol != NULL_PROTOCOL) {
2629 			sm_dbg('V', ("detach: upper unit still open.\n"));
2630 			return (DDI_FAILURE);
2631 		}
2632 	}
2633 	for (unit = 0; unit < MAX_LQS; unit++) {
2634 		pu = &sm_ssp->sm_lqs[unit];
2635 		if (pu && pu->sm_linkid != 0) {
2636 			sm_dbg('V', ("detach: lower unit still linked (%d)\n",
2637 			    pu->sm_linkid));
2638 			return (DDI_FAILURE);
2639 		}
2640 	}
2641 
2642 	for (unit = 0; unit < NLUNITS; unit++) {
2643 		lu = &sm_ssp->sm_uqs[unit];
2644 		mutex_destroy(lu->sm_umutex);
2645 		cv_destroy(lu->sm_ucv);
2646 		mutex_destroy(&lu->sm_ttycommon->t_excl);
2647 	}
2648 	for (unit = 0; unit < MAX_LQS; unit++) {
2649 		pu = &sm_ssp->sm_lqs[unit];
2650 		mutex_destroy(pu->sm_umutex);
2651 		cv_destroy(pu->sm_ucv);
2652 		mutex_destroy(&pu->sm_ttycommon->t_excl);
2653 	}
2654 
2655 	/*
2656 	 * Tidy up per instance state.
2657 	 */
2658 	kmem_free(sm_ssp->sm_lqs, sizeof (sm_lqi_t) * MAX_LQS);
2659 	kmem_free(sm_ssp->sm_uqs, sizeof (sm_uqi_t) * NLUNITS);
2660 	kmem_free(sm_ssp, sizeof (sm_ss_t));
2661 
2662 	sm_ssp = 0;
2663 
2664 	/*
2665 	 * Remove all of the devices created in attach.
2666 	 */
2667 	ddi_remove_minor_node(dip, NULL);
2668 
2669 	return (DDI_SUCCESS);
2670 }
2671 
2672 /*
2673  * SECTION
2674  * Driver interface to the OS.
2675  */
2676 
2677 /*
2678  * The driver is responsible for managing the mapping between the file system
2679  * device types (major/minor pairs) and the corresponding instance of the driver
2680  * or device information pointer (dip).
2681  * sm_info - return the instance or dip corresponding to the dev_t.
2682  */
2683 /*ARGSUSED*/
2684 static int
2685 sm_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2686 {
2687 	int res = DDI_SUCCESS;
2688 
2689 	switch (infocmd) {
2690 	case DDI_INFO_DEVT2DEVINFO:
2691 		if (sm_ssp == NULL)
2692 			res = DDI_FAILURE;
2693 		else
2694 			*result = (void *)sm_ssp->sm_dip;
2695 		break;
2696 
2697 	case DDI_INFO_DEVT2INSTANCE:
2698 		*result = (void*)0;	/* single instance driver */
2699 		break;
2700 
2701 	default:
2702 		res = DDI_FAILURE;
2703 		break;
2704 	}
2705 
2706 	return (res);
2707 }
2708 
2709 /*
2710  * End of driver implementation
2711  */
2712 
2713 /*
2714  * Loadable module interface to the kernel
2715  */
2716 
2717 /*
2718  * Firstly the Streams specific interface
2719  */
2720 
2721 /*
2722  * Solaris driver/STREAM initialisation structures.
2723  */
2724 static struct module_info uinfo =
2725 {
2726 	SM_MOD_ID,
2727 	TTYMUX_DRVNAME,
2728 	0,		/* min packet size */
2729 	INFPSZ,		/* max packet size */
2730 	2048,		/* high water mark */
2731 	256,		/* low water mark */
2732 };
2733 
2734 /*
2735  * Use zero water marks because the lower queues are used only for flow control.
2736  */
2737 static struct module_info linfo =
2738 {
2739 	SM_MOD_ID,
2740 	TTYMUX_DRVNAME,
2741 	0,		/* min packet size */
2742 	INFPSZ,		/* max packet size */
2743 	0,		/* high water mark */
2744 	0		/* low water mark	*/
2745 };
2746 
2747 
2748 /*
2749  * Solaris upper read STREAM initialisation structure.
2750  */
2751 static struct qinit urinit =
2752 {
2753 	sm_urput,	/* put */
2754 	sm_ursrv,	/* service */
2755 	sm_open,	/* open */
2756 	sm_close,	/* close */
2757 	NULL,		/* admin */
2758 	&uinfo,		/* module info */
2759 	NULL		/* stats */
2760 };
2761 
2762 /*
2763  * Solaris upper write STREAM initialisation structure.
2764  */
2765 static struct qinit uwinit =
2766 {
2767 	sm_uwput,
2768 	sm_uwsrv,
2769 	NULL,
2770 	NULL,
2771 	NULL,
2772 	&uinfo,
2773 	NULL
2774 };
2775 
2776 /*
2777  * Solaris lower read STREAM initialisation structure.
2778  */
2779 static struct qinit lrinit =
2780 {
2781 	sm_lrput,
2782 	sm_lrsrv,
2783 	NULL,
2784 	NULL, NULL,
2785 	&linfo,
2786 	NULL
2787 };
2788 
2789 /*
2790  * Solaris lower write STREAM initialisation structure.
2791  */
2792 static struct qinit lwinit =
2793 {
2794 	putq,
2795 	sm_lwsrv,
2796 	NULL,
2797 	NULL,
2798 	NULL,
2799 	&linfo,
2800 	NULL
2801 };
2802 
2803 /*
2804  * Multiplexing STREAM structure.
2805  */
2806 struct streamtab sm_streamtab =
2807 {
2808 	&urinit,
2809 	&uwinit,
2810 	&lrinit,
2811 	&lwinit
2812 };
2813 
2814 /*
2815  * Driver operations structure (struct cb_ops) and
2816  * driver dynamic loading functions (struct dev_ops).
2817  */
2818 
2819 /*
2820  * Fold the Stream interface to the kernel into the driver interface
2821  * to the OS.
2822  */
2823 
2824 DDI_DEFINE_STREAM_OPS(sm_ops, \
2825 	nulldev, nulldev, \
2826 	sm_attach, sm_detach, nodev, \
2827 	sm_info, (D_NEW | D_MTQPAIR|D_MTOUTPERIM|D_MTOCEXCL | D_MP),
2828 	&sm_streamtab, ddi_quiesce_not_supported);
2829 
2830 /*
2831  * Driver module information.
2832  */
2833 extern struct mod_ops mod_driverops;
2834 static struct modldrv modldrv =
2835 {
2836 	&mod_driverops,
2837 	"serial mux driver",
2838 	&sm_ops
2839 };
2840 
2841 static struct modlinkage modlinkage =
2842 {
2843 	MODREV_1,
2844 	&modldrv,
2845 	NULL
2846 };
2847 
2848 /*
2849  * Define the body of our interface to the OS.
2850  */
2851 
2852 /*
2853  * '_init' is called by Solaris to initialise any driver-specific
2854  * state and to install the driver.
2855  */
2856 int
2857 _init(void)
2858 {
2859 	return (mod_install(&modlinkage));
2860 }
2861 
2862 /*
2863  * _info - return this driver's interface to the kernel.
2864  */
2865 int
2866 _info(struct modinfo *modinfop)
2867 {
2868 	return (mod_info(&modlinkage, modinfop));
2869 }
2870 
2871 /*
2872  * _fini - the OS is finished with the services provided by the driver;
2873  * remove ourselves and then remove any footprint that remains.
2874  */
2875 int
2876 _fini(void)
2877 {
2878 	return (mod_remove(&modlinkage));
2879 }
2880