xref: /titanic_44/usr/src/uts/common/os/strsubr.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
23 /*	  All Rights Reserved  	*/
24 
25 
26 /*
27  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 #pragma ident	"%Z%%M%	%I%	%E% SMI"
32 
33 #include <sys/types.h>
34 #include <sys/sysmacros.h>
35 #include <sys/param.h>
36 #include <sys/errno.h>
37 #include <sys/signal.h>
38 #include <sys/proc.h>
39 #include <sys/conf.h>
40 #include <sys/cred.h>
41 #include <sys/user.h>
42 #include <sys/vnode.h>
43 #include <sys/file.h>
44 #include <sys/session.h>
45 #include <sys/stream.h>
46 #include <sys/strsubr.h>
47 #include <sys/stropts.h>
48 #include <sys/poll.h>
49 #include <sys/systm.h>
50 #include <sys/cpuvar.h>
51 #include <sys/uio.h>
52 #include <sys/cmn_err.h>
53 #include <sys/priocntl.h>
54 #include <sys/procset.h>
55 #include <sys/vmem.h>
56 #include <sys/bitmap.h>
57 #include <sys/kmem.h>
58 #include <sys/siginfo.h>
59 #include <sys/vtrace.h>
60 #include <sys/callb.h>
61 #include <sys/debug.h>
62 #include <sys/modctl.h>
63 #include <sys/vmsystm.h>
64 #include <vm/page.h>
65 #include <sys/atomic.h>
66 #include <sys/suntpi.h>
67 #include <sys/strlog.h>
68 #include <sys/promif.h>
69 #include <sys/project.h>
70 #include <sys/vm.h>
71 #include <sys/taskq.h>
72 #include <sys/sunddi.h>
73 #include <sys/sunldi_impl.h>
74 #include <sys/strsun.h>
75 #include <sys/isa_defs.h>
76 #include <sys/multidata.h>
77 #include <sys/pattr.h>
78 #include <sys/strft.h>
79 #include <sys/zone.h>
80 
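/*
 * Old-style counterpart of _SAMESTR(): q and its q_next are on the same
 * stream when both are read queues or both are write queues, i.e. when
 * their QREADR flag bits match.
 */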
81 #define	O_SAMESTR(q)	(((q)->q_next) && \
82 	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))
83 
84 /*
85  * WARNING:
86  * The variables and routines in this file are private, belonging
87  * to the STREAMS subsystem. These should not be used by modules
88  * or drivers. Compatibility will not be guaranteed.
89  */
90 
91 /*
92  * Id value used to distinguish between different multiplexor links.
93  */
94 static int32_t lnk_id = 0;
95 
96 #define	STREAMS_LOPRI MINCLSYSPRI
97 static pri_t streams_lopri = STREAMS_LOPRI;
98 
99 #define	STRSTAT(x)	(str_statistics.x.value.ui64++)
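/* For example, STRSTAT(taskqfails) bumps the "taskqfails" counter below. */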
100 typedef struct str_stat {
101 	kstat_named_t	sqenables;
102 	kstat_named_t	stenables;
103 	kstat_named_t	syncqservice;
104 	kstat_named_t	freebs;
105 	kstat_named_t	qwr_outer;
106 	kstat_named_t	rservice;
107 	kstat_named_t	strwaits;
108 	kstat_named_t	taskqfails;
109 	kstat_named_t	bufcalls;
110 	kstat_named_t	qhelps;
111 	kstat_named_t	qremoved;
112 	kstat_named_t	sqremoved;
113 	kstat_named_t	bcwaits;
114 	kstat_named_t	sqtoomany;
115 } str_stat_t;
116 
117 static str_stat_t str_statistics = {
118 	{ "sqenables",		KSTAT_DATA_UINT64 },
119 	{ "stenables",		KSTAT_DATA_UINT64 },
120 	{ "syncqservice",	KSTAT_DATA_UINT64 },
121 	{ "freebs",		KSTAT_DATA_UINT64 },
122 	{ "qwr_outer",		KSTAT_DATA_UINT64 },
123 	{ "rservice",		KSTAT_DATA_UINT64 },
124 	{ "strwaits",		KSTAT_DATA_UINT64 },
125 	{ "taskqfails",		KSTAT_DATA_UINT64 },
126 	{ "bufcalls",		KSTAT_DATA_UINT64 },
127 	{ "qhelps",		KSTAT_DATA_UINT64 },
128 	{ "qremoved",		KSTAT_DATA_UINT64 },
129 	{ "sqremoved",		KSTAT_DATA_UINT64 },
130 	{ "bcwaits",		KSTAT_DATA_UINT64 },
131 	{ "sqtoomany",		KSTAT_DATA_UINT64 },
132 };
133 
134 static kstat_t *str_kstat;
135 
136 /*
137  * qrunflag was used previously to control background scheduling of queues. It
138  * is not used anymore, but kept here in case some module still wants to access
139  * it via the qready() and setqsched macros.
140  */
141 char qrunflag;			/*  Unused */
142 
143 /*
144  * Most of the streams scheduling is done via task queues. Task queues may fail
145  * for non-sleep dispatches, so there are two backup threads servicing failed
146  * requests for queues and syncqs. Both of these threads also service failed
147  * freebs dispatch requests. Queues are put in the list specified by the
148  * `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail'
149  * pointers, and freebs requests are put into `freebs_list', which has no
150  * tail pointer. All three lists are protected by a single `service_queue'
151  * lock and use the `services_to_run' condition variable for signaling the background threads. Use of
152  * a single lock should not be a problem because it is only used under heavy
153  * loads when task queues start to fail and at that time it may be a good idea
154  * to throttle scheduling requests.
155  *
156  * NOTE: queues and syncqs should be scheduled by two separate threads because
157  * queue servicing may be blocked waiting for a syncq which may be also
158  * scheduled for background execution. This may create a deadlock when only one
159  * thread is used for both.
160  */
161 
162 static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */
163 
164 static kmutex_t service_queue;		/* protects all of servicing vars */
165 static kcondvar_t services_to_run;	/* wake up background service thread */
166 static kcondvar_t syncqs_to_run;	/* wake up background service thread */
167 
168 /*
169  * List of queues scheduled for background processing due to lack of
170  * resources in the task queues. Protected by the service_queue lock.
171  */
172 static struct queue *qhead;
173 static struct queue *qtail;
174 
175 /*
176  * Same list for syncqs
177  */
178 static syncq_t *sqhead;
179 static syncq_t *sqtail;
180 
181 static mblk_t *freebs_list;	/* list of buffers to free */
182 
183 /*
184  * Backup threads for servicing queues and syncqs
185  */
186 kthread_t *streams_qbkgrnd_thread;
187 kthread_t *streams_sqbkgrnd_thread;
188 
189 /*
190  * Bufcalls related variables.
191  */
192 struct bclist	strbcalls;	/* list of waiting bufcalls */
193 kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
194 kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
195 kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
196 kcondvar_t	bcall_cv;	/* wait until executing bufcall completes */
197 kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */
198 
199 kmutex_t	strresources;	/* protects global resources */
200 kmutex_t	muxifier;	/* single-threads multiplexor creation */
201 
202 extern void	time_to_wait(clock_t *, clock_t);
203 
204 /*
205  * run_queues is no longer used, but is kept in case some third-party
206  * module/driver decides to use it.
207  */
208 int run_queues = 0;
209 
210 /*
211  * sq_max_size is the depth of the syncq (in number of messages) before
212  * qfill_syncq() starts QFULL'ing destination queues. Although its primary
213  * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
214  * depend on this syncq flow control, so we prefer to choose a large number
215  * as the default value. For potential
216  * performance gain, this value is tunable in /etc/system.
217  */
218 int sq_max_size = 10000;
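/*
 * For example, a line such as the following in /etc/system (the value is
 * purely illustrative) would raise the limit:
 *
 *	set sq_max_size=25000
 */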
219 
220 /*
221  * The number of ciputctrl structures per syncq and stream that we create when
222  * needed.
223  */
224 int n_ciputctrl;
225 int max_n_ciputctrl = 16;
226 /*
227  * if n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
228  */
229 int min_n_ciputctrl = 2;
230 
231 static struct mux_node *mux_nodes;	/* mux info for cycle checking */
232 
233 /*
234  * Per-driver/module syncqs
235  * ========================
236  *
237  * For drivers/modules that use PERMOD or outer syncqs we keep a list of
238  * perdm structures, new entries being added (and new syncqs allocated) when
239  * setq() encounters a module/driver with a streamtab that it hasn't seen
240  * before.
241  * The reason for this mechanism is that some modules and drivers share a
242  * common streamtab and it is necessary for those modules and drivers to also
243  * share a common PERMOD syncq.
244  *
245  * perdm_list --> dm_str == streamtab_1
246  *                dm_sq == syncq_1
247  *                dm_ref
248  *                dm_next --> dm_str == streamtab_2
249  *                            dm_sq == syncq_2
250  *                            dm_ref
251  *                            dm_next --> ... NULL
252  *
253  * The dm_ref field is incremented for each new driver/module that takes
254  * a reference to the perdm structure and hence shares the syncq.
255  * References are held in the fmodsw_impl_t structure for each STREAMS module
256  * or the dev_impl array (indexed by device major number) for each driver.
257  *
258  * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
259  *		     ^                 ^ ^               ^
260  *                   |  ______________/  |               |
261  *                   | /                 |               |
262  * dev_impl:     ...|x|y|...          module A	      module B
263  *
264  * When a module/driver is unloaded the reference count is decremented and,
265  * when it falls to zero, the perdm structure is removed from the list and
266  * the syncq is freed (see rele_dm()).
267  */
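/*
 * As a rough sketch only (the authoritative logic lives in hold_dm() and
 * rele_dm()), sharing an existing PERMOD syncq amounts to a lookup such as:
 *
 *	perdm_t *p;
 *	rw_enter(&perdm_rwlock, RW_READER);
 *	for (p = perdm_list; p != NULL; p = p->dm_next)
 *		if (p->dm_str == str)
 *			break;
 *	rw_exit(&perdm_rwlock);
 *
 * with a miss falling through to allocate a fresh perdm_t/syncq pair and
 * prepend it to perdm_list under the writer lock.
 */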
268 perdm_t *perdm_list = NULL;
269 static krwlock_t perdm_rwlock;
270 cdevsw_impl_t *devimpl;
271 
272 extern struct qinit strdata;
273 extern struct qinit stwdata;
274 
275 static void runservice(queue_t *);
276 static void streams_bufcall_service(void);
277 static void streams_qbkgrnd_service(void);
278 static void streams_sqbkgrnd_service(void);
279 static syncq_t *new_syncq(void);
280 static void free_syncq(syncq_t *);
281 static void outer_insert(syncq_t *, syncq_t *);
282 static void outer_remove(syncq_t *, syncq_t *);
283 static void write_now(syncq_t *);
284 static void clr_qfull(queue_t *);
285 static void enable_svc(queue_t *);
286 static void runbufcalls(void);
287 static void sqenable(syncq_t *);
288 static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
289 static void wait_q_syncq(queue_t *);
290 
291 static void queue_service(queue_t *);
292 static void stream_service(stdata_t *);
293 static void syncq_service(syncq_t *);
294 static void qwriter_outer_service(syncq_t *);
295 static void mblk_free(mblk_t *);
296 #ifdef DEBUG
297 static int qprocsareon(queue_t *);
298 #endif
299 
300 static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
301 static void reset_nfsrv_ptr(queue_t *, queue_t *);
302 
303 static void sq_run_events(syncq_t *);
304 static int propagate_syncq(queue_t *);
305 
306 static void	blocksq(syncq_t *, ushort_t, int);
307 static void	unblocksq(syncq_t *, ushort_t, int);
308 static int	dropsq(syncq_t *, uint16_t);
309 static void	emptysq(syncq_t *);
310 static sqlist_t *sqlist_alloc(struct stdata *, int);
311 static void	sqlist_free(sqlist_t *);
312 static sqlist_t	*sqlist_build(queue_t *, struct stdata *, boolean_t);
313 static void	sqlist_insert(sqlist_t *, syncq_t *);
314 static void	sqlist_insertall(sqlist_t *, queue_t *);
315 
316 static void	strsetuio(stdata_t *);
317 
318 struct kmem_cache *stream_head_cache;
319 struct kmem_cache *queue_cache;
320 struct kmem_cache *syncq_cache;
321 struct kmem_cache *qband_cache;
322 struct kmem_cache *linkinfo_cache;
323 struct kmem_cache *ciputctrl_cache = NULL;
324 
325 static linkinfo_t *linkinfo_list;
326 
327 /*
328  *  Qinit structure and Module_info structures
329  *	for passthru read and write queues
330  */
331 
332 static void pass_wput(queue_t *, mblk_t *);
333 static queue_t *link_addpassthru(stdata_t *);
334 static void link_rempassthru(queue_t *);
335 
336 struct  module_info passthru_info = {
337 	0,
338 	"passthru",
339 	0,
340 	INFPSZ,
341 	STRHIGH,
342 	STRLOW
343 };
344 
345 struct  qinit passthru_rinit = {
346 	(int (*)())putnext,
347 	NULL,
348 	NULL,
349 	NULL,
350 	NULL,
351 	&passthru_info,
352 	NULL
353 };
354 
355 struct  qinit passthru_winit = {
356 	(int (*)()) pass_wput,
357 	NULL,
358 	NULL,
359 	NULL,
360 	NULL,
361 	&passthru_info,
362 	NULL
363 };
364 
365 /*
366  * Special form of assertion: verify that X implies Y, i.e. when X is true, Y
367  * should also be true.
368  */
369 #define	IMPLY(X, Y)	ASSERT(!(X) || (Y))
370 
371 /*
372  * Logical equivalence. Verify that X and Y are either both TRUE or both FALSE.
373  */
374 #define	EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }
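/*
 * For instance, SQPUT_Q below uses EQUIV(sq->sq_head, sq->sq_tail) to
 * assert that the syncq list head and tail are NULL (or non-NULL) together.
 */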
375 
376 /*
377  * Verify correctness of list head/tail pointers.
378  */
379 #define	LISTCHECK(head, tail, link) {				\
380 	EQUIV(head, tail);					\
381 	IMPLY(tail != NULL, tail->link == NULL);		\
382 }
383 
384 /*
385  * Enqueue a list element `el' at the end of a list denoted by `head' and `tail'
386  * using a `link' field.
387  */
388 #define	ENQUEUE(el, head, tail, link) {				\
389 	ASSERT(el->link == NULL);				\
390 	LISTCHECK(head, tail, link);				\
391 	if (head == NULL)					\
392 		head = el;					\
393 	else							\
394 		tail->link = el;				\
395 	tail = el;						\
396 }
397 
398 /*
399  * Dequeue the first element of the list denoted by `head' and `tail' pointers
400  * using a `link' field and put result into `el'.
401  */
402 #define	DQ(el, head, tail, link) {				\
403 	LISTCHECK(head, tail, link);				\
404 	el = head;						\
405 	if (head != NULL) {					\
406 		head = head->link;				\
407 		if (head == NULL)				\
408 			tail = NULL;				\
409 		el->link = NULL;				\
410 	}							\
411 }
412 
413 /*
414  * Remove `el' from the list using the `chase' and `curr' pointers and return
415  * the result in `succeed'.
416  */
417 #define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
418 	LISTCHECK(head, tail, link);				\
419 	chase = NULL;						\
420 	succeed = 0;						\
421 	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
422 		chase = curr;					\
423 	if (curr != NULL) {					\
424 		succeed = 1;					\
425 		ASSERT(curr == el);				\
426 		if (chase != NULL)				\
427 			chase->link = curr->link;		\
428 		else						\
429 			head = curr->link;			\
430 		curr->link = NULL;				\
431 		if (curr == tail)				\
432 			tail = chase;				\
433 	}							\
434 	LISTCHECK(head, tail, link);				\
435 }
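/*
 * A minimal usage sketch (assuming the conventions above; the real
 * producers and consumers are the scheduling routines later in this file):
 * a producer appends under the `service_queue' lock and signals the
 * background thread, which dequeues under the same lock:
 *
 *	mutex_enter(&service_queue);
 *	ENQUEUE(q, qhead, qtail, q_link);
 *	cv_signal(&services_to_run);
 *	mutex_exit(&service_queue);
 *
 *	mutex_enter(&service_queue);
 *	DQ(q, qhead, qtail, q_link);
 *	mutex_exit(&service_queue);
 */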
436 
437 /* Handling of delayed messages on the inner syncq. */
438 
439 /*
440  * DEBUG versions should use function versions (to simplify tracing) and
441  * non-DEBUG kernels should use macro versions.
442  */
443 
444 /*
445  * Put a queue on the syncq list of queues.
446  * Assumes SQLOCK held.
447  */
448 #define	SQPUT_Q(sq, qp)							\
449 {									\
450 	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
451 	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
452 		/* The queue should not be linked anywhere */		\
453 		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
454 		/* Head and tail may only be NULL simultaneously */	\
455 		EQUIV(sq->sq_head, sq->sq_tail);			\
456 		/* Queue may only be enqueued on its syncq */		\
457 		ASSERT(sq == qp->q_syncq);				\
458 		/* Check the correctness of SQ_MESSAGES flag */		\
459 		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
460 		/* Sanity check first/last elements of the list */	\
461 		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
462 		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
463 		/*							\
464 		 * Sanity check of the priority field: an empty		\
465 		 * queue should have zero priority and nqueues		\
466 		 * equal to zero.					\
467 		 */							\
468 		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
469 		/* Sanity check of sq_nqueues field */			\
470 		EQUIV(sq->sq_head, sq->sq_nqueues);			\
471 		if (sq->sq_head == NULL) {				\
472 			sq->sq_head = sq->sq_tail = qp;			\
473 			sq->sq_flags |= SQ_MESSAGES;			\
474 		} else if (qp->q_spri == 0) {				\
475 			qp->q_sqprev = sq->sq_tail;			\
476 			sq->sq_tail->q_sqnext = qp;			\
477 			sq->sq_tail = qp;				\
478 		} else {						\
479 			/*						\
480 			 * Put this queue in priority order: higher	\
481 			 * priority gets closer to the head.		\
482 			 */						\
483 			queue_t **qpp = &sq->sq_tail;			\
484 			queue_t *qnext = NULL;				\
485 									\
486 			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
487 				qnext = *qpp;				\
488 				qpp = &(*qpp)->q_sqprev;		\
489 			}						\
490 			qp->q_sqnext = qnext;				\
491 			qp->q_sqprev = *qpp;				\
492 			if (*qpp != NULL) {				\
493 				(*qpp)->q_sqnext = qp;			\
494 			} else {					\
495 				sq->sq_head = qp;			\
496 				sq->sq_pri = sq->sq_head->q_spri;	\
497 			}						\
498 			*qpp = qp;					\
499 		}							\
500 		qp->q_sqflags |= Q_SQQUEUED;				\
501 		qp->q_sqtstamp = lbolt;					\
502 		sq->sq_nqueues++;					\
503 	}								\
504 }
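/*
 * Net effect of the insertion above: zero-priority queues are appended at
 * sq_tail in FIFO order, while a queue with nonzero q_spri is placed so
 * that higher priorities sit closer to sq_head; whenever such a queue
 * becomes the new head, sq_pri is updated to its priority.
 */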
505 
506 /*
507  * Remove a queue from the syncq list
508  * Assumes SQLOCK held.
509  */
510 #define	SQRM_Q(sq, qp)							\
511 	{								\
512 		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
513 		ASSERT(qp->q_sqflags & Q_SQQUEUED);			\
514 		ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);	\
515 		ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);		\
516 		/* Check that the queue is actually in the list */	\
517 		ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);	\
518 		ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);	\
519 		ASSERT(sq->sq_nqueues != 0);				\
520 		if (qp->q_sqprev == NULL) {				\
521 			/* First queue on list, make head q_sqnext */	\
522 			sq->sq_head = qp->q_sqnext;			\
523 		} else {						\
524 			/* Make prev->next == next */			\
525 			qp->q_sqprev->q_sqnext = qp->q_sqnext;		\
526 		}							\
527 		if (qp->q_sqnext == NULL) {				\
528 			/* Last queue on list, make tail sqprev */	\
529 			sq->sq_tail = qp->q_sqprev;			\
530 		} else {						\
531 			/* Make next->prev == prev */			\
532 			qp->q_sqnext->q_sqprev = qp->q_sqprev;		\
533 		}							\
534 		/* clear out references on this queue */		\
535 		qp->q_sqprev = qp->q_sqnext = NULL;			\
536 		qp->q_sqflags &= ~Q_SQQUEUED;				\
537 		/* If there is nothing queued, clear SQ_MESSAGES */	\
538 		if (sq->sq_head != NULL) {				\
539 			sq->sq_pri = sq->sq_head->q_spri;		\
540 		} else	{						\
541 			sq->sq_flags &= ~SQ_MESSAGES;			\
542 			sq->sq_pri = 0;					\
543 		}							\
544 		sq->sq_nqueues--;					\
545 		ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||	\
546 		    (sq->sq_flags & SQ_QUEUED) == 0);			\
547 	}
548 
549 /* Hide the definition from the header file. */
550 #ifdef SQPUT_MP
551 #undef SQPUT_MP
552 #endif
553 
554 /*
555  * Put a message on the queue syncq.
556  * Assumes QLOCK held.
557  */
558 #define	SQPUT_MP(qp, mp)						\
559 	{								\
560 		ASSERT(MUTEX_HELD(QLOCK(qp)));				\
561 		ASSERT(qp->q_sqhead == NULL ||				\
562 		    (qp->q_sqtail != NULL &&				\
563 		    qp->q_sqtail->b_next == NULL));			\
564 		qp->q_syncqmsgs++;					\
565 		ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */	\
566 		if (qp->q_sqhead == NULL) {				\
567 			qp->q_sqhead = qp->q_sqtail = mp;		\
568 		} else {						\
569 			qp->q_sqtail->b_next = mp;			\
570 			qp->q_sqtail = mp;				\
571 		}							\
572 		ASSERT(qp->q_syncqmsgs > 0);				\
573 	}
574 
575 #define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
576 		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
577 		if ((sq)->sq_ciputctrl != NULL) {			\
578 			int i;						\
579 			int nlocks = (sq)->sq_nciputctrl;		\
580 			ciputctrl_t *cip = (sq)->sq_ciputctrl;		\
581 			ASSERT((sq)->sq_type & SQ_CIPUT);		\
582 			for (i = 0; i <= nlocks; i++) {			\
583 				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
584 				cip[i].ciputctrl_count |= SQ_FASTPUT;	\
585 			}						\
586 		}							\
587 	}
588 
589 
590 #define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
591 		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
592 		if ((sq)->sq_ciputctrl != NULL) {			\
593 			int i;						\
594 			int nlocks = (sq)->sq_nciputctrl;		\
595 			ciputctrl_t *cip = (sq)->sq_ciputctrl;		\
596 			ASSERT((sq)->sq_type & SQ_CIPUT);		\
597 			for (i = 0; i <= nlocks; i++) {			\
598 				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
599 				cip[i].ciputctrl_count &= ~SQ_FASTPUT;	\
600 			}						\
601 		}							\
602 	}
603 
604 /*
605  * Run service procedures for all queues in the stream head.
606  */
607 #define	STR_SERVICE(stp, q) {						\
608 	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
609 	while (stp->sd_qhead != NULL) {					\
610 		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
611 		ASSERT(stp->sd_nqueues > 0);				\
612 		stp->sd_nqueues--;					\
613 		ASSERT(!(q->q_flag & QINSERVICE));			\
614 		mutex_exit(&stp->sd_qlock);				\
615 		queue_service(q);					\
616 		mutex_enter(&stp->sd_qlock);				\
617 	}								\
618 	ASSERT(stp->sd_nqueues == 0);					\
619 	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
620 }
621 
622 /*
623  * constructor/destructor routines for the stream head cache
624  */
625 /* ARGSUSED */
626 static int
627 stream_head_constructor(void *buf, void *cdrarg, int kmflags)
628 {
629 	stdata_t *stp = buf;
630 
631 	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
632 	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
633 	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
634 	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
635 	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
636 	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
637 	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
638 	stp->sd_wrq = NULL;
639 
640 	return (0);
641 }
642 
643 /* ARGSUSED */
644 static void
645 stream_head_destructor(void *buf, void *cdrarg)
646 {
647 	stdata_t *stp = buf;
648 
649 	mutex_destroy(&stp->sd_lock);
650 	mutex_destroy(&stp->sd_reflock);
651 	mutex_destroy(&stp->sd_qlock);
652 	cv_destroy(&stp->sd_monitor);
653 	cv_destroy(&stp->sd_iocmonitor);
654 	cv_destroy(&stp->sd_qcv);
655 	cv_destroy(&stp->sd_zcopy_wait);
656 }
657 
658 /*
659  * constructor/destructor routines for the queue cache
660  */
661 /* ARGSUSED */
662 static int
663 queue_constructor(void *buf, void *cdrarg, int kmflags)
664 {
665 	queinfo_t *qip = buf;
666 	queue_t *qp = &qip->qu_rqueue;
667 	queue_t *wqp = &qip->qu_wqueue;
668 	syncq_t	*sq = &qip->qu_syncq;
669 
670 	qp->q_first = NULL;
671 	qp->q_link = NULL;
672 	qp->q_count = 0;
673 	qp->q_mblkcnt = 0;
674 	qp->q_sqhead = NULL;
675 	qp->q_sqtail = NULL;
676 	qp->q_sqnext = NULL;
677 	qp->q_sqprev = NULL;
678 	qp->q_sqflags = 0;
679 	qp->q_rwcnt = 0;
680 	qp->q_spri = 0;
681 
682 	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
683 	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);
684 
685 	wqp->q_first = NULL;
686 	wqp->q_link = NULL;
687 	wqp->q_count = 0;
688 	wqp->q_mblkcnt = 0;
689 	wqp->q_sqhead = NULL;
690 	wqp->q_sqtail = NULL;
691 	wqp->q_sqnext = NULL;
692 	wqp->q_sqprev = NULL;
693 	wqp->q_sqflags = 0;
694 	wqp->q_rwcnt = 0;
695 	wqp->q_spri = 0;
696 
697 	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
698 	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);
699 
700 	sq->sq_head = NULL;
701 	sq->sq_tail = NULL;
702 	sq->sq_evhead = NULL;
703 	sq->sq_evtail = NULL;
704 	sq->sq_callbpend = NULL;
705 	sq->sq_outer = NULL;
706 	sq->sq_onext = NULL;
707 	sq->sq_oprev = NULL;
708 	sq->sq_next = NULL;
709 	sq->sq_svcflags = 0;
710 	sq->sq_servcount = 0;
711 	sq->sq_needexcl = 0;
712 	sq->sq_nqueues = 0;
713 	sq->sq_pri = 0;
714 
715 	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
716 	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
717 	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
718 
719 	return (0);
720 }
721 
722 /* ARGSUSED */
723 static void
724 queue_destructor(void *buf, void *cdrarg)
725 {
726 	queinfo_t *qip = buf;
727 	queue_t *qp = &qip->qu_rqueue;
728 	queue_t *wqp = &qip->qu_wqueue;
729 	syncq_t	*sq = &qip->qu_syncq;
730 
731 	ASSERT(qp->q_sqhead == NULL);
732 	ASSERT(wqp->q_sqhead == NULL);
733 	ASSERT(qp->q_sqnext == NULL);
734 	ASSERT(wqp->q_sqnext == NULL);
735 	ASSERT(qp->q_rwcnt == 0);
736 	ASSERT(wqp->q_rwcnt == 0);
737 
738 	mutex_destroy(&qp->q_lock);
739 	cv_destroy(&qp->q_wait);
740 
741 	mutex_destroy(&wqp->q_lock);
742 	cv_destroy(&wqp->q_wait);
743 
744 	mutex_destroy(&sq->sq_lock);
745 	cv_destroy(&sq->sq_wait);
746 	cv_destroy(&sq->sq_exitwait);
747 }
748 
749 /*
750  * constructor/destructor routines for the syncq cache
751  */
752 /* ARGSUSED */
753 static int
754 syncq_constructor(void *buf, void *cdrarg, int kmflags)
755 {
756 	syncq_t	*sq = buf;
757 
758 	bzero(buf, sizeof (syncq_t));
759 
760 	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
761 	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
762 	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
763 
764 	return (0);
765 }
766 
767 /* ARGSUSED */
768 static void
769 syncq_destructor(void *buf, void *cdrarg)
770 {
771 	syncq_t	*sq = buf;
772 
773 	ASSERT(sq->sq_head == NULL);
774 	ASSERT(sq->sq_tail == NULL);
775 	ASSERT(sq->sq_evhead == NULL);
776 	ASSERT(sq->sq_evtail == NULL);
777 	ASSERT(sq->sq_callbpend == NULL);
778 	ASSERT(sq->sq_callbflags == 0);
779 	ASSERT(sq->sq_outer == NULL);
780 	ASSERT(sq->sq_onext == NULL);
781 	ASSERT(sq->sq_oprev == NULL);
782 	ASSERT(sq->sq_next == NULL);
783 	ASSERT(sq->sq_needexcl == 0);
784 	ASSERT(sq->sq_svcflags == 0);
785 	ASSERT(sq->sq_servcount == 0);
786 	ASSERT(sq->sq_nqueues == 0);
787 	ASSERT(sq->sq_pri == 0);
788 	ASSERT(sq->sq_count == 0);
789 	ASSERT(sq->sq_rmqcount == 0);
790 	ASSERT(sq->sq_cancelid == 0);
791 	ASSERT(sq->sq_ciputctrl == NULL);
792 	ASSERT(sq->sq_nciputctrl == 0);
793 	ASSERT(sq->sq_type == 0);
794 	ASSERT(sq->sq_flags == 0);
795 
796 	mutex_destroy(&sq->sq_lock);
797 	cv_destroy(&sq->sq_wait);
798 	cv_destroy(&sq->sq_exitwait);
799 }
800 
801 /* ARGSUSED */
802 static int
803 ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
804 {
805 	ciputctrl_t *cip = buf;
806 	int i;
807 
808 	for (i = 0; i < n_ciputctrl; i++) {
809 		cip[i].ciputctrl_count = SQ_FASTPUT;
810 		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
811 	}
812 
813 	return (0);
814 }
815 
816 /* ARGSUSED */
817 static void
818 ciputctrl_destructor(void *buf, void *cdrarg)
819 {
820 	ciputctrl_t *cip = buf;
821 	int i;
822 
823 	for (i = 0; i < n_ciputctrl; i++) {
824 		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
825 		mutex_destroy(&cip[i].ciputctrl_lock);
826 	}
827 }
828 
829 /*
830  * Init routine run from main at boot time.
831  */
832 void
833 strinit(void)
834 {
835 	int i;
836 	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
837 
838 	/*
839 	 * Set up mux_node structures.
840 	 */
841 	mux_nodes = kmem_zalloc((sizeof (struct mux_node) * devcnt), KM_SLEEP);
842 	for (i = 0; i < devcnt; i++)
843 		mux_nodes[i].mn_imaj = i;
844 
845 	stream_head_cache = kmem_cache_create("stream_head_cache",
846 		sizeof (stdata_t), 0,
847 		stream_head_constructor, stream_head_destructor, NULL,
848 		NULL, NULL, 0);
849 
850 	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
851 		queue_constructor, queue_destructor, NULL, NULL, NULL, 0);
852 
853 	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
854 		syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);
855 
856 	qband_cache = kmem_cache_create("qband_cache",
857 		sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
858 
859 	linkinfo_cache = kmem_cache_create("linkinfo_cache",
860 		sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
861 
862 	n_ciputctrl = ncpus;
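	/* Round n_ciputctrl up to a power of two (e.g. 6 cpus -> 8). */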
863 	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
864 	ASSERT(n_ciputctrl >= 1);
865 	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
866 	if (n_ciputctrl >= min_n_ciputctrl) {
867 		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
868 			sizeof (ciputctrl_t) * n_ciputctrl,
869 			sizeof (ciputctrl_t), ciputctrl_constructor,
870 			ciputctrl_destructor, NULL, NULL, NULL, 0);
871 	}
872 
873 	streams_taskq = system_taskq;
874 
875 	if (streams_taskq == NULL)
876 		panic("strinit: no memory for streams taskq!");
877 
878 	bc_bkgrnd_thread = thread_create(NULL, 0,
879 	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);
880 
881 	streams_qbkgrnd_thread = thread_create(NULL, 0,
882 	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
883 
884 	streams_sqbkgrnd_thread = thread_create(NULL, 0,
885 	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
886 
887 	/*
888 	 * Create STREAMS kstats.
889 	 */
890 	str_kstat = kstat_create("streams", 0, "strstat",
891 	    "net", KSTAT_TYPE_NAMED,
892 	    sizeof (str_statistics) / sizeof (kstat_named_t),
893 	    KSTAT_FLAG_VIRTUAL);
894 
895 	if (str_kstat != NULL) {
896 		str_kstat->ks_data = &str_statistics;
897 		kstat_install(str_kstat);
898 	}
899 
900 	/*
901 	 * TPI support routine initialisation.
902 	 */
903 	tpi_init();
904 }
905 
906 void
907 str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
908 {
909 	struct stdata *stp;
910 
911 	ASSERT(vp->v_stream);
912 	stp = vp->v_stream;
913 	/* Have to hold sd_lock to prevent siglist from changing */
914 	mutex_enter(&stp->sd_lock);
915 	if (stp->sd_sigflags & event)
916 		strsendsig(stp->sd_siglist, event, band, error);
917 	mutex_exit(&stp->sd_lock);
918 }
919 
920 /*
921  * Send the "sevent" set of signals to a process.
922  * This might send more than one signal if the process is registered
923  * for multiple events. The caller should pass in an sevent that only
924  * includes the events for which the process has registered.
925  */
926 static void
927 dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
928 	uchar_t band, int error)
929 {
930 	ASSERT(MUTEX_HELD(&proc->p_lock));
931 
932 	info->si_band = 0;
933 	info->si_errno = 0;
934 
935 	if (sevent & S_ERROR) {
936 		sevent &= ~S_ERROR;
937 		info->si_code = POLL_ERR;
938 		info->si_errno = error;
939 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
940 			"strsendsig:proc %p info %p", proc, info);
941 		sigaddq(proc, NULL, info, KM_NOSLEEP);
942 		info->si_errno = 0;
943 	}
944 	if (sevent & S_HANGUP) {
945 		sevent &= ~S_HANGUP;
946 		info->si_code = POLL_HUP;
947 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
948 			"strsendsig:proc %p info %p", proc, info);
949 		sigaddq(proc, NULL, info, KM_NOSLEEP);
950 	}
951 	if (sevent & S_HIPRI) {
952 		sevent &= ~S_HIPRI;
953 		info->si_code = POLL_PRI;
954 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
955 			"strsendsig:proc %p info %p", proc, info);
956 		sigaddq(proc, NULL, info, KM_NOSLEEP);
957 	}
958 	if (sevent & S_RDBAND) {
959 		sevent &= ~S_RDBAND;
960 		if (events & S_BANDURG)
961 			sigtoproc(proc, NULL, SIGURG);
962 		else
963 			sigtoproc(proc, NULL, SIGPOLL);
964 	}
965 	if (sevent & S_WRBAND) {
966 		sevent &= ~S_WRBAND;
967 		sigtoproc(proc, NULL, SIGPOLL);
968 	}
969 	if (sevent & S_INPUT) {
970 		sevent &= ~S_INPUT;
971 		info->si_code = POLL_IN;
972 		info->si_band = band;
973 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
974 			"strsendsig:proc %p info %p", proc, info);
975 		sigaddq(proc, NULL, info, KM_NOSLEEP);
976 		info->si_band = 0;
977 	}
978 	if (sevent & S_OUTPUT) {
979 		sevent &= ~S_OUTPUT;
980 		info->si_code = POLL_OUT;
981 		info->si_band = band;
982 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
983 			"strsendsig:proc %p info %p", proc, info);
984 		sigaddq(proc, NULL, info, KM_NOSLEEP);
985 		info->si_band = 0;
986 	}
987 	if (sevent & S_MSG) {
988 		sevent &= ~S_MSG;
989 		info->si_code = POLL_MSG;
990 		info->si_band = band;
991 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
992 			"strsendsig:proc %p info %p", proc, info);
993 		sigaddq(proc, NULL, info, KM_NOSLEEP);
994 		info->si_band = 0;
995 	}
996 	if (sevent & S_RDNORM) {
997 		sevent &= ~S_RDNORM;
998 		sigtoproc(proc, NULL, SIGPOLL);
999 	}
1000 	if (sevent != 0) {
1001 		panic("strsendsig: unknown event(s) %x", sevent);
1002 	}
1003 }
1004 
1005 /*
1006  * Send SIGPOLL/SIGURG signal to all processes and process groups
1007  * registered on the given signal list that want a signal for at
1008  * least one of the specified events.
1009  *
1010  * Must be called with exclusive access to siglist (caller holding sd_lock).
1011  *
1012  * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
1013  * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
1014  * while it is in the siglist.
1015  *
1016  * For performance reasons (MP scalability) the code drops pidlock
1017  * when sending signals to a single process.
1018  * When sending to a process group the code holds
1019  * pidlock to prevent the membership in the process group from changing
1020  * while walking the p_pglink list.
1021  */
1022 void
1023 strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
1024 {
1025 	strsig_t *ssp;
1026 	k_siginfo_t info;
1027 	struct pid *pidp;
1028 	proc_t  *proc;
1029 
1030 	info.si_signo = SIGPOLL;
1031 	info.si_errno = 0;
1032 	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
1033 		int sevent;
1034 
1035 		sevent = ssp->ss_events & event;
1036 		if (sevent == 0)
1037 			continue;
1038 
1039 		if ((pidp = ssp->ss_pidp) == NULL) {
1040 			/* pid was released but still on event list */
1041 			continue;
1042 		}
1043 
1044 
1045 		if (ssp->ss_pid > 0) {
1046 			/*
1047 			 * XXX This unfortunately still generates
1048 			 * a signal when an fd is closed but
1049 			 * the proc is active.
1050 			 */
1051 			ASSERT(ssp->ss_pid == pidp->pid_id);
1052 
1053 			mutex_enter(&pidlock);
1054 			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
1055 			if (proc == NULL) {
1056 				mutex_exit(&pidlock);
1057 				continue;
1058 			}
1059 			mutex_enter(&proc->p_lock);
1060 			mutex_exit(&pidlock);
1061 			dosendsig(proc, ssp->ss_events, sevent, &info,
1062 				band, error);
1063 			mutex_exit(&proc->p_lock);
1064 		} else {
1065 			/*
1066 			 * Send to process group. Hold pidlock across
1067 			 * calls to dosendsig().
1068 			 */
1069 			pid_t pgrp = -ssp->ss_pid;
1070 
1071 			mutex_enter(&pidlock);
1072 			proc = pgfind_zone(pgrp, ALL_ZONES);
1073 			while (proc != NULL) {
1074 				mutex_enter(&proc->p_lock);
1075 				dosendsig(proc, ssp->ss_events, sevent,
1076 					&info, band, error);
1077 				mutex_exit(&proc->p_lock);
1078 				proc = proc->p_pglink;
1079 			}
1080 			mutex_exit(&pidlock);
1081 		}
1082 	}
1083 }
1084 
1085 /*
1086  * Attach a stream device or module.
1087  * qp is a read queue; the new queue is inserted so that its next
1088  * read ptr is the argument, and the write queue corresponding
1089  * to the argument points to this queue. Return 0 on success,
1090  * or a non-zero errno on failure.
1091  */
1092 int
1093 qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
1094     boolean_t is_insert)
1095 {
1096 	major_t			major;
1097 	cdevsw_impl_t		*dp;
1098 	struct streamtab	*str;
1099 	queue_t			*rq;
1100 	queue_t			*wrq;
1101 	uint32_t		qflag;
1102 	uint32_t		sqtype;
1103 	perdm_t			*dmp;
1104 	int			error;
1105 	int			sflag;
1106 
1107 	rq = allocq();
1108 	wrq = _WR(rq);
1109 	STREAM(rq) = STREAM(wrq) = STREAM(qp);
1110 
1111 	if (fp != NULL) {
1112 		str = fp->f_str;
1113 		qflag = fp->f_qflag;
1114 		sqtype = fp->f_sqtype;
1115 		dmp = fp->f_dmp;
1116 		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
1117 		sflag = MODOPEN;
1118 
1119 		/*
1120 		 * stash away a pointer to the module structure so we can
1121 		 * unref it in qdetach.
1122 		 */
1123 		rq->q_fp = fp;
1124 	} else {
1125 		ASSERT(!is_insert);
1126 
1127 		major = getmajor(*devp);
1128 		dp = &devimpl[major];
1129 
1130 		str = dp->d_str;
1131 		ASSERT(str == STREAMSTAB(major));
1132 
1133 		qflag = dp->d_qflag;
1134 		ASSERT(qflag & QISDRV);
1135 		sqtype = dp->d_sqtype;
1136 
1137 		/* create perdm_t if needed */
1138 		if (NEED_DM(dp->d_dmp, qflag))
1139 			dp->d_dmp = hold_dm(str, qflag, sqtype);
1140 
1141 		dmp = dp->d_dmp;
1142 		sflag = 0;
1143 	}
1144 
1145 	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
1146 	    "qattach:qflag == %X(%X)", qflag, *devp);
1147 
1148 	/* setq might sleep in allocator - avoid holding locks. */
1149 	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);
1150 
1151 	/*
1152 	 * Before calling the module's open routine, set up the q_next
1153 	 * pointer for inserting a module in the middle of a stream.
1154 	 *
1155 	 * Note that we can always set _QINSERTING and set up q_next
1156 	 * pointer for both inserting and pushing a module.  Then there
1157 	 * is no need for the is_insert parameter.  In insertq(), called
1158 	 * by qprocson(), assume that q_next of the new module always points
1159 	 * to the correct queue and use it for insertion.  Everything should
1160 	 * work out fine.  But in the first release of _I_INSERT, we
1161 	 * distinguish between inserting and pushing to make sure that
1162 	 * pushing a module follows the same code path as before.
1163 	 */
1164 	if (is_insert) {
1165 		rq->q_flag |= _QINSERTING;
1166 		rq->q_next = qp;
1167 	}
1168 
1169 	/*
1170 	 * If there is an outer perimeter get exclusive access during
1171 	 * the open procedure.  Bump up the reference count on the queue.
1172 	 */
1173 	entersq(rq->q_syncq, SQ_OPENCLOSE);
1174 	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
1175 	if (error != 0)
1176 		goto failed;
1177 	leavesq(rq->q_syncq, SQ_OPENCLOSE);
1178 	ASSERT(qprocsareon(rq));
1179 	return (0);
1180 
1181 failed:
1182 	rq->q_flag &= ~_QINSERTING;
1183 	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
1184 		qprocsoff(rq);
1185 	leavesq(rq->q_syncq, SQ_OPENCLOSE);
1186 	rq->q_next = wrq->q_next = NULL;
1187 	qdetach(rq, 0, 0, crp, B_FALSE);
1188 	return (error);
1189 }
1190 
1191 /*
1192  * Handle second open of stream. For modules, set the
1193  * last argument to MODOPEN and do not pass any open flags.
1194  * Ignore dummydev since this is not the first open.
1195  */
1196 int
1197 qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
1198 {
1199 	int	error;
1200 	dev_t dummydev;
1201 	queue_t *wqp = _WR(qp);
1202 
1203 	ASSERT(qp->q_flag & QREADR);
1204 	entersq(qp->q_syncq, SQ_OPENCLOSE);
1205 
1206 	dummydev = *devp;
1207 	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
1208 	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
1209 		leavesq(qp->q_syncq, SQ_OPENCLOSE);
1210 		mutex_enter(&STREAM(qp)->sd_lock);
1211 		qp->q_stream->sd_flag |= STREOPENFAIL;
1212 		mutex_exit(&STREAM(qp)->sd_lock);
1213 		return (error);
1214 	}
1215 	leavesq(qp->q_syncq, SQ_OPENCLOSE);
1216 
1217 	/*
1218 	 * successful open should have done qprocson()
1219 	 */
1220 	ASSERT(qprocsareon(_RD(qp)));
1221 	return (0);
1222 }
1223 
1224 /*
1225  * Detach a stream module or device.
1226  * If clmode == 1 then the module or driver was opened and its
1227  * close routine must be called. If clmode == 0, the module
1228  * or driver was never opened or the open failed, and so its close
1229  * should not be called.
1230  */
1231 void
1232 qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
1233 {
1234 	queue_t *wqp = _WR(qp);
1235 	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));
1236 
1237 	if (STREAM_NEEDSERVICE(STREAM(qp)))
1238 		stream_runservice(STREAM(qp));
1239 
1240 	if (clmode) {
1241 		/*
1242 		 * Make sure that all the messages on the write side syncq are
1243 		 * processed and nothing is left. Since we are closing, no new
1244 		 * messages may appear there.
1245 		 */
1246 		wait_q_syncq(wqp);
1247 
1248 		entersq(qp->q_syncq, SQ_OPENCLOSE);
1249 		if (is_remove) {
1250 			mutex_enter(QLOCK(qp));
1251 			qp->q_flag |= _QREMOVING;
1252 			mutex_exit(QLOCK(qp));
1253 		}
1254 		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
1255 		/*
1256 		 * Check that qprocsoff() was actually called.
1257 		 */
1258 		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));
1259 
1260 		leavesq(qp->q_syncq, SQ_OPENCLOSE);
1261 	} else {
1262 		disable_svc(qp);
1263 	}
1264 
1265 	/*
1266 	 * Allow any threads blocked in entersq to proceed and discover
1267 	 * that QWCLOSE is set.
1268 	 * Note: This assumes that all users of entersq check QWCLOSE.
1269 	 * Currently runservice is the only entersq that can happen
1270 	 * after removeq has finished.
1271 	 * Removeq will have discarded all messages destined to the closing
1272 	 * pair of queues from the syncq.
1273 	 * NOTE: Calling a function inside an assert is unconventional.
1274 	 * However, it does not cause any problem since flush_syncq() does
1275 	 * not change any state except when it returns non-zero i.e.
1276 	 * when the assert will trigger.
1277 	 */
1278 	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
1279 	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
1280 	ASSERT((qp->q_flag & QPERMOD) ||
1281 		((qp->q_syncq->sq_head == NULL) &&
1282 		(wqp->q_syncq->sq_head == NULL)));
1283 
1284 	/*
1285 	 * Flush the queues before q_next is set to NULL. This is needed
1286 	 * in order to backenable any downstream queue before we go away.
1287 	 * Note: we are already removed from the stream so that the
1288 	 * backenabling will not cause any messages to be delivered to our
1289 	 * put procedures.
1290 	 */
1291 	flushq(qp, FLUSHALL);
1292 	flushq(wqp, FLUSHALL);
1293 
1294 	/*
1295 	 * wait for any pending service processing to complete
1296 	 */
1297 	wait_svc(qp);
1298 
1299 	/* Tidy up - removeq only does a half-remove from stream */
1300 	qp->q_next = wqp->q_next = NULL;
1301 	ASSERT(!(qp->q_flag & QENAB));
1302 	ASSERT(!(wqp->q_flag & QENAB));
1303 
1304 	/* release any fmodsw_impl_t structure held on behalf of the queue */
1305 
1306 	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
1307 	if (qp->q_fp != NULL)
1308 		fmodsw_rele(qp->q_fp);
1309 
1310 	/* freeq removes us from the outer perimeter if any */
1311 	freeq(qp);
1312 }
1313 
1314 /* Prevent service procedures from being called */
1315 void
1316 disable_svc(queue_t *qp)
1317 {
1318 	queue_t *wqp = _WR(qp);
1319 
1320 	ASSERT(qp->q_flag & QREADR);
1321 	mutex_enter(QLOCK(qp));
1322 	qp->q_flag |= QWCLOSE;
1323 	mutex_exit(QLOCK(qp));
1324 	mutex_enter(QLOCK(wqp));
1325 	wqp->q_flag |= QWCLOSE;
1326 	mutex_exit(QLOCK(wqp));
1327 }
1328 
1329 /* allow service procedures to be called again */
1330 void
1331 enable_svc(queue_t *qp)
1332 {
1333 	queue_t *wqp = _WR(qp);
1334 
1335 	ASSERT(qp->q_flag & QREADR);
1336 	mutex_enter(QLOCK(qp));
1337 	qp->q_flag &= ~QWCLOSE;
1338 	mutex_exit(QLOCK(qp));
1339 	mutex_enter(QLOCK(wqp));
1340 	wqp->q_flag &= ~QWCLOSE;
1341 	mutex_exit(QLOCK(wqp));
1342 }
1343 
1344 /*
1345  * Remove queue from qhead/qtail if it is enabled.
1346  * Only reset QENAB if the queue was removed from the runlist.
1347  * A queue goes through four stages:
1348  *	It is on the service list and QENAB is set.
1349  *	It is removed from the service list but QENAB is still set.
1350  *	QENAB gets changed to QINSERVICE.
1351  *	QINSERVICE is reset (when the service procedure is done).
1352  * Thus we cannot reset QENAB unless we actually removed it from the service
1353  * queue.
1354  */
1355 void
1356 remove_runlist(queue_t *qp)
1357 {
1358 	if (qp->q_flag & QENAB && qhead != NULL) {
1359 		queue_t *q_chase;
1360 		queue_t *q_curr;
1361 		int removed;
1362 
1363 		mutex_enter(&service_queue);
1364 		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
1365 		mutex_exit(&service_queue);
1366 		if (removed) {
1367 			STRSTAT(qremoved);
1368 			qp->q_flag &= ~QENAB;
1369 		}
1370 	}
1371 }
1372 
1373 
1374 /*
1375  * Wait for any pending service processing to complete.
1376  * The removal of queues from the runlist is not atomic with the
1377  * clearing of the QENAB flag and the setting of the QINSERVICE flag.
1378  * Consequently it is possible for remove_runlist in strclose
1379  * to not find the queue on the runlist but for it to be QENAB
1380  * and not yet QINSERVICE -> hence wait_svc needs to check QENAB
1381  * as well as QINSERVICE.
1382  */
1383 void
1384 wait_svc(queue_t *qp)
1385 {
1386 	queue_t *wqp = _WR(qp);
1387 
1388 	ASSERT(qp->q_flag & QREADR);
1389 
1390 	/*
1391 	 * Try to remove queues from qhead/qtail list.
1392 	 */
1393 	if (qhead != NULL) {
1394 		remove_runlist(qp);
1395 		remove_runlist(wqp);
1396 	}
1397 	/*
1398 	 * Wait until the syncqs associated with the queue
1399 	 * disappear from the background processing list.
1400 	 * This only needs to be done for non-PERMOD perimeters since
1401 	 * for PERMOD perimeters the syncq may be shared and will only be freed
1402 	 * when the last module/driver is unloaded.
1403 	 * If, for PERMOD perimeters, the queue was on the syncq list, removeq()
1404 	 * should call propagate_syncq() or drain_syncq() for it. Both of these
1405 	 * functions remove the queue from its syncq list, so sqthread will not
1406 	 * try to access the queue.
1407 	 */
1408 	if (!(qp->q_flag & QPERMOD)) {
1409 		syncq_t *rsq = qp->q_syncq;
1410 		syncq_t *wsq = wqp->q_syncq;
1411 
1412 		/*
1413 		 * Disable rsq and wsq and wait for any background processing of
1414 		 * syncq to complete.
1415 		 */
1416 		wait_sq_svc(rsq);
1417 		if (wsq != rsq)
1418 			wait_sq_svc(wsq);
1419 	}
1420 
1421 	mutex_enter(QLOCK(qp));
1422 	while (qp->q_flag & (QINSERVICE|QENAB))
1423 		cv_wait(&qp->q_wait, QLOCK(qp));
1424 	mutex_exit(QLOCK(qp));
1425 	mutex_enter(QLOCK(wqp));
1426 	while (wqp->q_flag & (QINSERVICE|QENAB))
1427 		cv_wait(&wqp->q_wait, QLOCK(wqp));
1428 	mutex_exit(QLOCK(wqp));
1429 }
1430 
1431 /*
1432  * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
1433  * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
1434  * also be set, and is passed through to allocb_cred_wait().
1435  *
1436  * Returns errno on failure, zero on success.
1437  */
1438 int
1439 putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
1440 {
1441 	mblk_t *tmp;
1442 	ssize_t  count;
1443 	size_t n;
1444 	int error = 0;
1445 
1446 	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
1447 		(flag & (U_TO_K | K_TO_K)) == K_TO_K);
1448 
1449 	if (bp->b_datap->db_type == M_IOCTL) {
1450 		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1451 	} else {
1452 		ASSERT(bp->b_datap->db_type == M_COPYIN);
1453 		count = ((struct copyreq *)bp->b_rptr)->cq_size;
1454 	}
1455 	/*
1456 	 * strdoioctl validates ioc_count, so if this assert fails it
1457 	 * cannot be due to user error.
1458 	 */
1459 	ASSERT(count >= 0);
1460 
1461 	while (count > 0) {
1462 		n = MIN(MAXIOCBSZ, count);
1463 		if ((tmp = allocb_cred_wait(n, (flag & STR_NOSIG), &error,
1464 		    cr)) == NULL) {
1465 			return (error);
1466 		}
1467 		error = strcopyin(arg, tmp->b_wptr, n, flag & (U_TO_K|K_TO_K));
1468 		if (error != 0) {
1469 			freeb(tmp);
1470 			return (error);
1471 		}
1472 		arg += n;
1473 		DB_CPID(tmp) = curproc->p_pid;
1474 		tmp->b_wptr += n;
1475 		count -= n;
1476 		bp = (bp->b_cont = tmp);
1477 	}
1478 
1479 	return (0);
1480 }
1481 
1482 /*
1483  * Copy ioctl data to user-land. Return non-zero errno on failure,
1484  * 0 for success.
1485  */
1486 int
1487 getiocd(mblk_t *bp, char *arg, int copymode)
1488 {
1489 	ssize_t count;
1490 	size_t  n;
1491 	int	error;
1492 
1493 	if (bp->b_datap->db_type == M_IOCACK)
1494 		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1495 	else {
1496 		ASSERT(bp->b_datap->db_type == M_COPYOUT);
1497 		count = ((struct copyreq *)bp->b_rptr)->cq_size;
1498 	}
1499 	ASSERT(count >= 0);
1500 
1501 	for (bp = bp->b_cont; bp && count;
1502 	    count -= n, bp = bp->b_cont, arg += n) {
1503 		n = MIN(count, bp->b_wptr - bp->b_rptr);
1504 		error = strcopyout(bp->b_rptr, arg, n, copymode);
1505 		if (error)
1506 			return (error);
1507 	}
1508 	ASSERT(count == 0);
1509 	return (0);
1510 }
1511 
1512 /*
1513  * Allocate a linkinfo entry given the write queue of the
1514  * bottom module of the top stream and the write queue of the
1515  * stream head of the bottom stream.
1516  */
1517 linkinfo_t *
1518 alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
1519 {
1520 	linkinfo_t *linkp;
1521 
1522 	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);
1523 
1524 	linkp->li_lblk.l_qtop = qup;
1525 	linkp->li_lblk.l_qbot = qdown;
1526 	linkp->li_fpdown = fpdown;
1527 
1528 	mutex_enter(&strresources);
1529 	linkp->li_next = linkinfo_list;
1530 	linkp->li_prev = NULL;
1531 	if (linkp->li_next)
1532 		linkp->li_next->li_prev = linkp;
1533 	linkinfo_list = linkp;
1534 	linkp->li_lblk.l_index = ++lnk_id;
1535 	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
1536 	mutex_exit(&strresources);
1537 
1538 	return (linkp);
1539 }
1540 
1541 /*
1542  * Free a linkinfo entry.
1543  */
1544 void
1545 lbfree(linkinfo_t *linkp)
1546 {
1547 	mutex_enter(&strresources);
1548 	if (linkp->li_next)
1549 		linkp->li_next->li_prev = linkp->li_prev;
1550 	if (linkp->li_prev)
1551 		linkp->li_prev->li_next = linkp->li_next;
1552 	else
1553 		linkinfo_list = linkp->li_next;
1554 	mutex_exit(&strresources);
1555 
1556 	kmem_cache_free(linkinfo_cache, linkp);
1557 }
1558 
1559 /*
1560  * Check for a potential linking cycle.
1561  * Return 1 if a link will result in a cycle,
1562  * and 0 otherwise.
1563  */
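/*
 * The walk below is an iterative depth-first search over the mux graph:
 * mn_startp remembers each node's next unexplored out-edge and
 * mn_originp serves as the parent pointer for backtracking, so the
 * search needs no recursion or auxiliary stack.
 */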
1564 int
1565 linkcycle(stdata_t *upstp, stdata_t *lostp)
1566 {
1567 	struct mux_node *np;
1568 	struct mux_edge *ep;
1569 	int i;
1570 	major_t lomaj;
1571 	major_t upmaj;
1572 	/*
1573 	 * If the lower stream is a pipe/FIFO, return, since link
1574 	 * cycles cannot happen on pipes/FIFOs.
1575 	 */
1576 	if (lostp->sd_vnode->v_type == VFIFO)
1577 		return (0);
1578 
1579 	for (i = 0; i < devcnt; i++) {
1580 		np = &mux_nodes[i];
1581 		MUX_CLEAR(np);
1582 	}
1583 	lomaj = getmajor(lostp->sd_vnode->v_rdev);
1584 	upmaj = getmajor(upstp->sd_vnode->v_rdev);
1585 	np = &mux_nodes[lomaj];
1586 	for (;;) {
1587 		if (!MUX_DIDVISIT(np)) {
1588 			if (np->mn_imaj == upmaj)
1589 				return (1);
1590 			if (np->mn_outp == NULL) {
1591 				MUX_VISIT(np);
1592 				if (np->mn_originp == NULL)
1593 					return (0);
1594 				np = np->mn_originp;
1595 				continue;
1596 			}
1597 			MUX_VISIT(np);
1598 			np->mn_startp = np->mn_outp;
1599 		} else {
1600 			if (np->mn_startp == NULL) {
1601 				if (np->mn_originp == NULL)
1602 					return (0);
1603 				else {
1604 					np = np->mn_originp;
1605 					continue;
1606 				}
1607 			}
1608 			/*
1609 			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
1610 			 * ignore the edge and move on. ep->me_nodep gets
1611 			 * set to NULL in mux_addedge() if it is a FIFO.
1612 			 *
1613 			 */
1614 			ep = np->mn_startp;
1615 			np->mn_startp = ep->me_nextp;
1616 			if (ep->me_nodep == NULL)
1617 				continue;
1618 			ep->me_nodep->mn_originp = np;
1619 			np = ep->me_nodep;
1620 		}
1621 	}
1622 }
1623 
1624 /*
1625  * Find linkinfo entry corresponding to the parameters.
1626  */
1627 linkinfo_t *
1628 findlinks(stdata_t *stp, int index, int type)
1629 {
1630 	linkinfo_t *linkp;
1631 	struct mux_edge *mep;
1632 	struct mux_node *mnp;
1633 	queue_t *qup;
1634 
1635 	mutex_enter(&strresources);
1636 	if ((type & LINKTYPEMASK) == LINKNORMAL) {
1637 		qup = getendq(stp->sd_wrq);
1638 		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1639 			if ((qup == linkp->li_lblk.l_qtop) &&
1640 			    (!index || (index == linkp->li_lblk.l_index))) {
1641 				mutex_exit(&strresources);
1642 				return (linkp);
1643 			}
1644 		}
1645 	} else {
1646 		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
1647 		mnp = &mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
1648 		mep = mnp->mn_outp;
1649 		while (mep) {
1650 			if ((index == 0) || (index == mep->me_muxid))
1651 				break;
1652 			mep = mep->me_nextp;
1653 		}
1654 		if (!mep) {
1655 			mutex_exit(&strresources);
1656 			return (NULL);
1657 		}
1658 		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1659 			if ((!linkp->li_lblk.l_qtop) &&
1660 			    (mep->me_muxid == linkp->li_lblk.l_index)) {
1661 				mutex_exit(&strresources);
1662 				return (linkp);
1663 			}
1664 		}
1665 	}
1666 	mutex_exit(&strresources);
1667 	return (NULL);
1668 }
1669 
1670 /*
1671  * Given a queue ptr, follow the chain of q_next pointers until you reach the
1672  * last queue on the chain and return it.
1673  */
1674 queue_t *
1675 getendq(queue_t *q)
1676 {
1677 	ASSERT(q != NULL);
1678 	while (_SAMESTR(q))
1679 		q = q->q_next;
1680 	return (q);
1681 }
1682 
1683 /*
1684  * Wait for the syncq count to drop to zero.
1685  * sq could be either an outer or an inner syncq.
1686  */
1687 
1688 static void
1689 wait_syncq(syncq_t *sq)
1690 {
1691 	uint16_t count;
1692 
1693 	mutex_enter(SQLOCK(sq));
1694 	count = sq->sq_count;
1695 	SQ_PUTLOCKS_ENTER(sq);
1696 	SUM_SQ_PUTCOUNTS(sq, count);
1697 	while (count != 0) {
1698 		sq->sq_flags |= SQ_WANTWAKEUP;
1699 		SQ_PUTLOCKS_EXIT(sq);
1700 		cv_wait(&sq->sq_wait, SQLOCK(sq));
1701 		count = sq->sq_count;
1702 		SQ_PUTLOCKS_ENTER(sq);
1703 		SUM_SQ_PUTCOUNTS(sq, count);
1704 	}
1705 	SQ_PUTLOCKS_EXIT(sq);
1706 	mutex_exit(SQLOCK(sq));
1707 }
1708 
1709 /*
1710  * Wait while there are any messages for the queue in its syncq.
1711  */
1712 static void
1713 wait_q_syncq(queue_t *q)
1714 {
1715 	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1716 		syncq_t *sq = q->q_syncq;
1717 
1718 		mutex_enter(SQLOCK(sq));
1719 		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1720 			sq->sq_flags |= SQ_WANTWAKEUP;
1721 			cv_wait(&sq->sq_wait, SQLOCK(sq));
1722 		}
1723 		mutex_exit(SQLOCK(sq));
1724 	}
1725 }
1726 
1727 
1728 int
1729 mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
1730     int lhlink)
1731 {
1732 	struct stdata *stp;
1733 	struct strioctl strioc;
1734 	struct linkinfo *linkp;
1735 	struct stdata *stpdown;
1736 	struct streamtab *str;
1737 	queue_t *passq;
1738 	syncq_t *passyncq;
1739 	queue_t *rq;
1740 	cdevsw_impl_t *dp;
1741 	uint32_t qflag;
1742 	uint32_t sqtype;
1743 	perdm_t *dmp;
1744 	int error = 0;
1745 
1746 	stp = vp->v_stream;
1747 	TRACE_1(TR_FAC_STREAMS_FR,
1748 		TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
1749 	/*
1750 	 * Test for invalid upper stream
1751 	 */
1752 	if (stp->sd_flag & STRHUP) {
1753 		return (ENXIO);
1754 	}
1755 	if (vp->v_type == VFIFO) {
1756 		return (EINVAL);
1757 	}
1758 	if (stp->sd_strtab == NULL) {
1759 		return (EINVAL);
1760 	}
1761 	if (!stp->sd_strtab->st_muxwinit) {
1762 		return (EINVAL);
1763 	}
1764 	if (fpdown == NULL) {
1765 		return (EBADF);
1766 	}
1767 	if (getmajor(stp->sd_vnode->v_rdev) >= devcnt) {
1768 		return (EINVAL);
1769 	}
1770 	mutex_enter(&muxifier);
1771 	if (stp->sd_flag & STPLEX) {
1772 		mutex_exit(&muxifier);
1773 		return (ENXIO);
1774 	}
1775 
1776 	/*
1777 	 * Test for invalid lower stream.
1778 	 * The check for v_type != VFIFO and for a major
1779 	 * number not >= devcnt is done to avoid problems with
1780 	 * adding a mux_node entry past the end of mux_nodes[].
1781 	 * For FIFOs we don't add an entry so this isn't a
1782 	 * problem.
1783 	 */
1784 	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
1785 	    (stpdown == stp) || (stpdown->sd_flag &
1786 	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
1787 	    ((stpdown->sd_vnode->v_type != VFIFO) &&
1788 	    (getmajor(stpdown->sd_vnode->v_rdev) >= devcnt)) ||
1789 	    linkcycle(stp, stpdown)) {
1790 		mutex_exit(&muxifier);
1791 		return (EINVAL);
1792 	}
1793 	TRACE_1(TR_FAC_STREAMS_FR,
1794 		TR_STPDOWN, "stpdown:%p", stpdown);
1795 	rq = getendq(stp->sd_wrq);
1796 	if (cmd == I_PLINK)
1797 		rq = NULL;
1798 
1799 	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);
1800 
1801 	strioc.ic_cmd = cmd;
1802 	strioc.ic_timout = INFTIM;
1803 	strioc.ic_len = sizeof (struct linkblk);
1804 	strioc.ic_dp = (char *)&linkp->li_lblk;
1805 
1806 	/*
1807 	 * STRPLUMB protects plumbing changes and should be set before
1808 	 * link_addpassthru()/link_rempassthru() are called, so it is set here
1809 	 * and cleared at the end of mlink when the passthru queue is removed.
1810 	 * Setting STRPLUMB prevents reopens of the stream while the passthru
1811 	 * queue is in place (it is not a proper module and doesn't have an open
1812 	 * entry point).
1813 	 *
1814 	 * STPLEX prevents any threads from entering the stream from above. It
1815 	 * can't be set before the call to link_addpassthru() because putnext
1816 	 * from below may cause stream head I/O routines to be called and these
1817 	 * routines assert that STPLEX is not set. After link_addpassthru()
1818 	 * nothing may come from below since the pass queue syncq is blocked.
1819 	 * Note also that STPLEX should be cleared before the call to
1820 	 * link_rempassthru() since when messages start flowing to the stream
1821 	 * head (e.g. because of message propagation from the pass queue) stream
1822 	 * head I/O routines may be called with the STPLEX flag set.
1823 	 *
1824 	 * When STPLEX is set, nothing may come into the stream from above and
1825 	 * it is safe to do a setq which will change stream head. So, the
1826 	 * correct sequence of actions is:
1827 	 *
1828 	 * 1) Set STRPLUMB
1829 	 * 2) Call link_addpassthru()
1830 	 * 3) Set STPLEX
1831 	 * 4) Call setq and update the stream state
1832 	 * 5) Clear STPLEX
1833 	 * 6) Call link_rempassthru()
1834 	 * 7) Clear STRPLUMB
1835 	 *
1836 	 * The same sequence applies to munlink() code.
1837 	 */
1838 	mutex_enter(&stpdown->sd_lock);
1839 	stpdown->sd_flag |= STRPLUMB;
1840 	mutex_exit(&stpdown->sd_lock);
1841 	/*
1842 	 * Add passthru queue below lower mux. This will block
1843 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
1844 	 */
1845 	passq = link_addpassthru(stpdown);
1846 
1847 	mutex_enter(&stpdown->sd_lock);
1848 	stpdown->sd_flag |= STPLEX;
1849 	mutex_exit(&stpdown->sd_lock);
1850 
1851 	rq = _RD(stpdown->sd_wrq);
1852 	/*
1853 	 * There may be messages in the streamhead's syncq due to messages
1854 	 * that arrived before link_addpassthru() was done. To avoid
1855 	 * background processing of the syncq happening simultaneously with
1856 	 * setq processing, we disable the streamhead syncq and wait until
1857 	 * the existing background thread finishes working on it.
1858 	 */
1859 	wait_sq_svc(rq->q_syncq);
1860 	passyncq = passq->q_syncq;
1861 	if (!(passyncq->sq_flags & SQ_BLOCKED))
1862 		blocksq(passyncq, SQ_BLOCKED, 0);
1863 
1864 	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
1865 	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
1866 	rq->q_ptr = _WR(rq)->q_ptr = NULL;
1867 
1868 	/* setq might sleep in allocator - avoid holding locks. */
1869 	/* Note: we are holding muxifier here. */
1870 
1871 	str = stp->sd_strtab;
1872 	dp = &devimpl[getmajor(vp->v_rdev)];
1873 	ASSERT(dp->d_str == str);
1874 
1875 	qflag = dp->d_qflag;
1876 	sqtype = dp->d_sqtype;
1877 
1878 	/* create perdm_t if needed */
1879 	if (NEED_DM(dp->d_dmp, qflag))
1880 		dp->d_dmp = hold_dm(str, qflag, sqtype);
1881 
1882 	dmp = dp->d_dmp;
1883 
1884 	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
1885 	    B_TRUE);
1886 
1887 	/*
1888 	 * XXX Remove any "odd" messages from the queue.
1889 	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
1890 	 */
1891 	error = strdoioctl(stp, &strioc, FNATIVE,
1892 	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
1893 	if (error != 0) {
1894 		lbfree(linkp);
1895 
1896 		if (!(passyncq->sq_flags & SQ_BLOCKED))
1897 			blocksq(passyncq, SQ_BLOCKED, 0);
1898 		/*
1899 		 * Restore the stream head queue and then remove
1900 		 * the passq. Turn off STPLEX before we turn on
1901 		 * the stream by removing the passq.
1902 		 */
1903 		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
1904 		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
1905 		    B_TRUE);
1906 
1907 		mutex_enter(&stpdown->sd_lock);
1908 		stpdown->sd_flag &= ~STPLEX;
1909 		mutex_exit(&stpdown->sd_lock);
1910 
1911 		link_rempassthru(passq);
1912 
1913 		mutex_enter(&stpdown->sd_lock);
1914 		stpdown->sd_flag &= ~STRPLUMB;
1915 		/* Wakeup anyone waiting for STRPLUMB to clear. */
1916 		cv_broadcast(&stpdown->sd_monitor);
1917 		mutex_exit(&stpdown->sd_lock);
1918 
1919 		mutex_exit(&muxifier);
1920 		return (error);
1921 	}
1922 	mutex_enter(&fpdown->f_tlock);
1923 	fpdown->f_count++;
1924 	mutex_exit(&fpdown->f_tlock);
1925 
1926 	/*
1927 	 * If we've made it here, the linkage is all set up, so we should
1928 	 * also set up the layered driver linkages.
1929 	 */
1930 
1931 	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1932 	if (cmd == I_LINK) {
1933 		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1934 	} else {
1935 		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1936 	}
1937 
1938 	link_rempassthru(passq);
1939 
1940 	mux_addedge(stp, stpdown, linkp->li_lblk.l_index);
1941 
1942 	/*
1943 	 * Mark the upper stream as having dependent links
1944 	 * so that strclose can clean it up.
1945 	 */
1946 	if (cmd == I_LINK) {
1947 		mutex_enter(&stp->sd_lock);
1948 		stp->sd_flag |= STRHASLINKS;
1949 		mutex_exit(&stp->sd_lock);
1950 	}
1951 	/*
1952 	 * Wake up any other processes that may have been
1953 	 * waiting on the lower stream. These will all
1954 	 * error out.
1955 	 */
1956 	mutex_enter(&stpdown->sd_lock);
1957 	/* The passthru module is removed so we may release STRPLUMB */
1958 	stpdown->sd_flag &= ~STRPLUMB;
1959 	cv_broadcast(&rq->q_wait);
1960 	cv_broadcast(&_WR(rq)->q_wait);
1961 	cv_broadcast(&stpdown->sd_monitor);
1962 	mutex_exit(&stpdown->sd_lock);
1963 	mutex_exit(&muxifier);
1964 	*rvalp = linkp->li_lblk.l_index;
1965 	return (0);
1966 }
1967 
1968 int
1969 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1970 {
1971 	int		ret;
1972 	struct file	*fpdown;
1973 
1974 	fpdown = getf(arg);
1975 	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1976 	if (fpdown != NULL)
1977 		releasef(arg);
1978 	return (ret);
1979 }
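
/*
 * For reference, these links are normally exercised from user level via
 * the I_LINK/I_PLINK streamio ioctls, which reach mlink() through the
 * stream head ioctl path. A rough sketch (illustrative only; the device
 * names and error handling are hypothetical):
 *
 *	int mux_fd = open("/dev/somemux", O_RDWR);	upper (mux) stream
 *	int low_fd = open("/dev/somedev", O_RDWR);	lower stream
 *	int muxid = ioctl(mux_fd, I_PLINK, low_fd);	-> mlink()
 *	...
 *	(void) ioctl(mux_fd, I_PUNLINK, muxid);		-> munlink()
 *
 * I_PLINK creates a persistent link that survives close(mux_fd), while
 * a plain I_LINK is dismantled when the controlling stream is closed.
 */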
1980 
1981 /*
1982  * Unlink a multiplexor link. Stp is the controlling stream for the
1983  * link, and linkp points to the link's entry in the linkinfo list.
1984  * The muxifier lock must be held on entry and is dropped on exit.
1985  *
1986  * NOTE: Currently it is assumed that the mux will process all the messages
1987  * sitting on its queue before ACKing the UNLINK. It is the responsibility
1988  * of the mux to handle all the messages that arrive before UNLINK.
1989  * If the mux has to send down messages on its lower stream before
1990  * ACKing I_UNLINK, then it *should* know how to handle messages even
1991  * after the UNLINK is acked (actually it should be able to handle them
1992  * until we re-block the read side of the pass queue here). If the mux does
1993  * not open up the lower stream, any messages that arrive during UNLINK
1994  * will be put in the stream head. If the lower stream does open
1995  * up, some messages might land in the stream head, depending on when
1996  * each message arrived and when the read side of the pass queue was
1997  * re-blocked.
1998  */
1999 int
2000 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp)
2001 {
2002 	struct strioctl strioc;
2003 	struct stdata *stpdown;
2004 	queue_t *rq, *wrq;
2005 	queue_t	*passq;
2006 	syncq_t *passyncq;
2007 	int error = 0;
2008 	file_t *fpdown;
2009 
2010 	ASSERT(MUTEX_HELD(&muxifier));
2011 
2012 	stpdown = linkp->li_fpdown->f_vnode->v_stream;
2013 
2014 	/*
2015 	 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2016 	 */
2017 	mutex_enter(&stpdown->sd_lock);
2018 	stpdown->sd_flag |= STRPLUMB;
2019 	mutex_exit(&stpdown->sd_lock);
2020 
2021 	/*
2022 	 * Add passthru queue below lower mux. This will block
2023 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2024 	 */
2025 	passq = link_addpassthru(stpdown);
2026 
2027 	if ((flag & LINKTYPEMASK) == LINKNORMAL)
2028 		strioc.ic_cmd = I_UNLINK;
2029 	else
2030 		strioc.ic_cmd = I_PUNLINK;
2031 	strioc.ic_timout = INFTIM;
2032 	strioc.ic_len = sizeof (struct linkblk);
2033 	strioc.ic_dp = (char *)&linkp->li_lblk;
2034 
2035 	error = strdoioctl(stp, &strioc, FNATIVE,
2036 	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2037 
2038 	/*
2039 	 * If there was an error and this is not called via strclose,
2040 	 * return to the user. Otherwise, pretend there was no error
2041 	 * and close the link.
2042 	 */
2043 	if (error) {
2044 		if (flag & LINKCLOSE) {
2045 			cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2046 			    "unlink ioctl, closing anyway (%d)\n", error);
2047 		} else {
2048 			link_rempassthru(passq);
2049 			mutex_enter(&stpdown->sd_lock);
2050 			stpdown->sd_flag &= ~STRPLUMB;
2051 			cv_broadcast(&stpdown->sd_monitor);
2052 			mutex_exit(&stpdown->sd_lock);
2053 			mutex_exit(&muxifier);
2054 			return (error);
2055 		}
2056 	}
2057 
2058 	mux_rmvedge(stp, linkp->li_lblk.l_index);
2059 	fpdown = linkp->li_fpdown;
2060 	lbfree(linkp);
2061 
2062 	/*
2063 	 * We go ahead and drop muxifier here--it's a nasty global lock that
2064 	 * can slow others down. It's okay to do so since attempts to mlink() this
2065 	 * stream will be stopped because STPLEX is still set in the stdata
2066 	 * structure, and munlink() is stopped because mux_rmvedge() and
2067 	 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2068 	 * respectively.  Note that we defer the closef() of fpdown until
2069 	 * after we drop muxifier since strclose() can call munlinkall().
2070 	 */
2071 	mutex_exit(&muxifier);
2072 
2073 	wrq = stpdown->sd_wrq;
2074 	rq = _RD(wrq);
2075 
2076 	/*
2077 	 * Get rid of outstanding service procedure runs, before we make
2078 	 * it a stream head, since a stream head doesn't have any service
2079 	 * procedure.
2080 	 */
2081 	disable_svc(rq);
2082 	wait_svc(rq);
2083 
2084 	/*
2085 	 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2086 	 * is queued up to be finished. The mux should take care that nothing
2087 	 * is sent down to this queue. We should do it now as we're going to
2088 	 * block passyncq if it was unblocked.
2089 	 */
2090 	if (wrq->q_flag & QPERMOD) {
2091 		syncq_t	*sq = wrq->q_syncq;
2092 
2093 		mutex_enter(SQLOCK(sq));
2094 		while (wrq->q_sqflags & Q_SQQUEUED) {
2095 			sq->sq_flags |= SQ_WANTWAKEUP;
2096 			cv_wait(&sq->sq_wait, SQLOCK(sq));
2097 		}
2098 		mutex_exit(SQLOCK(sq));
2099 	}
2100 	passyncq = passq->q_syncq;
2101 	if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2102 
2103 		syncq_t *sq, *outer;
2104 
2105 		/*
2106 		 * Messages could be flowing from underneath. We will
2107 		 * block the read side of the passq. This would be
2108 		 * sufficient for QPAIR and QPERQ muxes to ensure
2109 		 * that no data is flowing up into this queue
2110 		 * and hence no thread is active in this instance of
2111 		 * the lower mux. But for QPERMOD and QMTOUTPERIM there
2112 		 * could be messages on the inner and outer/inner
2113 		 * syncqs respectively. We will wait for them to drain.
2114 		 * Because passq is blocked, messages end up in the syncq,
2115 		 * and qfill_syncq could possibly end up setting QFULL,
2116 		 * which will access rq->q_flag. Hence, we have to
2117 		 * acquire the QLOCK in setq.
2118 		 *
2119 		 * XXX Messages can also flow from the top into this
2120 		 * queue even though the unlink is over (e.g. some instance
2121 		 * of putnext() called from the top that has still not
2122 		 * accessed this queue, and also putq(lowerq)?).
2123 		 * Solution: how about blocking the l_qtop queue?
2124 		 * Do we really care about such pure D_MP muxes?
2125 		 */
2126 
2127 		blocksq(passyncq, SQ_BLOCKED, 0);
2128 
2129 		sq = rq->q_syncq;
2130 		if ((outer = sq->sq_outer) != NULL) {
2131 
2132 			/*
2133 			 * We just have to wait for the outer sq_count
2134 			 * to drop to zero. As this does not prevent new
2135 			 * messages from entering the outer perimeter, this
2136 			 * is subject to starvation.
2137 			 *
2138 			 * NOTE: Because of the blocksq above, messages could
2139 			 * be in the inner syncq only because of some
2140 			 * thread holding the outer perimeter exclusively.
2141 			 * Hence it would be sufficient to wait for the
2142 			 * exclusive holder of the outer perimeter to drain
2143 			 * the inner and outer syncqs. But we will not depend
2144 			 * on this feature and hence check the inner syncqs
2145 			 * separately.
2146 			 */
2147 			wait_syncq(outer);
2148 		}
2149 
2150 
2151 		/*
2152 		 * There could be messages destined for
2153 		 * this queue. Let the exclusive holder
2154 		 * drain it.
2155 		 */
2156 
2157 		wait_syncq(sq);
2158 		ASSERT((rq->q_flag & QPERMOD) ||
2159 			((rq->q_syncq->sq_head == NULL) &&
2160 			(_WR(rq)->q_syncq->sq_head == NULL)));
2161 	}
2162 
2163 	/*
2164 	 * We haven't taken care of QPERMOD case yet. QPERMOD is a special
2165 	 * case as we don't disable its syncq or remove it off the syncq
2166 	 * service list.
2167 	 */
2168 	if (rq->q_flag & QPERMOD) {
2169 		syncq_t	*sq = rq->q_syncq;
2170 
2171 		mutex_enter(SQLOCK(sq));
2172 		while (rq->q_sqflags & Q_SQQUEUED) {
2173 			sq->sq_flags |= SQ_WANTWAKEUP;
2174 			cv_wait(&sq->sq_wait, SQLOCK(sq));
2175 		}
2176 		mutex_exit(SQLOCK(sq));
2177 	}
2178 
2179 	/*
2180 	 * flush_syncq changes state only when there are messages to
2181 	 * free, i.e. when it returns a non-zero value.
2182 	 */
2183 	ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2184 	ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2185 
2186 	/*
2187 	 * Nobody else should know about this queue now.
2188 	 * If the mux did not process the messages before
2189 	 * acking the I_UNLINK, free them now.
2190 	 */
2191 
2192 	flushq(rq, FLUSHALL);
2193 	flushq(_WR(rq), FLUSHALL);
2194 
2195 	/*
2196 	 * Convert the mux lower queue into a stream head queue.
2197 	 * Turn off STPLEX before we turn on the stream by removing the passq.
2198 	 */
2199 	rq->q_ptr = wrq->q_ptr = stpdown;
2200 	setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2201 
2202 	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2203 	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2204 
2205 	enable_svc(rq);
2206 
2207 	/*
2208 	 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2209 	 * needs to be set to prevent a reopen() of the stream - such a reopen
2210 	 * may try to call the non-existent pass queue open routine and panic.
2211 	 */
2212 	mutex_enter(&stpdown->sd_lock);
2213 	stpdown->sd_flag &= ~STPLEX;
2214 	mutex_exit(&stpdown->sd_lock);
2215 
2216 	ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2217 	    ((flag & LINKTYPEMASK) == LINKPERSIST));
2218 
2219 	/* clean up the layered driver linkages */
2220 	if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2221 		ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2222 	} else {
2223 		ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2224 	}
2225 
2226 	link_rempassthru(passq);
2227 
2228 	/*
2229 	 * Now all plumbing changes are finished and STRPLUMB is no
2230 	 * longer needed.
2231 	 */
2232 	mutex_enter(&stpdown->sd_lock);
2233 	stpdown->sd_flag &= ~STRPLUMB;
2234 	cv_broadcast(&stpdown->sd_monitor);
2235 	mutex_exit(&stpdown->sd_lock);
2236 
2237 	(void) closef(fpdown);
2238 	return (0);
2239 }
2240 
2241 /*
2242  * Unlink all multiplexor links for which stp is the controlling stream.
2243  * Return 0, or a non-zero errno on failure.
2244  */
2245 int
2246 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp)
2247 {
2248 	linkinfo_t *linkp;
2249 	int error = 0;
2250 
2251 	mutex_enter(&muxifier);
2252 	while (linkp = findlinks(stp, 0, flag)) {
2253 		/*
2254 		 * munlink() releases the muxifier lock.
2255 		 */
2256 		if (error = munlink(stp, linkp, flag, crp, rvalp))
2257 			return (error);
2258 		mutex_enter(&muxifier);
2259 	}
2260 	mutex_exit(&muxifier);
2261 	return (0);
2262 }
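
/*
 * For example, the stream head close path tears down any remaining
 * links with this routine; roughly (illustrative sketch, the actual
 * call site is in strclose(); crp and rval are hypothetical locals):
 *
 *	if (stp->sd_flag & STRHASLINKS)
 *		(void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval);
 */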
2263 
2264 /*
2265  * A multiplexor link has been made. Add an
2266  * edge to the directed graph.
2267  */
2268 void
2269 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid)
2270 {
2271 	struct mux_node *np;
2272 	struct mux_edge *ep;
2273 	major_t upmaj;
2274 	major_t lomaj;
2275 
2276 	upmaj = getmajor(upstp->sd_vnode->v_rdev);
2277 	lomaj = getmajor(lostp->sd_vnode->v_rdev);
2278 	np = &mux_nodes[upmaj];
2279 	if (np->mn_outp) {
2280 		ep = np->mn_outp;
2281 		while (ep->me_nextp)
2282 			ep = ep->me_nextp;
2283 		ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2284 		ep = ep->me_nextp;
2285 	} else {
2286 		np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2287 		ep = np->mn_outp;
2288 	}
2289 	ep->me_nextp = NULL;
2290 	ep->me_muxid = muxid;
2291 	if (lostp->sd_vnode->v_type == VFIFO)
2292 		ep->me_nodep = NULL;
2293 	else
2294 		ep->me_nodep = &mux_nodes[lomaj];
2295 }
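
/*
 * To illustrate (with hypothetical majors A and B): plumbing a lower
 * stream whose major is B under a mux whose major is A appends an edge
 * to mux_nodes[A]'s out-list:
 *
 *	mux_nodes[A].mn_outp -> ... -> { me_muxid, me_nodep = &mux_nodes[B] }
 *
 * linkcycle(), called from mlink_file() above, walks these edges so
 * that a later attempt to link A (directly or transitively) underneath
 * B is rejected as a cycle.
 */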
2296 
2297 /*
2298  * A multiplexor link has been removed. Remove the
2299  * edge in the directed graph.
2300  */
2301 void
2302 mux_rmvedge(stdata_t *upstp, int muxid)
2303 {
2304 	struct mux_node *np;
2305 	struct mux_edge *ep;
2306 	struct mux_edge *pep = NULL;
2307 	major_t upmaj;
2308 
2309 	upmaj = getmajor(upstp->sd_vnode->v_rdev);
2310 	np = &mux_nodes[upmaj];
2311 	ASSERT(np->mn_outp != NULL);
2312 	ep = np->mn_outp;
2313 	while (ep) {
2314 		if (ep->me_muxid == muxid) {
2315 			if (pep)
2316 				pep->me_nextp = ep->me_nextp;
2317 			else
2318 				np->mn_outp = ep->me_nextp;
2319 			kmem_free(ep, sizeof (struct mux_edge));
2320 			return;
2321 		}
2322 		pep = ep;
2323 		ep = ep->me_nextp;
2324 	}
2325 	ASSERT(0);	/* should not reach here */
2326 }
2327 
2328 /*
2329  * Translate the device flags (from conf.h) to the corresponding
2330  * qflag and sq_flag (type) values.
2331  */
2332 int
2333 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
2334 	uint32_t *sqtypep)
2335 {
2336 	uint32_t qflag = 0;
2337 	uint32_t sqtype = 0;
2338 
2339 	if (devflag & _D_OLD)
2340 		goto bad;
2341 
2342 	/* Inner perimeter presence and scope */
2343 	switch (devflag & D_MTINNER_MASK) {
2344 	case D_MP:
2345 		qflag |= QMTSAFE;
2346 		sqtype |= SQ_CI;
2347 		break;
2348 	case D_MTPERQ|D_MP:
2349 		qflag |= QPERQ;
2350 		break;
2351 	case D_MTQPAIR|D_MP:
2352 		qflag |= QPAIR;
2353 		break;
2354 	case D_MTPERMOD|D_MP:
2355 		qflag |= QPERMOD;
2356 		break;
2357 	default:
2358 		goto bad;
2359 	}
2360 
2361 	/* Outer perimeter */
2362 	if (devflag & D_MTOUTPERIM) {
2363 		switch (devflag & D_MTINNER_MASK) {
2364 		case D_MP:
2365 		case D_MTPERQ|D_MP:
2366 		case D_MTQPAIR|D_MP:
2367 			break;
2368 		default:
2369 			goto bad;
2370 		}
2371 		qflag |= QMTOUTPERIM;
2372 	}
2373 
2374 	/* Inner perimeter modifiers */
2375 	if (devflag & D_MTINNER_MOD) {
2376 		switch (devflag & D_MTINNER_MASK) {
2377 		case D_MP:
2378 			goto bad;
2379 		default:
2380 			break;
2381 		}
2382 		if (devflag & D_MTPUTSHARED)
2383 			sqtype |= SQ_CIPUT;
2384 		if (devflag & _D_MTOCSHARED) {
2385 			/*
2386 			 * The code in putnext assumes that it has the
2387 			 * highest concurrency by not checking sq_count.
2388 			 * Thus _D_MTOCSHARED can only be supported when
2389 			 * D_MTPUTSHARED is set.
2390 			 */
2391 			if (!(devflag & D_MTPUTSHARED))
2392 				goto bad;
2393 			sqtype |= SQ_CIOC;
2394 		}
2395 		if (devflag & _D_MTCBSHARED) {
2396 			/*
2397 			 * The code in putnext assumes that it has the
2398 			 * highest concurrency by not checking sq_count.
2399 			 * Thus _D_MTCBSHARED can only be supported when
2400 			 * D_MTPUTSHARED is set.
2401 			 */
2402 			if (!(devflag & D_MTPUTSHARED))
2403 				goto bad;
2404 			sqtype |= SQ_CICB;
2405 		}
2406 		if (devflag & _D_MTSVCSHARED) {
2407 			/*
2408 			 * The code in putnext assumes that it has the
2409 			 * highest concurrency by not checking sq_count.
2410 			 * Thus _D_MTSVCSHARED can only be supported when
2411 			 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
2412 			 * supported only for QPERMOD.
2413 			 */
2414 			if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
2415 				goto bad;
2416 			sqtype |= SQ_CISVC;
2417 		}
2418 	}
2419 
2420 	/* Default outer perimeter concurrency */
2421 	sqtype |= SQ_CO;
2422 
2423 	/* Outer perimeter modifiers */
2424 	if (devflag & D_MTOCEXCL) {
2425 		if (!(devflag & D_MTOUTPERIM)) {
2426 			/* No outer perimeter */
2427 			goto bad;
2428 		}
2429 		sqtype &= ~SQ_COOC;
2430 	}
2431 
2432 	/* Synchronous Streams extended qinit structure */
2433 	if (devflag & D_SYNCSTR)
2434 		qflag |= QSYNCSTR;
2435 
2436 	*qflagp = qflag;
2437 	*sqtypep = sqtype;
2438 	return (0);
2439 
2440 bad:
2441 	cmn_err(CE_WARN,
2442 	    "stropen: bad MT flags (0x%x) in driver '%s'",
2443 	    (int)(devflag & D_MTSAFETY_MASK),
2444 	    stp->st_rdinit->qi_minfo->mi_idname);
2445 
2446 	return (EINVAL);
2447 }
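
/*
 * A worked example (hypothetical driver flags): for a driver declared
 * with (D_MP | D_MTPERMOD | D_MTPUTSHARED), the inner perimeter case
 * D_MTPERMOD|D_MP yields qflag = QPERMOD, the D_MTPUTSHARED modifier
 * adds SQ_CIPUT to sqtype, and the default outer perimeter concurrency
 * adds SQ_CO, so the result is qflag = QPERMOD, sqtype = SQ_CIPUT|SQ_CO.
 */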
2448 
2449 /*
2450  * Set the interface values for a pair of queues (qinit structure,
2451  * packet sizes, water marks).
2452  * setq assumes that the caller does not have a claim (entersq or claimq)
2453  * on the queue.
2454  */
2455 void
2456 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
2457     perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
2458 {
2459 	queue_t *wq;
2460 	syncq_t	*sq, *outer;
2461 
2462 	ASSERT(rq->q_flag & QREADR);
2463 	ASSERT((qflag & QMT_TYPEMASK) != 0);
2464 	IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
2465 
2466 	wq = _WR(rq);
2467 	rq->q_qinfo = rinit;
2468 	rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
2469 	rq->q_lowat = rinit->qi_minfo->mi_lowat;
2470 	rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
2471 	rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
2472 	wq->q_qinfo = winit;
2473 	wq->q_hiwat = winit->qi_minfo->mi_hiwat;
2474 	wq->q_lowat = winit->qi_minfo->mi_lowat;
2475 	wq->q_minpsz = winit->qi_minfo->mi_minpsz;
2476 	wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;
2477 
2478 	/* Remove old syncqs */
2479 	sq = rq->q_syncq;
2480 	outer = sq->sq_outer;
2481 	if (outer != NULL) {
2482 		ASSERT(wq->q_syncq->sq_outer == outer);
2483 		outer_remove(outer, rq->q_syncq);
2484 		if (wq->q_syncq != rq->q_syncq)
2485 			outer_remove(outer, wq->q_syncq);
2486 	}
2487 	ASSERT(sq->sq_outer == NULL);
2488 	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2489 
2490 	if (sq != SQ(rq)) {
2491 		if (!(rq->q_flag & QPERMOD))
2492 			free_syncq(sq);
2493 		if (wq->q_syncq == rq->q_syncq)
2494 			wq->q_syncq = NULL;
2495 		rq->q_syncq = NULL;
2496 	}
2497 	if (wq->q_syncq != NULL && wq->q_syncq != sq &&
2498 	    wq->q_syncq != SQ(rq)) {
2499 		free_syncq(wq->q_syncq);
2500 		wq->q_syncq = NULL;
2501 	}
2502 	ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
2503 				rq->q_syncq->sq_tail == NULL));
2504 	ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
2505 				wq->q_syncq->sq_tail == NULL));
2506 
2507 	if (!(rq->q_flag & QPERMOD) &&
2508 	    rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
2509 		ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2510 		SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
2511 		    rq->q_syncq->sq_nciputctrl, 0);
2512 		ASSERT(ciputctrl_cache != NULL);
2513 		kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
2514 		rq->q_syncq->sq_ciputctrl = NULL;
2515 		rq->q_syncq->sq_nciputctrl = 0;
2516 	}
2517 
2518 	if (!(wq->q_flag & QPERMOD) &&
2519 	    wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
2520 		ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2521 		SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
2522 		    wq->q_syncq->sq_nciputctrl, 0);
2523 		ASSERT(ciputctrl_cache != NULL);
2524 		kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
2525 		wq->q_syncq->sq_ciputctrl = NULL;
2526 		wq->q_syncq->sq_nciputctrl = 0;
2527 	}
2528 
2529 	sq = SQ(rq);
2530 	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
2531 	ASSERT(sq->sq_outer == NULL);
2532 	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2533 
2534 	/*
2535 	 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
2536 	 * bits in sq_flag based on the sqtype.
2537 	 */
2538 	ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);
2539 
2540 	rq->q_syncq = wq->q_syncq = sq;
2541 	sq->sq_type = sqtype;
2542 	sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);
2543 
2544 	/*
2545 	 *  We are making sq_svcflags zero,
2546 	 *  resetting SQ_DISABLED in case it was set by
2547 	 *  wait_svc() in the munlink path.
2548 	 *
2549 	 */
2550 	ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
2551 	sq->sq_svcflags = 0;
2552 
2553 	/*
2554 	 * We need to acquire the lock here for the mlink and munlink case,
2555 	 * where canputnext, backenable, etc. can access the q_flag.
2556 	 */
2557 	if (lock_needed) {
2558 		mutex_enter(QLOCK(rq));
2559 		rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2560 		mutex_exit(QLOCK(rq));
2561 		mutex_enter(QLOCK(wq));
2562 		wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2563 		mutex_exit(QLOCK(wq));
2564 	} else {
2565 		rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2566 		wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2567 	}
2568 
2569 	if (qflag & QPERQ) {
2570 		/* Allocate a separate syncq for the write side */
2571 		sq = new_syncq();
2572 		sq->sq_type = rq->q_syncq->sq_type;
2573 		sq->sq_flags = rq->q_syncq->sq_flags;
2574 		ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2575 		    sq->sq_oprev == NULL);
2576 		wq->q_syncq = sq;
2577 	}
2578 	if (qflag & QPERMOD) {
2579 		sq = dmp->dm_sq;
2580 
2581 		/*
2582 		 * Assert that we do have an inner perimeter syncq and that it
2583 		 * does not have an outer perimeter associated with it.
2584 		 */
2585 		ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2586 		    sq->sq_oprev == NULL);
2587 		rq->q_syncq = wq->q_syncq = sq;
2588 	}
2589 	if (qflag & QMTOUTPERIM) {
2590 		outer = dmp->dm_sq;
2591 
2592 		ASSERT(outer->sq_outer == NULL);
2593 		outer_insert(outer, rq->q_syncq);
2594 		if (wq->q_syncq != rq->q_syncq)
2595 			outer_insert(outer, wq->q_syncq);
2596 	}
2597 	ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2598 		(rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2599 	ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2600 		(wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2601 	ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));
2602 
2603 	/*
2604 	 * Initialize struio() types.
2605 	 */
2606 	rq->q_struiot =
2607 	    (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
2608 	wq->q_struiot =
2609 	    (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
2610 }
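
/*
 * setq() is used in two ways in this file: to convert a stream head
 * queue pair into a mux lower queue (see mlink_file() above):
 *
 *	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
 *	    B_TRUE);
 *
 * and to restore the stream head interface afterwards (see munlink()
 * and the mlink_file() error path):
 *
 *	setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
 */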
2611 
2612 perdm_t *
2613 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
2614 {
2615 	syncq_t	*sq;
2616 	perdm_t	**pp;
2617 	perdm_t	*p;
2618 	perdm_t	*dmp;
2619 
2620 	ASSERT(str != NULL);
2621 	ASSERT(qflag & (QPERMOD | QMTOUTPERIM));
2622 
2623 	rw_enter(&perdm_rwlock, RW_READER);
2624 	for (p = perdm_list; p != NULL; p = p->dm_next) {
2625 		if (p->dm_str == str) {	/* found one */
2626 			atomic_add_32(&(p->dm_ref), 1);
2627 			rw_exit(&perdm_rwlock);
2628 			return (p);
2629 		}
2630 	}
2631 	rw_exit(&perdm_rwlock);
2632 
2633 	sq = new_syncq();
2634 	if (qflag & QPERMOD) {
2635 		sq->sq_type = sqtype | SQ_PERMOD;
2636 		sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
2637 	} else {
2638 		ASSERT(qflag & QMTOUTPERIM);
2639 		sq->sq_onext = sq->sq_oprev = sq;
2640 	}
2641 
2642 	dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
2643 	dmp->dm_sq = sq;
2644 	dmp->dm_str = str;
2645 	dmp->dm_ref = 1;
2646 	dmp->dm_next = NULL;
2647 
2648 	rw_enter(&perdm_rwlock, RW_WRITER);
2649 	for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
2650 		if (p->dm_str == str) {	/* already present */
2651 			p->dm_ref++;
2652 			rw_exit(&perdm_rwlock);
2653 			free_syncq(sq);
2654 			kmem_free(dmp, sizeof (perdm_t));
2655 			return (p);
2656 		}
2657 	}
2658 
2659 	*pp = dmp;
2660 	rw_exit(&perdm_rwlock);
2661 	return (dmp);
2662 }
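
/*
 * The lookup above follows the optimistic allocation pattern: search
 * under the read lock; on a miss, allocate outside any lock, then
 * re-check under the write lock before inserting, freeing the spare
 * copy if the race was lost. The typical caller pairing, as seen in
 * mlink_file() above, is roughly:
 *
 *	if (NEED_DM(dp->d_dmp, qflag))
 *		dp->d_dmp = hold_dm(str, qflag, sqtype);
 *
 * with a matching rele_dm() when the reference is no longer needed.
 */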
2663 
2664 void
2665 rele_dm(perdm_t *dmp)
2666 {
2667 	perdm_t **pp;
2668 	perdm_t *p;
2669 
2670 	rw_enter(&perdm_rwlock, RW_WRITER);
2671 	ASSERT(dmp->dm_ref > 0);
2672 
2673 	if (--dmp->dm_ref > 0) {
2674 		rw_exit(&perdm_rwlock);
2675 		return;
2676 	}
2677 
2678 	for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
2679 		if (p == dmp)
2680 			break;
2681 	ASSERT(p == dmp);
2682 	*pp = p->dm_next;
2683 	rw_exit(&perdm_rwlock);
2684 
2685 	/*
2686 	 * Wait for any background processing that relies on the
2687 	 * syncq to complete before it is freed.
2688 	 */
2689 	wait_sq_svc(p->dm_sq);
2690 	free_syncq(p->dm_sq);
2691 	kmem_free(p, sizeof (perdm_t));
2692 }
2693 
2694 /*
2695  * Make a protocol message given control and data buffers.
2696  * n.b., this can block; be careful of what locks you hold when calling it.
2697  *
2698  * If sd_maxblk is less than *iosize this routine can fail part way through
2699  * (due to an allocation failure). In this case, on return *iosize will
2700  * contain the amount that was consumed. Otherwise *iosize will not be
2701  * modified, i.e. it will still equal the amount that was consumed.
2702  */
2703 int
2704 strmakemsg(
2705 	struct strbuf *mctl,
2706 	ssize_t *iosize,
2707 	struct uio *uiop,
2708 	stdata_t *stp,
2709 	int32_t flag,
2710 	mblk_t **mpp)
2711 {
2712 	mblk_t *mpctl = NULL;
2713 	mblk_t *mpdata = NULL;
2714 	int error;
2715 
2716 	ASSERT(uiop != NULL);
2717 
2718 	*mpp = NULL;
2719 	/* Create control part, if any */
2720 	if ((mctl != NULL) && (mctl->len >= 0)) {
2721 		error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2722 		if (error)
2723 			return (error);
2724 	}
2725 	/* Create data part, if any */
2726 	if (*iosize >= 0) {
2727 		error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2728 		if (error) {
2729 			freemsg(mpctl);
2730 			return (error);
2731 		}
2732 	}
2733 	if (mpctl != NULL) {
2734 		if (mpdata != NULL)
2735 			linkb(mpctl, mpdata);
2736 		*mpp = mpctl;
2737 	} else {
2738 		*mpp = mpdata;
2739 	}
2740 	return (0);
2741 }
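
/*
 * For example, a user-level putmsg() ultimately reaches this routine
 * through the stream head write path; in the sketch below (ctl_buf,
 * dat_buf and their lengths are hypothetical) the control part becomes
 * an M_PROTO block (or M_PCPROTO under RS_HIPRI) with the M_DATA blocks
 * linked behind it:
 *
 *	struct strbuf ctl, dat;
 *	ctl.len = ctl_len;  ctl.buf = ctl_buf;
 *	dat.len = dat_len;  dat.buf = dat_buf;
 *	(void) putmsg(fd, &ctl, &dat, 0);
 */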
2742 
2743 /*
2744  * Make the control part of a protocol message given a control buffer.
2745  * n.b., this can block; be careful of what locks you hold when calling it.
2746  */
2747 int
2748 strmakectl(
2749 	struct strbuf *mctl,
2750 	int32_t flag,
2751 	int32_t fflag,
2752 	mblk_t **mpp)
2753 {
2754 	mblk_t *bp = NULL;
2755 	unsigned char msgtype;
2756 	int error = 0;
2757 
2758 	*mpp = NULL;
2759 	/*
2760 	 * Create control part of message, if any.
2761 	 */
2762 	if ((mctl != NULL) && (mctl->len >= 0)) {
2763 		caddr_t base;
2764 		int ctlcount;
2765 		int allocsz;
2766 
2767 		if (flag & RS_HIPRI)
2768 			msgtype = M_PCPROTO;
2769 		else
2770 			msgtype = M_PROTO;
2771 
2772 		ctlcount = mctl->len;
2773 		base = mctl->buf;
2774 
2775 		/*
2776 		 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2777 		 * blocks by increasing the size to something more usable.
2778 		 */
2779 		allocsz = MAX(ctlcount, 64);
2780 
2781 		/*
2782 		 * Range checking has already been done; simply try
2783 		 * to allocate a message block for the ctl part.
2784 		 */
2785 		while (!(bp = allocb(allocsz, BPRI_MED))) {
2786 			if (fflag & (FNDELAY|FNONBLOCK))
2787 				return (EAGAIN);
2788 			if (error = strwaitbuf(allocsz, BPRI_MED))
2789 				return (error);
2790 		}
2791 
2792 		bp->b_datap->db_type = msgtype;
2793 		if (copyin(base, bp->b_wptr, ctlcount)) {
2794 			freeb(bp);
2795 			return (EFAULT);
2796 		}
2797 		bp->b_wptr += ctlcount;
2798 	}
2799 	*mpp = bp;
2800 	return (0);
2801 }
2802 
2803 /*
2804  * Make a protocol message given data buffers.
2805  * n.b., this can block; be careful of what locks you hold when calling it.
2806  *
2807  * If sd_maxblk is less than *iosize this routine can fail part way through
2808  * (due to an allocation failure). In this case, on return *iosize will
2809  * contain the amount that was consumed. Otherwise *iosize will not be
2810  * modified, i.e. it will still equal the amount that was consumed.
2811  */
2812 int
2813 strmakedata(
2814 	ssize_t   *iosize,
2815 	struct uio *uiop,
2816 	stdata_t *stp,
2817 	int32_t flag,
2818 	mblk_t **mpp)
2819 {
2820 	mblk_t *mp = NULL;
2821 	mblk_t *bp;
2822 	int wroff = (int)stp->sd_wroff;
2823 	int error = 0;
2824 	ssize_t maxblk;
2825 	ssize_t count = *iosize;
2826 	cred_t *cr = CRED();
2827 
2828 	*mpp = NULL;
2829 	if (count < 0)
2830 		return (0);
2831 
2832 	maxblk = stp->sd_maxblk;
2833 	if (maxblk == INFPSZ)
2834 		maxblk = count;
2835 
2836 	/*
2837 	 * Create data part of message, if any.
2838 	 */
2839 	do {
2840 		ssize_t size;
2841 		dblk_t  *dp;
2842 
2843 		ASSERT(uiop);
2844 
2845 		size = MIN(count, maxblk);
2846 
2847 		while ((bp = allocb_cred(size + wroff, cr)) == NULL) {
2848 			error = EAGAIN;
2849 			if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
2850 			    (error = strwaitbuf(size + wroff, BPRI_MED)) != 0) {
2851 				if (count == *iosize) {
2852 					freemsg(mp);
2853 					return (error);
2854 				} else {
2855 					*iosize -= count;
2856 					*mpp = mp;
2857 					return (0);
2858 				}
2859 			}
2860 		}
2861 		dp = bp->b_datap;
2862 		dp->db_cpid = curproc->p_pid;
2863 		ASSERT(wroff <= dp->db_lim - bp->b_wptr);
2864 		bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;
2865 
2866 		if (flag & STRUIO_POSTPONE) {
2867 			/*
2868 			 * Setup the stream uio portion of the
2869 			 * dblk for subsequent use by struioget().
2870 			 */
2871 			dp->db_struioflag = STRUIO_SPEC;
2872 			dp->db_cksumstart = 0;
2873 			dp->db_cksumstuff = 0;
2874 			dp->db_cksumend = size;
2875 			*(long long *)dp->db_struioun.data = 0ll;
2876 		} else {
2877 			if (stp->sd_copyflag & STRCOPYCACHED)
2878 				uiop->uio_extflg |= UIO_COPY_CACHED;
2879 
2880 			if (size != 0) {
2881 				error = uiomove(bp->b_wptr, size, UIO_WRITE,
2882 				    uiop);
2883 				if (error != 0) {
2884 					freeb(bp);
2885 					freemsg(mp);
2886 					return (error);
2887 				}
2888 			}
2889 		}
2890 
2891 		bp->b_wptr += size;
2892 		count -= size;
2893 
2894 		if (mp == NULL)
2895 			mp = bp;
2896 		else
2897 			linkb(mp, bp);
2898 	} while (count > 0);
2899 
2900 	*mpp = mp;
2901 	return (0);
2902 }
2903 
2904 /*
2905  * Wait for a buffer to become available. Return non-zero errno
2906  * if not able to wait, 0 if buffer is probably there.
2907  */
2908 int
2909 strwaitbuf(size_t size, int pri)
2910 {
2911 	bufcall_id_t id;
2912 
2913 	mutex_enter(&bcall_monitor);
2914 	if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
2915 	    &ttoproc(curthread)->p_flag_cv)) == 0) {
2916 		mutex_exit(&bcall_monitor);
2917 		return (ENOSR);
2918 	}
2919 	if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
2920 		unbufcall(id);
2921 		mutex_exit(&bcall_monitor);
2922 		return (EINTR);
2923 	}
2924 	unbufcall(id);
2925 	mutex_exit(&bcall_monitor);
2926 	return (0);
2927 }
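
/*
 * The canonical use of strwaitbuf() is the allocation retry loop, as
 * seen in strmakectl() above:
 *
 *	while (!(bp = allocb(allocsz, BPRI_MED))) {
 *		if (fflag & (FNDELAY|FNONBLOCK))
 *			return (EAGAIN);
 *		if (error = strwaitbuf(allocsz, BPRI_MED))
 *			return (error);
 *	}
 */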
2928 
2929 /*
2930  * This function waits for a read or write event to happen on a stream.
2931  * fmode can specify FNDELAY and/or FNONBLOCK.
2932  * The timeout is in ms with -1 meaning infinite.
2933  * The flag values work as follows:
2934  *	READWAIT	Check for read side errors, send M_READ
2935  *	GETWAIT		Check for read side errors, no M_READ
2936  *	WRITEWAIT	Check for write side errors.
2937  *	NOINTR		Do not return error if nonblocking or timeout.
2938  * 	STR_NOERROR	Ignore all errors except STPLEX.
2939  *	STR_NOSIG	Ignore/hold signals during the duration of the call.
2940  *	STR_PEEK	Passed through to strgeterr().
2941  */
2942 int
2943 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
2944     int *done)
2945 {
2946 	int slpflg, errs;
2947 	int error;
2948 	kcondvar_t *sleepon;
2949 	mblk_t *mp;
2950 	ssize_t *rd_count;
2951 	clock_t rval;
2952 
2953 	ASSERT(MUTEX_HELD(&stp->sd_lock));
2954 	if ((flag & READWAIT) || (flag & GETWAIT)) {
2955 		slpflg = RSLEEP;
2956 		sleepon = &_RD(stp->sd_wrq)->q_wait;
2957 		errs = STRDERR|STPLEX;
2958 	} else {
2959 		slpflg = WSLEEP;
2960 		sleepon = &stp->sd_wrq->q_wait;
2961 		errs = STWRERR|STRHUP|STPLEX;
2962 	}
2963 	if (flag & STR_NOERROR)
2964 		errs = STPLEX;
2965 
2966 	if (stp->sd_wakeq & slpflg) {
2967 		/*
2968 		 * A strwakeq() is pending, no need to sleep.
2969 		 */
2970 		stp->sd_wakeq &= ~slpflg;
2971 		*done = 0;
2972 		return (0);
2973 	}
2974 
2975 	if (fmode & (FNDELAY|FNONBLOCK)) {
2976 		if (!(flag & NOINTR))
2977 			error = EAGAIN;
2978 		else
2979 			error = 0;
2980 		*done = 1;
2981 		return (error);
2982 	}
2983 
2984 	if (stp->sd_flag & errs) {
2985 		/*
2986 		 * Check for errors before going to sleep since the
2987 		 * caller might not have checked this while holding
2988 		 * sd_lock.
2989 		 */
2990 		error = strgeterr(stp, errs, (flag & STR_PEEK));
2991 		if (error != 0) {
2992 			*done = 1;
2993 			return (error);
2994 		}
2995 	}
2996 
2997 	/*
2998 	 * If any module downstream has requested read notification
2999 	 * by setting SNDMREAD flag using M_SETOPTS, send a message
3000 	 * down stream.
3001 	 */
3002 	if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
3003 		mutex_exit(&stp->sd_lock);
3004 		if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
3005 		    (flag & STR_NOSIG), &error))) {
3006 			mutex_enter(&stp->sd_lock);
3007 			*done = 1;
3008 			return (error);
3009 		}
3010 		mp->b_datap->db_type = M_READ;
3011 		rd_count = (ssize_t *)mp->b_wptr;
3012 		*rd_count = count;
3013 		mp->b_wptr += sizeof (ssize_t);
3014 		/*
3015 		 * Send the number of bytes requested by the
3016 		 * read as the argument to M_READ.
3017 		 */
3018 		stream_willservice(stp);
3019 		putnext(stp->sd_wrq, mp);
3020 		stream_runservice(stp);
3021 		mutex_enter(&stp->sd_lock);
3022 
3023 		/*
3024 		 * If any data arrived due to inline processing
3025 		 * of putnext(), don't sleep.
3026 		 */
3027 		if (_RD(stp->sd_wrq)->q_first != NULL) {
3028 			*done = 0;
3029 			return (0);
3030 		}
3031 	}
3032 
3033 	stp->sd_flag |= slpflg;
3034 	TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
3035 		"strwaitq sleeps (2):%p, %X, %lX, %X, %p",
3036 		stp, flag, count, fmode, done);
3037 
3038 	rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
3039 	if (rval > 0) {
3040 		/* EMPTY */
3041 		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
3042 			"strwaitq awakes(2):%X, %X, %X, %X, %X",
3043 			stp, flag, count, fmode, done);
3044 	} else if (rval == 0) {
3045 		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
3046 			"strwaitq interrupt #2:%p, %X, %lX, %X, %p",
3047 			stp, flag, count, fmode, done);
3048 		stp->sd_flag &= ~slpflg;
3049 		cv_broadcast(sleepon);
3050 		if (!(flag & NOINTR))
3051 			error = EINTR;
3052 		else
3053 			error = 0;
3054 		*done = 1;
3055 		return (error);
3056 	} else {
3057 		/* timeout */
3058 		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
3059 			"strwaitq timeout:%p, %X, %lX, %X, %p",
3060 			stp, flag, count, fmode, done);
3061 		*done = 1;
3062 		if (!(flag & NOINTR))
3063 			return (ETIME);
3064 		else
3065 			return (0);
3066 	}
3067 	/*
3068 	 * If the caller implements delayed errors (i.e. queued after data)
3069 	 * we cannot check for errors here since data as well as an
3070 	 * error might have arrived at the stream head. We return to
3071 	 * have the caller check the read queue before checking for errors.
3072 	 */
3073 	if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
3074 		error = strgeterr(stp, errs, (flag & STR_PEEK));
3075 		if (error != 0) {
3076 			*done = 1;
3077 			return (error);
3078 		}
3079 	}
3080 	*done = 0;
3081 	return (0);
3082 }
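
/*
 * A sketch of the intended calling pattern (illustrative only; real
 * callers such as the stream head read routines add further checks):
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (_RD(stp->sd_wrq)->q_first == NULL) {
 *		error = strwaitq(stp, GETWAIT, 0, fmode, -1, &done);
 *		if (error != 0 || done)
 *			break;
 *	}
 *	mutex_exit(&stp->sd_lock);
 *
 * The caller holds sd_lock, re-checks its wakeup condition in a loop,
 * and stops when strwaitq() reports an error or sets *done.
 */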
3083 
3084 /*
3085  * Perform job control discipline access checks.
3086  * Return 0 for success and the errno for failure.
3087  */
3088 
3089 #define	cantsend(p, t, sig) \
3090 	(sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))
3091 
3092 int
3093 straccess(struct stdata *stp, enum jcaccess mode)
3094 {
3095 	extern kcondvar_t lbolt_cv;	/* XXX: should be in a header file */
3096 	kthread_t *t = curthread;
3097 	proc_t *p = ttoproc(t);
3098 	sess_t *sp;
3099 
3100 	if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
3101 		return (0);
3102 
3103 	mutex_enter(&p->p_lock);
3104 	sp = p->p_sessp;
3105 
3106 	for (;;) {
3107 		/*
3108 		 * If this is not the calling process's controlling terminal
3109 		 * or if the calling process is already in the foreground
3110 		 * then allow access.
3111 		 */
3112 		if (sp->s_dev != stp->sd_vnode->v_rdev ||
3113 		    p->p_pgidp == stp->sd_pgidp) {
3114 			mutex_exit(&p->p_lock);
3115 			return (0);
3116 		}
3117 
3118 		/*
3119 		 * Check to see if controlling terminal has been deallocated.
3120 		 */
3121 		if (sp->s_vp == NULL) {
3122 			if (!cantsend(p, t, SIGHUP))
3123 				sigtoproc(p, t, SIGHUP);
3124 			mutex_exit(&p->p_lock);
3125 			return (EIO);
3126 		}
3127 
3128 		if (mode == JCGETP) {
3129 			mutex_exit(&p->p_lock);
3130 			return (0);
3131 		}
3132 
3133 		if (mode == JCREAD) {
3134 			if (p->p_detached || cantsend(p, t, SIGTTIN)) {
3135 				mutex_exit(&p->p_lock);
3136 				return (EIO);
3137 			}
3138 			mutex_exit(&p->p_lock);
3139 			pgsignal(p->p_pgidp, SIGTTIN);
3140 			mutex_enter(&p->p_lock);
3141 		} else {  /* mode == JCWRITE or JCSETP */
3142 			if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
3143 			    cantsend(p, t, SIGTTOU)) {
3144 				mutex_exit(&p->p_lock);
3145 				return (0);
3146 			}
3147 			if (p->p_detached) {
3148 				mutex_exit(&p->p_lock);
3149 				return (EIO);
3150 			}
3151 			mutex_exit(&p->p_lock);
3152 			pgsignal(p->p_pgidp, SIGTTOU);
3153 			mutex_enter(&p->p_lock);
3154 		}
3155 
3156 		/*
3157 		 * We call cv_wait_sig_swap() to cause the appropriate
3158 		 * action for the jobcontrol signal to take place.
3159 		 * If the signal is being caught, we will take the
3160 		 * EINTR error return.  Otherwise, the default action
3161 		 * of causing the process to stop will take place.
3162 		 * In this case, we rely on the periodic cv_broadcast() on
3163 		 * &lbolt_cv to wake us up to loop around and test again.
3164 		 * We can't get here if the signal is ignored or
3165 		 * if the current thread is blocking the signal.
3166 		 */
3167 		if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
3168 			mutex_exit(&p->p_lock);
3169 			return (EINTR);
3170 		}
3171 	}
3172 }
3173 
3174 /*
3175  * Return the size of the leading message blocks of type bp->b_datap->db_type
3176  */
3177 size_t
3178 xmsgsize(mblk_t *bp)
3179 {
3180 	unsigned char type;
3181 	size_t count = 0;
3182 
3183 	type = bp->b_datap->db_type;
3184 
3185 	for (; bp; bp = bp->b_cont) {
3186 		if (type != bp->b_datap->db_type)
3187 			break;
3188 		ASSERT(bp->b_wptr >= bp->b_rptr);
3189 		count += bp->b_wptr - bp->b_rptr;
3190 	}
3191 	return (count);
3192 }
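
/*
 * For example (hypothetical block sizes), given a message chained as
 * M_PROTO(20) -> M_PROTO(12) -> M_DATA(100), xmsgsize() returns 32:
 * counting stops at the first block whose type differs from that of
 * the first block.
 */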
3193 
3194 /*
3195  * Allocate a stream head.
3196  */
3197 struct stdata *
3198 shalloc(queue_t *qp)
3199 {
3200 	stdata_t *stp;
3201 
3202 	stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);
3203 
3204 	stp->sd_wrq = _WR(qp);
3205 	stp->sd_strtab = NULL;
3206 	stp->sd_iocid = 0;
3207 	stp->sd_mate = NULL;
3208 	stp->sd_freezer = NULL;
3209 	stp->sd_refcnt = 0;
3210 	stp->sd_wakeq = 0;
3211 	stp->sd_anchor = 0;
3212 	stp->sd_struiowrq = NULL;
3213 	stp->sd_struiordq = NULL;
3214 	stp->sd_struiodnak = 0;
3215 	stp->sd_struionak = NULL;
3216 #ifdef C2_AUDIT
3217 	stp->sd_t_audit_data = NULL;
3218 #endif
3219 	stp->sd_rput_opt = 0;
3220 	stp->sd_wput_opt = 0;
3221 	stp->sd_read_opt = 0;
3222 	stp->sd_rprotofunc = strrput_proto;
3223 	stp->sd_rmiscfunc = strrput_misc;
3224 	stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
3225 	stp->sd_ciputctrl = NULL;
3226 	stp->sd_nciputctrl = 0;
3227 	stp->sd_qhead = NULL;
3228 	stp->sd_qtail = NULL;
3229 	stp->sd_servid = NULL;
3230 	stp->sd_nqueues = 0;
3231 	stp->sd_svcflags = 0;
3232 	stp->sd_copyflag = 0;
3233 	return (stp);
3234 }
3235 
3236 /*
3237  * Free a stream head.
3238  */
3239 void
3240 shfree(stdata_t *stp)
3241 {
3242 	ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
3243 
3244 	stp->sd_wrq = NULL;
3245 
3246 	mutex_enter(&stp->sd_qlock);
3247 	while (stp->sd_svcflags & STRS_SCHEDULED) {
3248 		STRSTAT(strwaits);
3249 		cv_wait(&stp->sd_qcv, &stp->sd_qlock);
3250 	}
3251 	mutex_exit(&stp->sd_qlock);
3252 
3253 	if (stp->sd_ciputctrl != NULL) {
3254 		ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
3255 		SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
3256 		    stp->sd_nciputctrl, 0);
3257 		ASSERT(ciputctrl_cache != NULL);
3258 		kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
3259 		stp->sd_ciputctrl = NULL;
3260 		stp->sd_nciputctrl = 0;
3261 	}
3262 	ASSERT(stp->sd_qhead == NULL);
3263 	ASSERT(stp->sd_qtail == NULL);
3264 	ASSERT(stp->sd_nqueues == 0);
3265 	kmem_cache_free(stream_head_cache, stp);
3266 }
3267 
3268 /*
3269  * Allocate a pair of queues and a syncq for the pair
3270  */
3271 queue_t *
3272 allocq(void)
3273 {
3274 	queinfo_t *qip;
3275 	queue_t *qp, *wqp;
3276 	syncq_t	*sq;
3277 
3278 	qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
3279 
3280 	qp = &qip->qu_rqueue;
3281 	wqp = &qip->qu_wqueue;
3282 	sq = &qip->qu_syncq;
3283 
3284 	qp->q_last	= NULL;
3285 	qp->q_next	= NULL;
3286 	qp->q_ptr	= NULL;
3287 	qp->q_flag	= QUSE | QREADR;
3288 	qp->q_bandp	= NULL;
3289 	qp->q_stream	= NULL;
3290 	qp->q_syncq	= sq;
3291 	qp->q_nband	= 0;
3292 	qp->q_nfsrv	= NULL;
3293 	qp->q_draining	= 0;
3294 	qp->q_syncqmsgs	= 0;
3295 	qp->q_spri	= 0;
3296 	qp->q_qtstamp	= 0;
3297 	qp->q_sqtstamp	= 0;
3298 	qp->q_fp	= NULL;
3299 
3300 	wqp->q_last	= NULL;
3301 	wqp->q_next	= NULL;
3302 	wqp->q_ptr	= NULL;
3303 	wqp->q_flag	= QUSE;
3304 	wqp->q_bandp	= NULL;
3305 	wqp->q_stream	= NULL;
3306 	wqp->q_syncq	= sq;
3307 	wqp->q_nband	= 0;
3308 	wqp->q_nfsrv	= NULL;
3309 	wqp->q_draining	= 0;
3310 	wqp->q_syncqmsgs = 0;
3311 	wqp->q_qtstamp	= 0;
3312 	wqp->q_sqtstamp	= 0;
3313 	wqp->q_spri	= 0;
3314 
3315 	sq->sq_count	= 0;
3316 	sq->sq_rmqcount	= 0;
3317 	sq->sq_flags	= 0;
3318 	sq->sq_type	= 0;
3319 	sq->sq_callbflags = 0;
3320 	sq->sq_cancelid	= 0;
3321 	sq->sq_ciputctrl = NULL;
3322 	sq->sq_nciputctrl = 0;
3323 	sq->sq_needexcl = 0;
3324 	sq->sq_svcflags = 0;
3325 
3326 	return (qp);
3327 }
3328 
3329 /*
3330  * Free a pair of queues and the "attached" syncq.
3331  * Discard any messages left on the syncq(s), remove the syncq(s) from the
3332  * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
3333  */
3334 void
3335 freeq(queue_t *qp)
3336 {
3337 	qband_t *qbp, *nqbp;
3338 	syncq_t *sq, *outer;
3339 	queue_t *wqp = _WR(qp);
3340 
3341 	ASSERT(qp->q_flag & QREADR);
3342 
3343 	(void) flush_syncq(qp->q_syncq, qp);
3344 	(void) flush_syncq(wqp->q_syncq, wqp);
3345 	ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);
3346 
3347 	outer = qp->q_syncq->sq_outer;
3348 	if (outer != NULL) {
3349 		outer_remove(outer, qp->q_syncq);
3350 		if (wqp->q_syncq != qp->q_syncq)
3351 			outer_remove(outer, wqp->q_syncq);
3352 	}
3353 	/*
3354 	 * Free any syncqs that are outside what allocq returned.
3355 	 */
3356 	if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
3357 		free_syncq(qp->q_syncq);
3358 	if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
3359 		free_syncq(wqp->q_syncq);
3360 
3361 	ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3362 	ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3363 	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
3364 	ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
3365 	sq = SQ(qp);
3366 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
3367 	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
3368 	ASSERT(sq->sq_outer == NULL);
3369 	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
3370 	ASSERT(sq->sq_callbpend == NULL);
3371 	ASSERT(sq->sq_needexcl == 0);
3372 
3373 	if (sq->sq_ciputctrl != NULL) {
3374 		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
3375 		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
3376 		    sq->sq_nciputctrl, 0);
3377 		ASSERT(ciputctrl_cache != NULL);
3378 		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
3379 		sq->sq_ciputctrl = NULL;
3380 		sq->sq_nciputctrl = 0;
3381 	}
3382 
3383 	ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
3384 	ASSERT(qp->q_count == 0 && wqp->q_count == 0);
3385 	ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);
3386 
3387 	qp->q_flag &= ~QUSE;
3388 	wqp->q_flag &= ~QUSE;
3389 
3390 	/* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
3391 	/* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
3392 
3393 	qbp = qp->q_bandp;
3394 	while (qbp) {
3395 		nqbp = qbp->qb_next;
3396 		freeband(qbp);
3397 		qbp = nqbp;
3398 	}
3399 	qbp = wqp->q_bandp;
3400 	while (qbp) {
3401 		nqbp = qbp->qb_next;
3402 		freeband(qbp);
3403 		qbp = nqbp;
3404 	}
3405 	kmem_cache_free(queue_cache, qp);
3406 }
3407 
3408 /*
3409  * Allocate a qband structure.
3410  */
3411 qband_t *
3412 allocband(void)
3413 {
3414 	qband_t *qbp;
3415 
3416 	qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
3417 	if (qbp == NULL)
3418 		return (NULL);
3419 
3420 	qbp->qb_next	= NULL;
3421 	qbp->qb_count	= 0;
3422 	qbp->qb_mblkcnt	= 0;
3423 	qbp->qb_first	= NULL;
3424 	qbp->qb_last	= NULL;
3425 	qbp->qb_flag	= 0;
3426 
3427 	return (qbp);
3428 }
3429 
3430 /*
3431  * Free a qband structure.
3432  */
3433 void
3434 freeband(qband_t *qbp)
3435 {
3436 	kmem_cache_free(qband_cache, qbp);
3437 }
3438 
3439 /*
3440  * Just like putnextctl(9F), except that allocb_wait() is used.
3441  *
3442  * Consolidation Private, and of course only callable from the stream head or
3443  * routines that may block.
3444  */
3445 int
3446 putnextctl_wait(queue_t *q, int type)
3447 {
3448 	mblk_t *bp;
3449 	int error;
3450 
3451 	if ((datamsg(type) && (type != M_DELAY)) ||
3452 	    (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
3453 		return (0);
3454 
3455 	bp->b_datap->db_type = (unsigned char)type;
3456 	putnext(q, bp);
3457 	return (1);
3458 }
3459 
3460 /*
3461  * Run any bufcalls whose resource requests can now be satisfied.
3462  */
3463 void
3464 runbufcalls(void)
3465 {
3466 	strbufcall_t *bcp;
3467 
3468 	mutex_enter(&bcall_monitor);
3469 	mutex_enter(&strbcall_lock);
3470 
3471 	if (strbcalls.bc_head) {
3472 		size_t count;
3473 		int nevent;
3474 
3475 		/*
3476 		 * count how many events are on the list
3477 		 * now so we can check to avoid looping
3478 		 * in low memory situations
3479 		 */
3480 		nevent = 0;
3481 		for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
3482 			nevent++;
3483 
3484 		/*
3485 		 * Get an estimate of available memory from kmem_avail().
3486 		 * Wake all bufcall functions waiting for
3487 		 * memory whose request could be satisfied
3488 		 * by 'count' bytes of memory and let them fight for it.
3489 		 */
3490 		count = kmem_avail();
3491 		while ((bcp = strbcalls.bc_head) != NULL && nevent) {
3492 			STRSTAT(bufcalls);
3493 			--nevent;
3494 			if (bcp->bc_size <= count) {
3495 				bcp->bc_executor = curthread;
3496 				mutex_exit(&strbcall_lock);
3497 				(*bcp->bc_func)(bcp->bc_arg);
3498 				mutex_enter(&strbcall_lock);
3499 				bcp->bc_executor = NULL;
3500 				cv_broadcast(&bcall_cv);
3501 				strbcalls.bc_head = bcp->bc_next;
3502 				kmem_free(bcp, sizeof (strbufcall_t));
3503 			} else {
3504 				/*
3505 				 * too big, try again later - note
3506 				 * that nevent was decremented above
3507 				 * so we won't retry this one on this
3508 				 * iteration of the loop
3509 				 */
3510 				if (bcp->bc_next != NULL) {
3511 					strbcalls.bc_head = bcp->bc_next;
3512 					bcp->bc_next = NULL;
3513 					strbcalls.bc_tail->bc_next = bcp;
3514 					strbcalls.bc_tail = bcp;
3515 				}
3516 			}
3517 		}
3518 		if (strbcalls.bc_head == NULL)
3519 			strbcalls.bc_tail = NULL;
3520 	}
3521 
3522 	mutex_exit(&strbcall_lock);
3523 	mutex_exit(&bcall_monitor);
3524 }
3525 
3526 
3527 /*
3528  * Actually run the queue's service routine.
3529  */
3530 static void
3531 runservice(queue_t *q)
3532 {
3533 	qband_t *qbp;
3534 
3535 	ASSERT(q->q_qinfo->qi_srvp);
3536 again:
3537 	entersq(q->q_syncq, SQ_SVC);
3538 	TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
3539 		"runservice starts:%p", q);
3540 
3541 	if (!(q->q_flag & QWCLOSE))
3542 		(*q->q_qinfo->qi_srvp)(q);
3543 
3544 	TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
3545 		"runservice ends:(%p)", q);
3546 
3547 	leavesq(q->q_syncq, SQ_SVC);
3548 
3549 	mutex_enter(QLOCK(q));
3550 	if (q->q_flag & QENAB) {
3551 		q->q_flag &= ~QENAB;
3552 		mutex_exit(QLOCK(q));
3553 		goto again;
3554 	}
3555 	q->q_flag &= ~QINSERVICE;
3556 	q->q_flag &= ~QBACK;
3557 	for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
3558 		qbp->qb_flag &= ~QB_BACK;
3559 	/*
3560 	 * Wake up threads waiting for the service procedure
3561 	 * to be run (strclose and qdetach).
3562 	 */
3563 	cv_broadcast(&q->q_wait);
3564 
3565 	mutex_exit(QLOCK(q));
3566 }
3567 
3568 /*
3569  * Background processing of bufcalls.
3570  */
3571 void
3572 streams_bufcall_service(void)
3573 {
3574 	callb_cpr_t	cprinfo;
3575 
3576 	CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
3577 	    "streams_bufcall_service");
3578 
3579 	mutex_enter(&strbcall_lock);
3580 
3581 	for (;;) {
3582 		if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
3583 			mutex_exit(&strbcall_lock);
3584 			runbufcalls();
3585 			mutex_enter(&strbcall_lock);
3586 		}
3587 		if (strbcalls.bc_head != NULL) {
3588 			clock_t wt, tick;
3589 
3590 			STRSTAT(bcwaits);
3591 			/* Wait for memory to become available */
3592 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3593 			tick = SEC_TO_TICK(60);
3594 			time_to_wait(&wt, tick);
3595 			(void) cv_timedwait(&memavail_cv, &strbcall_lock, wt);
3596 			CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3597 		}
3598 
3599 		/* Wait for new work to arrive */
3600 		if (strbcalls.bc_head == NULL) {
3601 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3602 			cv_wait(&strbcall_cv, &strbcall_lock);
3603 			CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3604 		}
3605 	}
3606 }
3607 
3608 /*
3609  * Background processing of STREAMS queue and freebs tasks which failed
3610  * taskq_dispatch.
3611  */
3612 static void
3613 streams_qbkgrnd_service(void)
3614 {
3615 	callb_cpr_t cprinfo;
3616 	queue_t *q;
3617 
3618 	CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3619 	    "streams_bkgrnd_service");
3620 
3621 	mutex_enter(&service_queue);
3622 
3623 	for (;;) {
3624 		/*
3625 		 * Wait for work to arrive.
3626 		 */
3627 		while ((freebs_list == NULL) && (qhead == NULL)) {
3628 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3629 			cv_wait(&services_to_run, &service_queue);
3630 			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3631 		}
3632 		/*
3633 		 * Handle all pending freebs requests to free memory.
3634 		 */
3635 		while (freebs_list != NULL) {
3636 			mblk_t *mp = freebs_list;
3637 			freebs_list = mp->b_next;
3638 			mutex_exit(&service_queue);
3639 			mblk_free(mp);
3640 			mutex_enter(&service_queue);
3641 		}
3642 		/*
3643 		 * Run pending queues.
3644 		 */
3645 		while (qhead != NULL) {
3646 			DQ(q, qhead, qtail, q_link);
3647 			ASSERT(q != NULL);
3648 			mutex_exit(&service_queue);
3649 			queue_service(q);
3650 			mutex_enter(&service_queue);
3651 		}
3652 		ASSERT(qhead == NULL && qtail == NULL);
3653 	}
3654 }
3655 
3656 /*
3657  * Background processing of STREAMS syncq tasks which failed
3658  * taskq_dispatch.
3659  */
3660 static void
3661 streams_sqbkgrnd_service(void)
3662 {
3663 	callb_cpr_t cprinfo;
3664 	syncq_t *sq;
3665 
3666 	CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3667 	    "streams_sqbkgrnd_service");
3668 
3669 	mutex_enter(&service_queue);
3670 
3671 	for (;;) {
3672 		/*
3673 		 * Wait for work to arrive.
3674 		 */
3675 		while (sqhead == NULL) {
3676 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3677 			cv_wait(&syncqs_to_run, &service_queue);
3678 			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3679 		}
3680 
3681 		/*
3682 		 * Run pending syncqs.
3683 		 */
3684 		while (sqhead != NULL) {
3685 			DQ(sq, sqhead, sqtail, sq_next);
3686 			ASSERT(sq != NULL);
3687 			ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
3688 			mutex_exit(&service_queue);
3689 			syncq_service(sq);
3690 			mutex_enter(&service_queue);
3691 		}
3692 	}
3693 }
3694 
3695 /*
3696  * Disable the syncq and wait for background syncq processing to complete.
3697  * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
3698  * list.
3699  */
3700 void
3701 wait_sq_svc(syncq_t *sq)
3702 {
3703 	mutex_enter(SQLOCK(sq));
3704 	sq->sq_svcflags |= SQ_DISABLED;
3705 	if (sq->sq_svcflags & SQ_BGTHREAD) {
3706 		syncq_t *sq_chase;
3707 		syncq_t *sq_curr;
3708 		int removed;
3709 
3710 		ASSERT(sq->sq_servcount == 1);
3711 		mutex_enter(&service_queue);
3712 		RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3713 		mutex_exit(&service_queue);
3714 		if (removed) {
3715 			sq->sq_svcflags &= ~SQ_BGTHREAD;
3716 			sq->sq_servcount = 0;
3717 			STRSTAT(sqremoved);
3718 			goto done;
3719 		}
3720 	}
3721 	while (sq->sq_servcount != 0) {
3722 		sq->sq_flags |= SQ_WANTWAKEUP;
3723 		cv_wait(&sq->sq_wait, SQLOCK(sq));
3724 	}
3725 done:
3726 	mutex_exit(SQLOCK(sq));
3727 }
3728 
3729 /*
3730  * Put a syncq on the list of syncqs to be serviced by the sqthread.
3731  * Add the argument to the end of the sqhead list and set the flag
3732  * indicating this syncq has been enabled.  If it has already been
3733  * enabled, don't do anything.
3734  * This routine assumes that SQLOCK is held.
3735  * NOTE that the lock order is to have the SQLOCK first,
3736  * so if the service_queue lock is held, we need to release it
3737  * before acquiring the SQLOCK (mostly relevant for the background
3738  * thread, and this seems to be common among the STREAMS global locks).
3739  * Note that the sq_svcflags are protected by the SQLOCK.
3740  */
3741 void
3742 sqenable(syncq_t *sq)
3743 {
3744 	/*
3745 	 * Holding SQLOCK is probably not important except for where this
3746 	 * routine is believed to be called.  At those points it should be
3747 	 * held (and it is a pain to release it just for this routine, so
3748 	 * don't do it).
3749 	 */
3750 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
3751 
3752 	IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
3753 	IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);
3754 
3755 	/*
3756 	 * Do not put on list if background thread is scheduled or
3757 	 * syncq is disabled.
3758 	 */
3759 	if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
3760 		return;
3761 
3762 	/*
3763 	 * Check whether we should enable sq at all.
	 * Non-PERMOD syncqs may be drained by at most one thread.
	 * PERMOD syncqs may be drained by several threads, but we limit
	 * the total number to the lesser of
	 *	the number of queues on the syncq and
	 *	the number of CPUs.
3769 	 */
3770 	if (sq->sq_servcount != 0) {
3771 		if (((sq->sq_type & SQ_PERMOD) == 0) ||
3772 		    (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
3773 			STRSTAT(sqtoomany);
3774 			return;
3775 		}
3776 	}
3777 
3778 	sq->sq_tstamp = lbolt;
3779 	STRSTAT(sqenables);
3780 
3781 	/* Attempt a taskq dispatch */
3782 	sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
3783 	    (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
3784 	if (sq->sq_servid != NULL) {
3785 		sq->sq_servcount++;
3786 		return;
3787 	}
3788 
3789 	/*
3790 	 * This taskq dispatch failed, but a previous one may have succeeded.
3791 	 * Don't try to schedule on the background thread whilst there is
3792 	 * outstanding taskq processing.
3793 	 */
3794 	if (sq->sq_servcount != 0)
3795 		return;
3796 
3797 	/*
3798 	 * System is low on resources and can't perform a non-sleeping
3799 	 * dispatch. Schedule the syncq for a background thread and mark the
3800 	 * syncq to avoid any further taskq dispatch attempts.
3801 	 */
3802 	mutex_enter(&service_queue);
3803 	STRSTAT(taskqfails);
3804 	ENQUEUE(sq, sqhead, sqtail, sq_next);
3805 	sq->sq_svcflags |= SQ_BGTHREAD;
3806 	sq->sq_servcount = 1;
3807 	cv_signal(&syncqs_to_run);
3808 	mutex_exit(&service_queue);
3809 }
3810 
3811 /*
3812  * Note: fifo_close() depends on the mblk_t on the queue being freed
 * asynchronously. The asynchronous freeing of messages breaks the
 * recursive call chain of fifo_close() while there are I_SENDFD type
 * messages referring to other file pointers on the queue. When closing
 * pipes this avoids stack overflow in the case of daisy-chained pipes,
 * and also avoids deadlock in the case of fifonode_t pairs (which
 * share the same fifolock_t).
3819  */
3820 
3821 /* ARGSUSED */
3822 void
3823 freebs_enqueue(mblk_t *mp, dblk_t *dbp)
3824 {
3825 	ASSERT(dbp->db_mblk == mp);
3826 
3827 	/*
	 * Check data sanity. The dblock should have a non-NULL free function.
	 * It is better to panic here than later, when the dblock is freed
	 * asynchronously and the context is lost.
3831 	 */
3832 	if (dbp->db_frtnp->free_func == NULL) {
3833 		panic("freebs_enqueue: dblock %p has a NULL free callback",
3834 		    (void *) dbp);
3835 	}
3836 
3837 	STRSTAT(freebs);
3838 	if (taskq_dispatch(streams_taskq, (task_func_t *)mblk_free, mp,
3839 	    TQ_NOSLEEP) == NULL) {
3840 		/*
3841 		 * System is low on resources and can't perform a non-sleeping
3842 		 * dispatch. Schedule for a background thread.
3843 		 */
3844 		mutex_enter(&service_queue);
3845 		STRSTAT(taskqfails);
3846 		mp->b_next = freebs_list;
3847 		freebs_list = mp;
3848 		cv_signal(&services_to_run);
3849 		mutex_exit(&service_queue);
3850 	}
3851 }
3852 
3853 /*
3854  * Set the QBACK or QB_BACK flag in the given queue for
3855  * the given priority band.
3856  */
3857 void
3858 setqback(queue_t *q, unsigned char pri)
3859 {
3860 	int i;
3861 	qband_t *qbp;
3862 	qband_t **qbpp;
3863 
3864 	ASSERT(MUTEX_HELD(QLOCK(q)));
3865 	if (pri != 0) {
3866 		if (pri > q->q_nband) {
3867 			qbpp = &q->q_bandp;
3868 			while (*qbpp)
3869 				qbpp = &(*qbpp)->qb_next;
3870 			while (pri > q->q_nband) {
3871 				if ((*qbpp = allocband()) == NULL) {
3872 					cmn_err(CE_WARN,
3873 					    "setqback: can't allocate qband\n");
3874 					return;
3875 				}
3876 				(*qbpp)->qb_hiwat = q->q_hiwat;
3877 				(*qbpp)->qb_lowat = q->q_lowat;
3878 				q->q_nband++;
3879 				qbpp = &(*qbpp)->qb_next;
3880 			}
3881 		}
3882 		qbp = q->q_bandp;
3883 		i = pri;
3884 		while (--i)
3885 			qbp = qbp->qb_next;
3886 		qbp->qb_flag |= QB_BACK;
3887 	} else {
3888 		q->q_flag |= QBACK;
3889 	}
3890 }
3891 
3892 int
3893 strcopyin(void *from, void *to, size_t len, int copyflag)
3894 {
3895 	if (copyflag & U_TO_K) {
3896 		ASSERT((copyflag & K_TO_K) == 0);
3897 		if (copyin(from, to, len))
3898 			return (EFAULT);
3899 	} else {
3900 		ASSERT(copyflag & K_TO_K);
3901 		bcopy(from, to, len);
3902 	}
3903 	return (0);
3904 }
3905 
3906 int
3907 strcopyout(void *from, void *to, size_t len, int copyflag)
3908 {
3909 	if (copyflag & U_TO_K) {
3910 		if (copyout(from, to, len))
3911 			return (EFAULT);
3912 	} else {
3913 		ASSERT(copyflag & K_TO_K);
3914 		bcopy(from, to, len);
3915 	}
3916 	return (0);
3917 }
3918 
3919 /*
3920  * strsignal_nolock() posts a signal to the process(es) at the stream head.
3921  * It assumes that the stream head lock is already held, whereas strsignal()
3922  * acquires the lock first.  This routine was created because a few callers
3923  * release the stream head lock before calling only to re-acquire it after
3924  * it returns.
3925  */
3926 void
3927 strsignal_nolock(stdata_t *stp, int sig, int32_t band)
3928 {
3929 	ASSERT(MUTEX_HELD(&stp->sd_lock));
3930 	switch (sig) {
3931 	case SIGPOLL:
3932 		if (stp->sd_sigflags & S_MSG)
3933 			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
3934 		break;
3935 
3936 	default:
3937 		if (stp->sd_pgidp) {
3938 			pgsignal(stp->sd_pgidp, sig);
3939 		}
3940 		break;
3941 	}
3942 }
3943 
3944 void
3945 strsignal(stdata_t *stp, int sig, int32_t band)
3946 {
3947 	TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
3948 		"strsignal:%p, %X, %X", stp, sig, band);
3949 
3950 	mutex_enter(&stp->sd_lock);
3951 	switch (sig) {
3952 	case SIGPOLL:
3953 		if (stp->sd_sigflags & S_MSG)
3954 			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
3955 		break;
3956 
3957 	default:
3958 		if (stp->sd_pgidp) {
3959 			pgsignal(stp->sd_pgidp, sig);
3960 		}
3961 		break;
3962 	}
3963 	mutex_exit(&stp->sd_lock);
3964 }
3965 
3966 void
3967 strhup(stdata_t *stp)
3968 {
3969 	pollwakeup(&stp->sd_pollist, POLLHUP);
3970 	mutex_enter(&stp->sd_lock);
3971 	if (stp->sd_sigflags & S_HANGUP)
3972 		strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
3973 	mutex_exit(&stp->sd_lock);
3974 }
3975 
3976 void
3977 stralloctty(sess_t *sp, stdata_t *stp)
3978 {
3979 	mutex_enter(&stp->sd_lock);
3980 	mutex_enter(&pidlock);
3981 	stp->sd_sidp = sp->s_sidp;
3982 	stp->sd_pgidp = sp->s_sidp;
3983 	PID_HOLD(stp->sd_pgidp);
3984 	PID_HOLD(stp->sd_sidp);
3985 	mutex_exit(&pidlock);
3986 	mutex_exit(&stp->sd_lock);
3987 }
3988 
3989 void
3990 strfreectty(stdata_t *stp)
3991 {
3992 	mutex_enter(&stp->sd_lock);
3993 	pgsignal(stp->sd_pgidp, SIGHUP);
3994 	mutex_enter(&pidlock);
3995 	PID_RELE(stp->sd_pgidp);
3996 	PID_RELE(stp->sd_sidp);
3997 	stp->sd_pgidp = NULL;
3998 	stp->sd_sidp = NULL;
3999 	mutex_exit(&pidlock);
4000 	mutex_exit(&stp->sd_lock);
4001 	if (!(stp->sd_flag & STRHUP))
4002 		strhup(stp);
4003 }
4004 
4005 void
4006 strctty(stdata_t *stp)
4007 {
4008 	extern vnode_t *makectty();
4009 	proc_t *p = curproc;
4010 	sess_t *sp = p->p_sessp;
4011 
4012 	mutex_enter(&stp->sd_lock);
4013 	/*
4014 	 * No need to hold the session lock or do a TTYHOLD,
4015 	 * because this is the only thread that can be the
4016 	 * session leader and not have a controlling tty.
4017 	 */
4018 	if ((stp->sd_flag & (STRHUP|STRDERR|STWRERR|STPLEX)) == 0 &&
4019 	    stp->sd_sidp == NULL &&		/* not allocated as ctty */
4020 	    sp->s_sidp == p->p_pidp &&		/* session leader */
4021 	    sp->s_flag != SESS_CLOSE &&		/* session is not closing */
4022 	    sp->s_vp == NULL) {			/* without ctty */
4023 		mutex_exit(&stp->sd_lock);
4024 		ASSERT(stp->sd_pgidp == NULL);
4025 		alloctty(p, makectty(stp->sd_vnode));
4026 		stralloctty(sp, stp);
4027 		mutex_enter(&stp->sd_lock);
4028 		stp->sd_flag |= STRISTTY;	/* just to be sure */
4029 	}
4030 	mutex_exit(&stp->sd_lock);
4031 }
4032 
4033 /*
 * Enable the nearest back queue with a service procedure.
 * Use pri == -1 to skip the setqback().
4036  */
4037 void
4038 backenable(queue_t *q, int pri)
4039 {
4040 	queue_t	*nq;
4041 
4042 	/*
4043 	 * our presence might not prevent other modules in our own
4044 	 * stream from popping/pushing since the caller of getq might not
4045 	 * have a claim on the queue (some drivers do a getq on somebody
4046 	 * else's queue - they know that the queue itself is not going away
4047 	 * but the framework has to guarantee q_next in that stream.)
4048 	 */
4049 	claimstr(q);
4050 
4051 	/* find nearest back queue with service proc */
4052 	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
4053 		ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
4054 	}
4055 
4056 	if (nq) {
4057 		kthread_t *freezer;
4058 		/*
4059 		 * backenable can be called either with no locks held
4060 		 * or with the stream frozen (the latter occurs when a module
4061 		 * calls rmvq with the stream frozen.) If the stream is frozen
4062 		 * by the caller the caller will hold all qlocks in the stream.
4063 		 */
4064 		freezer = STREAM(q)->sd_freezer;
4065 		if (freezer != curthread) {
4066 			mutex_enter(QLOCK(nq));
4067 		}
4068 #ifdef DEBUG
4069 		else {
4070 			ASSERT(frozenstr(q));
4071 			ASSERT(MUTEX_HELD(QLOCK(q)));
4072 			ASSERT(MUTEX_HELD(QLOCK(nq)));
4073 		}
4074 #endif
4075 		if (pri != -1)
4076 			setqback(nq, pri);
4077 		qenable_locked(nq);
4078 		if (freezer != curthread)
4079 			mutex_exit(QLOCK(nq));
4080 	}
4081 	releasestr(q);
4082 }
4083 
4084 /*
4085  * Return the appropriate errno when one of flags_to_check is set
4086  * in sd_flags. Uses the exported error routines if they are set.
 * Will return 0 if no error is set (or if the exported error routines
4088  * do not return an error).
4089  *
4090  * If there is both a read and write error to check we prefer the read error.
4091  * Also, give preference to recorded errno's over the error functions.
4092  * The flags that are handled are:
4093  *	STPLEX		return EINVAL
4094  *	STRDERR		return sd_rerror (and clear if STRDERRNONPERSIST)
4095  *	STWRERR		return sd_werror (and clear if STWRERRNONPERSIST)
4096  *	STRHUP		return sd_werror
4097  *
 * If the caller indicates that the operation is a peek, a nonpersistent
 * error is not cleared.
4100  */
4101 int
4102 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
4103 {
4104 	int32_t sd_flag = stp->sd_flag & flags_to_check;
4105 	int error = 0;
4106 
4107 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4108 	ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
4109 	if (sd_flag & STPLEX)
4110 		error = EINVAL;
4111 	else if (sd_flag & STRDERR) {
4112 		error = stp->sd_rerror;
4113 		if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
4114 			/*
			 * Read errors are non-persistent, i.e. discarded once
			 * returned to a non-peeking caller.
4117 			 */
4118 			stp->sd_rerror = 0;
4119 			stp->sd_flag &= ~STRDERR;
4120 		}
4121 		if (error == 0 && stp->sd_rderrfunc != NULL) {
4122 			int clearerr = 0;
4123 
4124 			error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
4126 			if (clearerr) {
4127 				stp->sd_flag &= ~STRDERR;
4128 				stp->sd_rderrfunc = NULL;
4129 			}
4130 		}
4131 	} else if (sd_flag & STWRERR) {
4132 		error = stp->sd_werror;
4133 		if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
4134 			/*
			 * Write errors are non-persistent, i.e. discarded once
			 * returned to a non-peeking caller.
4137 			 */
4138 			stp->sd_werror = 0;
4139 			stp->sd_flag &= ~STWRERR;
4140 		}
4141 		if (error == 0 && stp->sd_wrerrfunc != NULL) {
4142 			int clearerr = 0;
4143 
4144 			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
4146 			if (clearerr) {
4147 				stp->sd_flag &= ~STWRERR;
4148 				stp->sd_wrerrfunc = NULL;
4149 			}
4150 		}
4151 	} else if (sd_flag & STRHUP) {
4152 		/* sd_werror set when STRHUP */
4153 		error = stp->sd_werror;
4154 	}
4155 	return (error);
4156 }
4157 
4158 
4159 /*
4160  * single-thread open/close/push/pop
4161  * for twisted streams also
4162  */
4163 int
4164 strstartplumb(stdata_t *stp, int flag, int cmd)
4165 {
4166 	int waited = 1;
4167 	int error = 0;
4168 
4169 	if (STRMATED(stp)) {
4170 		struct stdata *stmatep = stp->sd_mate;
4171 
4172 		STRLOCKMATES(stp);
4173 		while (waited) {
4174 			waited = 0;
4175 			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4176 				if ((cmd == I_POP) &&
4177 				    (flag & (FNDELAY|FNONBLOCK))) {
4178 					STRUNLOCKMATES(stp);
4179 					return (EAGAIN);
4180 				}
4181 				waited = 1;
4182 				mutex_exit(&stp->sd_lock);
4183 				if (!cv_wait_sig(&stmatep->sd_monitor,
4184 				    &stmatep->sd_lock)) {
4185 					mutex_exit(&stmatep->sd_lock);
4186 					return (EINTR);
4187 				}
4188 				mutex_exit(&stmatep->sd_lock);
4189 				STRLOCKMATES(stp);
4190 			}
4191 			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4192 				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
4194 					STRUNLOCKMATES(stp);
4195 					return (EAGAIN);
4196 				}
4197 				waited = 1;
4198 				mutex_exit(&stmatep->sd_lock);
4199 				if (!cv_wait_sig(&stp->sd_monitor,
4200 				    &stp->sd_lock)) {
4201 					mutex_exit(&stp->sd_lock);
4202 					return (EINTR);
4203 				}
4204 				mutex_exit(&stp->sd_lock);
4205 				STRLOCKMATES(stp);
4206 			}
4207 			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4208 				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
4210 				if (error != 0) {
4211 					STRUNLOCKMATES(stp);
4212 					return (error);
4213 				}
4214 			}
4215 		}
4216 		stp->sd_flag |= STRPLUMB;
4217 		STRUNLOCKMATES(stp);
4218 	} else {
4219 		mutex_enter(&stp->sd_lock);
4220 		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4221 			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
4222 			    (flag & (FNDELAY|FNONBLOCK))) {
4223 				mutex_exit(&stp->sd_lock);
4224 				return (EAGAIN);
4225 			}
4226 			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
4227 				mutex_exit(&stp->sd_lock);
4228 				return (EINTR);
4229 			}
4230 			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4231 				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
4233 				if (error != 0) {
4234 					mutex_exit(&stp->sd_lock);
4235 					return (error);
4236 				}
4237 			}
4238 		}
4239 		stp->sd_flag |= STRPLUMB;
4240 		mutex_exit(&stp->sd_lock);
4241 	}
4242 	return (0);
4243 }
4244 
4245 /*
4246  * Complete the plumbing operation associated with stream `stp'.
4247  */
4248 void
4249 strendplumb(stdata_t *stp)
4250 {
4251 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4252 	ASSERT(stp->sd_flag & STRPLUMB);
4253 	stp->sd_flag &= ~STRPLUMB;
4254 	cv_broadcast(&stp->sd_monitor);
4255 }
4256 
4257 /*
4258  * This describes how the STREAMS framework handles synchronization
4259  * during open/push and close/pop.
4260  * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case is in general harder, open
 * and close have significant similarities.
4263  *
4264  * During close the STREAMS framework has to both ensure that there
4265  * are no stale references to the queue pair (and syncq) that
4266  * are being closed and also provide the guarantees that are documented
4267  * in qprocsoff(9F).
 * If there are stale references to the queue that is closing, it can
 * result in kernel memory corruption or kernel panics.
4270  *
 * Note that it is up to the module/driver to ensure that it itself
4272  * does not have any stale references to the closing queues once its close
4273  * routine returns. This includes:
4274  *  - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
4275  *    associated with the queues. For timeout and bufcall callbacks the
 *    module/driver also has to wait for any callbacks that
 *    are already in progress to complete.
4278  *  - If the module/driver is using esballoc it has to ensure that any
4279  *    esballoc free functions do not refer to a queue that has closed.
4280  *    (Note that in general the close routine can not wait for the esballoc'ed
4281  *    messages to be freed since that can cause a deadlock.)
4282  *  - Cancelling any interrupts that refer to the closing queues and
4283  *    also ensuring that there are no interrupts in progress that will
4284  *    refer to the closing queues once the close routine returns.
4285  *  - For multiplexors removing any driver global state that refers to
4286  *    the closing queue and also ensuring that there are no threads in
 *    the multiplexor that have picked up a queue pointer but not yet
4288  *    finished using it.
4289  *
4290  * In addition, a driver/module can only reference the q_next pointer
4291  * in its open, close, put, or service procedures or in a
4292  * qtimeout/qbufcall callback procedure executing "on" the correct
4293  * stream. Thus it can not reference the q_next pointer in an interrupt
4294  * routine or a timeout, bufcall or esballoc callback routine. Likewise
4295  * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
4297  * In all the cases when the driver/module can not access the q_next
4298  * field it must use the *next* versions e.g. canputnext instead of
4299  * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
4300  *
4301  *
4302  * Assuming that the driver/module conforms to the above constraints
4303  * the STREAMS framework has to avoid stale references to q_next for all
4304  * the framework internal cases which include (but are not limited to):
4305  *  - Threads in canput/canputnext/backenable and elsewhere that are
4306  *    walking q_next.
4307  *  - Messages on a syncq that have a reference to the queue through b_queue.
4308  *  - Messages on an outer perimeter (syncq) that have a reference to the
4309  *    queue through b_queue.
4310  *  - Threads that use q_nfsrv (e.g. canput) to find a queue.
4311  *    Note that only canput and bcanput use q_nfsrv without any locking.
4312  *
 * To provide the qprocsoff(9F) guarantees, the framework has to ensure
 * that after qprocsoff returns no threads can enter the put or service
 * routines for the closing read or write-side queue.
4316  * In addition to preventing "direct" entry into the put procedures
4317  * the framework also has to prevent messages being drained from
4318  * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
4320  * mechanism to prevent qwriter(PERIM_OUTER) from running after
4321  * qprocsoff has returned.
4322  * Note that if a module/driver uses put(9F) on one of its own queues
4323  * it is up to the module/driver to ensure that the put() doesn't
4324  * get called when the queue is closing.
4325  *
4326  *
 * The framework aspects of the above "contract" are implemented by
4328  * qprocsoff, removeq, and strlock:
4329  *  - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
4330  *    entering the service procedures.
4331  *  - strlock acquires the sd_lock and sd_reflock to prevent putnext,
4332  *    canputnext, backenable etc from dereferencing the q_next that will
4333  *    soon change.
4334  *  - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
4335  *    or other q_next walker that uses claimstr/releasestr to finish.
4336  *  - optionally for every syncq in the stream strlock acquires all the
4337  *    sq_lock's and waits for all sq_counts to drop to a value that indicates
4338  *    that no thread executes in the put or service procedures and that no
4339  *    thread is draining into the module/driver. This ensures that no
4340  *    open, close, put, service, or qtimeout/qbufcall callback procedure is
 *    currently executing; hence no such thread can end up with the old stale
4342  *    q_next value and no canput/backenable can have the old stale
4343  *    q_nfsrv/q_next.
4344  *  - qdetach (wait_svc) makes sure that any scheduled or running threads
4345  *    have either finished or observed the QWCLOSE flag and gone away.
4346  */
4347 
4348 
4349 /*
4350  * Get all the locks necessary to change q_next.
4351  *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
4353  * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
4355  * Since threads calling removeq() are in the process of removing their queues
4356  * from the stream, we do not need to worry about them accessing a stale q_next
4357  * pointer and thus we do not need to wait for them to exit (in fact, waiting
4358  * for them can cause deadlock).
4359  *
4360  * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
4362  * increase on some syncq while it is waiting on some other syncq.)
4363  *
4364  * Assumes that only one thread attempts to call strlock for a given
4365  * stream. If this is not the case the two threads would deadlock.
4366  * This assumption is guaranteed since strlock is only called by insertq
4367  * and removeq and streams plumbing changes are single-threaded for
4368  * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
4369  *
4370  * For pipes, it is not difficult to atomically designate a pair of streams
4371  * to be mated. Once mated atomically by the framework the twisted pair remain
4372  * configured that way until dismantled atomically by the framework.
4373  * When plumbing takes place on a twisted stream it is necessary to ensure that
4374  * this operation is done exclusively on the twisted stream since two such
4375  * operations, each initiated on different ends of the pipe will deadlock
4376  * waiting for each other to complete.
4377  *
4378  * On entry, no locks should be held.
 * The locks acquired and held by strlock depend on a few factors.
4380  * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
4381  *   and held on exit and all sq_count are at an acceptable level.
4382  * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
4383  *   sd_refcnt being zero.
4384  */
4385 
4386 static void
4387 strlock(struct stdata *stp, sqlist_t *sqlist)
4388 {
4389 	syncql_t *sql, *sql2;
4390 retry:
4391 	/*
4392 	 * Wait for any claimstr to go away.
4393 	 */
4394 	if (STRMATED(stp)) {
4395 		struct stdata *stp1, *stp2;
4396 
4397 		STRLOCKMATES(stp);
4398 		/*
4399 		 * Note that the selection of locking order is not
		 * important, just that they are always acquired in
4401 		 * the same order.  To assure this, we choose this
4402 		 * order based on the value of the pointer, and since
4403 		 * the pointer will not change for the life of this
4404 		 * pair, we will always grab the locks in the same
4405 		 * order (and hence, prevent deadlocks).
4406 		 */
4407 		if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
4408 			stp1 = stp;
4409 			stp2 = stp->sd_mate;
4410 		} else {
4411 			stp2 = stp;
4412 			stp1 = stp->sd_mate;
4413 		}
4414 		mutex_enter(&stp1->sd_reflock);
4415 		if (stp1->sd_refcnt > 0) {
4416 			STRUNLOCKMATES(stp);
4417 			cv_wait(&stp1->sd_monitor, &stp1->sd_reflock);
4418 			mutex_exit(&stp1->sd_reflock);
4419 			goto retry;
4420 		}
4421 		mutex_enter(&stp2->sd_reflock);
4422 		if (stp2->sd_refcnt > 0) {
4423 			STRUNLOCKMATES(stp);
4424 			mutex_exit(&stp1->sd_reflock);
4425 			cv_wait(&stp2->sd_monitor, &stp2->sd_reflock);
4426 			mutex_exit(&stp2->sd_reflock);
4427 			goto retry;
4428 		}
4429 		STREAM_PUTLOCKS_ENTER(stp1);
4430 		STREAM_PUTLOCKS_ENTER(stp2);
4431 	} else {
4432 		mutex_enter(&stp->sd_lock);
4433 		mutex_enter(&stp->sd_reflock);
4434 		while (stp->sd_refcnt > 0) {
4435 			mutex_exit(&stp->sd_lock);
4436 			cv_wait(&stp->sd_monitor, &stp->sd_reflock);
4437 			if (mutex_tryenter(&stp->sd_lock) == 0) {
4438 				mutex_exit(&stp->sd_reflock);
4439 				mutex_enter(&stp->sd_lock);
4440 				mutex_enter(&stp->sd_reflock);
4441 			}
4442 		}
4443 		STREAM_PUTLOCKS_ENTER(stp);
4444 	}
4445 
4446 	if (sqlist == NULL)
4447 		return;
4448 
4449 	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4450 		syncq_t *sq = sql->sql_sq;
4451 		uint16_t count;
4452 
4453 		mutex_enter(SQLOCK(sq));
4454 		count = sq->sq_count;
4455 		ASSERT(sq->sq_rmqcount <= count);
4456 		SQ_PUTLOCKS_ENTER(sq);
4457 		SUM_SQ_PUTCOUNTS(sq, count);
4458 		if (count == sq->sq_rmqcount)
4459 			continue;
4460 
4461 		/* Failed - drop all locks that we have acquired so far */
4462 		if (STRMATED(stp)) {
4463 			STREAM_PUTLOCKS_EXIT(stp);
4464 			STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4465 			STRUNLOCKMATES(stp);
4466 			mutex_exit(&stp->sd_reflock);
4467 			mutex_exit(&stp->sd_mate->sd_reflock);
4468 		} else {
4469 			STREAM_PUTLOCKS_EXIT(stp);
4470 			mutex_exit(&stp->sd_lock);
4471 			mutex_exit(&stp->sd_reflock);
4472 		}
4473 		for (sql2 = sqlist->sqlist_head; sql2 != sql;
4474 		    sql2 = sql2->sql_next) {
4475 			SQ_PUTLOCKS_EXIT(sql2->sql_sq);
4476 			mutex_exit(SQLOCK(sql2->sql_sq));
4477 		}
4478 
4479 		/*
4480 		 * The wait loop below may starve when there are many threads
4481 		 * claiming the syncq. This is especially a problem with permod
4482 		 * syncqs (IP). To lessen the impact of the problem we increment
4483 		 * sq_needexcl and clear fastbits so that putnexts will slow
4484 		 * down and call sqenable instead of draining right away.
4485 		 */
4486 		sq->sq_needexcl++;
4487 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4488 		while (count > sq->sq_rmqcount) {
4489 			sq->sq_flags |= SQ_WANTWAKEUP;
4490 			SQ_PUTLOCKS_EXIT(sq);
4491 			cv_wait(&sq->sq_wait, SQLOCK(sq));
4492 			count = sq->sq_count;
4493 			SQ_PUTLOCKS_ENTER(sq);
4494 			SUM_SQ_PUTCOUNTS(sq, count);
4495 		}
4496 		sq->sq_needexcl--;
4497 		if (sq->sq_needexcl == 0)
4498 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4499 		SQ_PUTLOCKS_EXIT(sq);
4500 		ASSERT(count == sq->sq_rmqcount);
4501 		mutex_exit(SQLOCK(sq));
4502 		goto retry;
4503 	}
4504 }
4505 
4506 /*
4507  * Drop all the locks that strlock acquired.
4508  */
4509 static void
4510 strunlock(struct stdata *stp, sqlist_t *sqlist)
4511 {
4512 	syncql_t *sql;
4513 
4514 	if (STRMATED(stp)) {
4515 		STREAM_PUTLOCKS_EXIT(stp);
4516 		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4517 		STRUNLOCKMATES(stp);
4518 		mutex_exit(&stp->sd_reflock);
4519 		mutex_exit(&stp->sd_mate->sd_reflock);
4520 	} else {
4521 		STREAM_PUTLOCKS_EXIT(stp);
4522 		mutex_exit(&stp->sd_lock);
4523 		mutex_exit(&stp->sd_reflock);
4524 	}
4525 
4526 	if (sqlist == NULL)
4527 		return;
4528 
4529 	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4530 		SQ_PUTLOCKS_EXIT(sql->sql_sq);
4531 		mutex_exit(SQLOCK(sql->sql_sq));
4532 	}
4533 }
4534 
4535 
4536 /*
 * Given two read queues, insert a new single one after the other.
 *
 * This routine acquires all the necessary locks in order to change
 * q_next and related pointers using strlock().
4541  * It depends on the stream head ensuring that there are no concurrent
4542  * insertq or removeq on the same stream. The stream head ensures this
4543  * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
4544  *
4545  * Note that no syncq locks are held during the q_next change. This is
4546  * applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream. Thus drivers/modules that
 * do a canput(rq->q_next) will never see a closed/freed queue pointer.
4550  */
4551 void
4552 insertq(struct stdata *stp, queue_t *new)
4553 {
4554 	queue_t	*after;
4555 	queue_t *wafter;
4556 	queue_t *wnew = _WR(new);
4557 	boolean_t have_fifo = B_FALSE;
4558 
4559 	if (new->q_flag & _QINSERTING) {
4560 		ASSERT(stp->sd_vnode->v_type != VFIFO);
4561 		after = new->q_next;
4562 		wafter = _WR(new->q_next);
4563 	} else {
4564 		after = _RD(stp->sd_wrq);
4565 		wafter = stp->sd_wrq;
4566 	}
4567 
4568 	TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
4569 		"insertq:%p, %p", after, new);
4570 	ASSERT(after->q_flag & QREADR);
4571 	ASSERT(new->q_flag & QREADR);
4572 
4573 	strlock(stp, NULL);
4574 
4575 	/* Do we have a FIFO? */
4576 	if (wafter->q_next == after) {
4577 		have_fifo = B_TRUE;
4578 		wnew->q_next = new;
4579 	} else {
4580 		wnew->q_next = wafter->q_next;
4581 	}
4582 	new->q_next = after;
4583 
4584 	set_nfsrv_ptr(new, wnew, after, wafter);
4585 	/*
4586 	 * set_nfsrv_ptr() needs to know if this is an insertion or not,
4587 	 * so only reset this flag after calling it.
4588 	 */
4589 	new->q_flag &= ~_QINSERTING;
4590 
4591 	if (have_fifo) {
4592 		wafter->q_next = wnew;
4593 	} else {
4594 		if (wafter->q_next)
4595 			_OTHERQ(wafter->q_next)->q_next = new;
4596 		wafter->q_next = wnew;
4597 	}
4598 
4599 	set_qend(new);
4600 	/* The QEND flag might have to be updated for the upstream guy */
4601 	set_qend(after);
4602 
4603 	ASSERT(_SAMESTR(new) == O_SAMESTR(new));
4604 	ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
4605 	ASSERT(_SAMESTR(after) == O_SAMESTR(after));
4606 	ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
4607 	strsetuio(stp);
4608 
4609 	/*
4610 	 * If this was a module insertion, bump the push count.
4611 	 */
4612 	if (!(new->q_flag & QISDRV))
4613 		stp->sd_pushcnt++;
4614 
4615 	strunlock(stp, NULL);
4616 }
4617 
4618 /*
4619  * Given a read queue, unlink it from any neighbors.
4620  *
4621  * This routine acquires all the necessary locks in order to
4622  * change q_next and related pointers and also guard against
4623  * stale references (e.g. through q_next) to the queue that
4624  * is being removed. It also plays part of the role in ensuring
4625  * that the module's/driver's put procedure doesn't get called
4626  * after qprocsoff returns.
4627  *
4628  * Removeq depends on the stream head ensuring that there are
4629  * no concurrent insertq or removeq on the same stream. The
4630  * stream head ensures this using the flags STWOPEN, STRCLOSE and
4631  * STRPLUMB.
4632  *
4633  * The set of locks needed to remove the queue is different in
4634  * different cases:
4635  *
4636  * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
4637  * waiting for the syncq reference count to drop to 0 indicating that no
4638  * non-close threads are present anywhere in the stream. This ensures that any
4639  * module/driver can reference q_next in its open, close, put, or service
4640  * procedures.
4641  *
4642  * The sq_rmqcount counter tracks the number of threads inside removeq().
 * strlock() ensures that either no threads are executing inside the
 * perimeter or only a thread calling qprocsoff() is.
 *
 * strlock() compares the value of sq_count with the number of threads inside
 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to
 * wake up any threads waiting in strlock() when sq_rmqcount increases.
4649  */
4650 
4651 void
4652 removeq(queue_t *qp)
4653 {
4654 	queue_t *wqp = _WR(qp);
4655 	struct stdata *stp = STREAM(qp);
4656 	sqlist_t *sqlist = NULL;
4657 	boolean_t isdriver;
4658 	int moved;
4659 	syncq_t *sq = qp->q_syncq;
4660 	syncq_t *wsq = wqp->q_syncq;
4661 
4662 	ASSERT(stp);
4663 
4664 	TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
4665 		"removeq:%p %p", qp, wqp);
4666 	ASSERT(qp->q_flag&QREADR);
4667 
4668 	/*
4669 	 * For queues using Synchronous streams, we must wait for all threads in
4670 	 * rwnext() to drain out before proceeding.
4671 	 */
4672 	if (qp->q_flag & QSYNCSTR) {
		/* First, we need to wake up any threads blocked in rwnext() */
4674 		mutex_enter(SQLOCK(sq));
4675 		if (sq->sq_flags & SQ_WANTWAKEUP) {
4676 			sq->sq_flags &= ~SQ_WANTWAKEUP;
4677 			cv_broadcast(&sq->sq_wait);
4678 		}
4679 		mutex_exit(SQLOCK(sq));
4680 
4681 		if (wsq != sq) {
4682 			mutex_enter(SQLOCK(wsq));
4683 			if (wsq->sq_flags & SQ_WANTWAKEUP) {
4684 				wsq->sq_flags &= ~SQ_WANTWAKEUP;
4685 				cv_broadcast(&wsq->sq_wait);
4686 			}
4687 			mutex_exit(SQLOCK(wsq));
4688 		}
4689 
4690 		mutex_enter(QLOCK(qp));
4691 		while (qp->q_rwcnt > 0) {
4692 			qp->q_flag |= QWANTRMQSYNC;
4693 			cv_wait(&qp->q_wait, QLOCK(qp));
4694 		}
4695 		mutex_exit(QLOCK(qp));
4696 
4697 		mutex_enter(QLOCK(wqp));
4698 		while (wqp->q_rwcnt > 0) {
4699 			wqp->q_flag |= QWANTRMQSYNC;
4700 			cv_wait(&wqp->q_wait, QLOCK(wqp));
4701 		}
4702 		mutex_exit(QLOCK(wqp));
4703 	}
4704 
4705 	mutex_enter(SQLOCK(sq));
4706 	sq->sq_rmqcount++;
4707 	if (sq->sq_flags & SQ_WANTWAKEUP) {
4708 		sq->sq_flags &= ~SQ_WANTWAKEUP;
4709 		cv_broadcast(&sq->sq_wait);
4710 	}
4711 	mutex_exit(SQLOCK(sq));
4712 
4713 	isdriver = (qp->q_flag & QISDRV);
4714 
4715 	sqlist = sqlist_build(qp, stp, STRMATED(stp));
4716 	strlock(stp, sqlist);
4717 
4718 	reset_nfsrv_ptr(qp, wqp);
4719 
4720 	ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
4721 	ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
4722 	/* Do we have a FIFO? */
4723 	if (wqp->q_next == qp) {
4724 		stp->sd_wrq->q_next = _RD(stp->sd_wrq);
4725 	} else {
4726 		if (wqp->q_next)
4727 			backq(qp)->q_next = qp->q_next;
4728 		if (qp->q_next)
4729 			backq(wqp)->q_next = wqp->q_next;
4730 	}
4731 
4732 	/* The QEND flag might have to be updated for the upstream guy */
4733 	if (qp->q_next)
4734 		set_qend(qp->q_next);
4735 
4736 	ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
4737 	ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));
4738 
4739 	/*
4740 	 * Move any messages destined for the put procedures to the next
4741 	 * syncq in line. Otherwise free them.
4742 	 */
4743 	moved = 0;
4744 	/*
4745 	 * Quick check to see whether there are any messages or events.
4746 	 */
4747 	if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
4748 		moved += propagate_syncq(qp);
4749 	if (wqp->q_syncqmsgs != 0 ||
4750 	    (wqp->q_syncq->sq_flags & SQ_EVENTS))
4751 		moved += propagate_syncq(wqp);
4752 
4753 	strsetuio(stp);
4754 
4755 	/*
4756 	 * If this was a module removal, decrement the push count.
4757 	 */
4758 	if (!isdriver)
4759 		stp->sd_pushcnt--;
4760 
4761 	strunlock(stp, sqlist);
4762 	sqlist_free(sqlist);
4763 
4764 	/*
4765 	 * Make sure any messages that were propagated are drained.
4766 	 * Also clear any QFULL bit caused by messages that were propagated.
4767 	 */
4768 
4769 	if (qp->q_next != NULL) {
4770 		clr_qfull(qp);
4771 		/*
		 * For the driver calling qprocsoff, propagate_syncq
		 * frees all the messages instead of putting them in
		 * the stream head.
4775 		 */
4776 		if (!isdriver && (moved > 0))
4777 			emptysq(qp->q_next->q_syncq);
4778 	}
4779 	if (wqp->q_next != NULL) {
4780 		clr_qfull(wqp);
4781 		/*
		 * We come here for any pop of a module except for the
		 * case of the driver being removed. We don't call emptysq
		 * if we did not move any messages. This avoids holding
		 * PERMOD syncq locks in emptysq.
4786 		 */
4787 		if (moved > 0)
4788 			emptysq(wqp->q_next->q_syncq);
4789 	}
4790 
4791 	mutex_enter(SQLOCK(sq));
4792 	sq->sq_rmqcount--;
4793 	mutex_exit(SQLOCK(sq));
4794 }
4795 
4796 /*
4797  * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
4798  * SQ_WRITER) on a syncq.
 * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the
4800  * sync queue and waits until sq_count reaches maxcnt.
4801  *
4802  * if maxcnt is -1 there's no need to grab sq_putlocks since the caller
4803  * does not care about putnext threads that are in the middle of calling put
4804  * entry points.
4805  *
4806  * This routine is used for both inner and outer syncqs.
4807  */
4808 static void
4809 blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
4810 {
4811 	uint16_t count = 0;
4812 
4813 	mutex_enter(SQLOCK(sq));
4814 	/*
4815 	 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
4816 	 * SQ_FROZEN will be set if there is a frozen stream that has a
4817 	 * queue which also refers to this "shared" syncq.
	 * SQ_BLOCKED will be set if there is an "off" queue which also
4819 	 * refers to this "shared" syncq.
4820 	 */
4821 	if (maxcnt != -1) {
4822 		count = sq->sq_count;
4823 		SQ_PUTLOCKS_ENTER(sq);
4824 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4825 		SUM_SQ_PUTCOUNTS(sq, count);
4826 	}
4827 	sq->sq_needexcl++;
4828 	ASSERT(sq->sq_needexcl != 0);	/* wraparound */
4829 
4830 	while ((sq->sq_flags & flag) ||
4831 	    (maxcnt != -1 && count > (unsigned)maxcnt)) {
4832 		sq->sq_flags |= SQ_WANTWAKEUP;
4833 		if (maxcnt != -1) {
4834 			SQ_PUTLOCKS_EXIT(sq);
4835 		}
4836 		cv_wait(&sq->sq_wait, SQLOCK(sq));
4837 		if (maxcnt != -1) {
4838 			count = sq->sq_count;
4839 			SQ_PUTLOCKS_ENTER(sq);
4840 			SUM_SQ_PUTCOUNTS(sq, count);
4841 		}
4842 	}
4843 	sq->sq_needexcl--;
4844 	sq->sq_flags |= flag;
4845 	ASSERT(maxcnt == -1 || count == maxcnt);
4846 	if (maxcnt != -1) {
4847 		if (sq->sq_needexcl == 0) {
4848 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4849 		}
4850 		SQ_PUTLOCKS_EXIT(sq);
4851 	} else if (sq->sq_needexcl == 0) {
4852 		SQ_PUTCOUNT_SETFAST(sq);
4853 	}
4854 
4855 	mutex_exit(SQLOCK(sq));
4856 }
4857 
4858 /*
4859  * Reset a flag that was set with blocksq.
4860  *
4861  * Can not use this routine to reset SQ_WRITER.
4862  *
4863  * If "isouter" is set then the syncq is assumed to be an outer perimeter
4864  * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
4865  * to handle the queued qwriter operations.
4866  *
4867  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
4868  * sq_putlocks are used.
4869  */
4870 static void
4871 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
4872 {
4873 	uint16_t flags;
4874 
4875 	mutex_enter(SQLOCK(sq));
4876 	ASSERT(resetflag != SQ_WRITER);
4877 	ASSERT(sq->sq_flags & resetflag);
4878 	flags = sq->sq_flags & ~resetflag;
4879 	sq->sq_flags = flags;
4880 	if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
4881 		if (flags & SQ_WANTWAKEUP) {
4882 			flags &= ~SQ_WANTWAKEUP;
4883 			cv_broadcast(&sq->sq_wait);
4884 		}
4885 		sq->sq_flags = flags;
4886 		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
4887 			if (!isouter) {
4888 				/* drain_syncq drops SQLOCK */
4889 				drain_syncq(sq);
4890 				return;
4891 			}
4892 		}
4893 	}
4894 	mutex_exit(SQLOCK(sq));
4895 }
4896 
4897 /*
4898  * Reset a flag that was set with blocksq.
4899  * Does not drain the syncq. Use emptysq() for that.
4900  * Returns 1 if SQ_QUEUED is set. Otherwise 0.
4901  *
4902  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
4903  * sq_putlocks are used.
4904  */
4905 static int
4906 dropsq(syncq_t *sq, uint16_t resetflag)
4907 {
4908 	uint16_t flags;
4909 
4910 	mutex_enter(SQLOCK(sq));
4911 	ASSERT(sq->sq_flags & resetflag);
4912 	flags = sq->sq_flags & ~resetflag;
4913 	if (flags & SQ_WANTWAKEUP) {
4914 		flags &= ~SQ_WANTWAKEUP;
4915 		cv_broadcast(&sq->sq_wait);
4916 	}
4917 	sq->sq_flags = flags;
4918 	mutex_exit(SQLOCK(sq));
4919 	if (flags & SQ_QUEUED)
4920 		return (1);
4921 	return (0);
4922 }
4923 
4924 /*
4925  * Empty all the messages on a syncq.
4926  *
4927  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
4928  * sq_putlocks are used.
4929  */
4930 static void
4931 emptysq(syncq_t *sq)
4932 {
4933 	uint16_t flags;
4934 
4935 	mutex_enter(SQLOCK(sq));
4936 	flags = sq->sq_flags;
4937 	if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
4938 		/*
4939 		 * To prevent potential recursive invocation of drain_syncq we
4940 		 * do not call drain_syncq if count is non-zero.
4941 		 */
4942 		if (sq->sq_count == 0) {
4943 			/* drain_syncq() drops SQLOCK */
4944 			drain_syncq(sq);
4945 			return;
4946 		} else
4947 			sqenable(sq);
4948 	}
4949 	mutex_exit(SQLOCK(sq));
4950 }
4951 
4952 /*
4953  * Ordered insert while removing duplicates.
4954  */
4955 static void
4956 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
4957 {
4958 	syncql_t *sqlp, **prev_sqlpp, *new_sqlp;
4959 
4960 	prev_sqlpp = &sqlist->sqlist_head;
4961 	while ((sqlp = *prev_sqlpp) != NULL) {
4962 		if (sqlp->sql_sq >= sqp) {
4963 			if (sqlp->sql_sq == sqp)	/* duplicate */
4964 				return;
4965 			break;
4966 		}
4967 		prev_sqlpp = &sqlp->sql_next;
4968 	}
4969 	new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
4970 	ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
4971 	new_sqlp->sql_next = sqlp;
4972 	new_sqlp->sql_sq = sqp;
4973 	*prev_sqlpp = new_sqlp;
4974 }
4975 
4976 /*
4977  * Walk the write side queues until we hit either the driver
4978  * or a twist in the stream (_SAMESTR will return false in both
 * these cases), then turn around and walk the read side queues
4980  * back up to the stream head.
4981  */
4982 static void
4983 sqlist_insertall(sqlist_t *sqlist, queue_t *q)
4984 {
4985 	while (q != NULL) {
4986 		sqlist_insert(sqlist, q->q_syncq);
4987 
4988 		if (_SAMESTR(q))
4989 			q = q->q_next;
4990 		else if (!(q->q_flag & QREADR))
4991 			q = _RD(q);
4992 		else
4993 			q = NULL;
4994 	}
4995 }
4996 
4997 /*
4998  * Allocate and build a list of all syncqs in a stream and the syncq(s)
4999  * associated with the "q" parameter. The resulting list is sorted in a
5000  * canonical order and is free of duplicates.
5001  * Assumes the passed queue is a _RD(q).
5002  */
5003 static sqlist_t *
5004 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
5005 {
5006 	sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);
5007 
5008 	/*
5009 	 * start with the current queue/qpair
5010 	 */
5011 	ASSERT(q->q_flag & QREADR);
5012 
5013 	sqlist_insert(sqlist, q->q_syncq);
5014 	sqlist_insert(sqlist, _WR(q)->q_syncq);
5015 
5016 	sqlist_insertall(sqlist, stp->sd_wrq);
5017 	if (do_twist)
5018 		sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);
5019 
5020 	return (sqlist);
5021 }
5022 
5023 static sqlist_t *
5024 sqlist_alloc(struct stdata *stp, int kmflag)
5025 {
5026 	size_t sqlist_size;
5027 	sqlist_t *sqlist;
5028 
5029 	/*
5030 	 * Allocate 2 syncql_t's for each pushed module. Note that
5031 	 * the sqlist_t structure already has 4 syncql_t's built in:
5032 	 * 2 for the stream head, and 2 for the driver/other stream head.
5033 	 */
5034 	sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
	    sizeof (sqlist_t);
5036 	if (STRMATED(stp))
5037 		sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
5038 	sqlist = kmem_alloc(sqlist_size, kmflag);
5039 
5040 	sqlist->sqlist_head = NULL;
5041 	sqlist->sqlist_size = sqlist_size;
5042 	sqlist->sqlist_index = 0;
5043 
5044 	return (sqlist);
5045 }
5046 
5047 /*
5048  * Free the list created by sqlist_alloc()
5049  */
5050 static void
5051 sqlist_free(sqlist_t *sqlist)
5052 {
5053 	kmem_free(sqlist, sqlist->sqlist_size);
5054 }
5055 
5056 /*
5057  * Prevent any new entries into any syncq in this stream.
5058  * Used by freezestr.
5059  */
5060 void
5061 strblock(queue_t *q)
5062 {
5063 	struct stdata	*stp;
5064 	syncql_t	*sql;
5065 	sqlist_t	*sqlist;
5066 
5067 	q = _RD(q);
5068 
5069 	stp = STREAM(q);
5070 	ASSERT(stp != NULL);
5071 
5072 	/*
5073 	 * Get a sorted list with all the duplicates removed containing
5074 	 * all the syncqs referenced by this stream.
5075 	 */
5076 	sqlist = sqlist_build(q, stp, B_FALSE);
5077 	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5078 		blocksq(sql->sql_sq, SQ_FROZEN, -1);
5079 	sqlist_free(sqlist);
5080 }
5081 
5082 /*
5083  * Release the block on new entries into this stream
5084  */
5085 void
5086 strunblock(queue_t *q)
5087 {
5088 	struct stdata	*stp;
5089 	syncql_t	*sql;
5090 	sqlist_t	*sqlist;
5091 	int		drain_needed;
5092 
5093 	q = _RD(q);
5094 
5095 	/*
5096 	 * Get a sorted list with all the duplicates removed containing
5097 	 * all the syncqs referenced by this stream.
5098 	 * Have to drop the SQ_FROZEN flag on all the syncqs before
5099 	 * starting to drain them; otherwise the draining might
5100 	 * cause a freezestr in some module on the stream (which
5101 	 * would deadlock.)
5102 	 */
5103 	stp = STREAM(q);
5104 	ASSERT(stp != NULL);
5105 	sqlist = sqlist_build(q, stp, B_FALSE);
5106 	drain_needed = 0;
5107 	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5108 		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
5109 	if (drain_needed) {
5110 		for (sql = sqlist->sqlist_head; sql != NULL;
5111 		    sql = sql->sql_next)
5112 			emptysq(sql->sql_sq);
5113 	}
5114 	sqlist_free(sqlist);
5115 }
5116 
5117 #ifdef DEBUG
5118 static int
5119 qprocsareon(queue_t *rq)
5120 {
5121 	if (rq->q_next == NULL)
5122 		return (0);
5123 	return (_WR(rq->q_next)->q_next == _WR(rq));
5124 }
5125 
5126 int
5127 qclaimed(queue_t *q)
5128 {
5129 	uint_t count;
5130 
5131 	count = q->q_syncq->sq_count;
5132 	SUM_SQ_PUTCOUNTS(q->q_syncq, count);
5133 	return (count != 0);
5134 }
5135 
5136 /*
5137  * Check if anyone has frozen this stream with freezestr
5138  */
5139 int
5140 frozenstr(queue_t *q)
5141 {
5142 	return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
5143 }
5144 #endif /* DEBUG */
5145 
5146 /*
5147  * Enter a queue.
5148  * Obsoleted interface. Should not be used.
5149  */
5150 void
5151 enterq(queue_t *q)
5152 {
5153 	entersq(q->q_syncq, SQ_CALLBACK);
5154 }
5155 
5156 void
5157 leaveq(queue_t *q)
5158 {
5159 	leavesq(q->q_syncq, SQ_CALLBACK);
5160 }
5161 
5162 /*
 * Enter a perimeter. c_inner and c_outer specify which concurrency bits
5164  * to check.
5165  * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
5166  * calls and the running of open, close and service procedures.
5167  *
5168  * if c_inner bit is set no need to grab sq_putlocks since we don't care
5169  * if other threads have entered or are entering put entry point.
5170  *
 * if c_inner bit is set it might have been possible to use
 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
 * open/close path for IP), but since the count may need to be decremented
 * in qwait() we wouldn't know which counter to decrement. Currently the
 * counter is selected by the current cpu_seqid and the current CPU can
 * change at any moment. XXX
5176  * in the future we might use curthread id bits to select the counter and this
5177  * would stay constant across routine calls.
5178  */
5179 void
5180 entersq(syncq_t *sq, int entrypoint)
5181 {
5182 	uint16_t	count = 0;
5183 	uint16_t	flags;
5184 	uint16_t	waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
5185 	uint16_t	type;
5186 	uint_t		c_inner = entrypoint & SQ_CI;
5187 	uint_t		c_outer = entrypoint & SQ_CO;
5188 
5189 	/*
5190 	 * Increment ref count to keep closes out of this queue.
5191 	 */
5192 	ASSERT(sq);
5193 	ASSERT(c_inner && c_outer);
5194 	mutex_enter(SQLOCK(sq));
5195 	flags = sq->sq_flags;
5196 	type = sq->sq_type;
5197 	if (!(type & c_inner)) {
5198 		/* Make sure all putcounts now use slowlock. */
5199 		count = sq->sq_count;
5200 		SQ_PUTLOCKS_ENTER(sq);
5201 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5202 		SUM_SQ_PUTCOUNTS(sq, count);
5203 		sq->sq_needexcl++;
5204 		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
5205 		waitflags |= SQ_MESSAGES;
5206 	}
5207 	/*
5208 	 * Wait until we can enter the inner perimeter.
5209 	 * If we want exclusive access we wait until sq_count is 0.
5210 	 * We have to do this before entering the outer perimeter in order
5211 	 * to preserve put/close message ordering.
5212 	 */
5213 	while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
5214 		sq->sq_flags = flags | SQ_WANTWAKEUP;
5215 		if (!(type & c_inner)) {
5216 			SQ_PUTLOCKS_EXIT(sq);
5217 		}
5218 		cv_wait(&sq->sq_wait, SQLOCK(sq));
5219 		if (!(type & c_inner)) {
5220 			count = sq->sq_count;
5221 			SQ_PUTLOCKS_ENTER(sq);
5222 			SUM_SQ_PUTCOUNTS(sq, count);
5223 		}
5224 		flags = sq->sq_flags;
5225 	}
5226 
5227 	if (!(type & c_inner)) {
5228 		ASSERT(sq->sq_needexcl > 0);
5229 		sq->sq_needexcl--;
5230 		if (sq->sq_needexcl == 0) {
5231 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5232 		}
5233 	}
5234 
5235 	/* Check if we need to enter the outer perimeter */
5236 	if (!(type & c_outer)) {
5237 		/*
5238 		 * We have to enter the outer perimeter exclusively before
5239 		 * we can increment sq_count to avoid deadlock. This implies
5240 		 * that we have to re-check sq_flags and sq_count.
5241 		 *
5242 		 * is it possible to have c_inner set when c_outer is not set?
5243 		 */
5244 		if (!(type & c_inner)) {
5245 			SQ_PUTLOCKS_EXIT(sq);
5246 		}
5247 		mutex_exit(SQLOCK(sq));
5248 		outer_enter(sq->sq_outer, SQ_GOAWAY);
5249 		mutex_enter(SQLOCK(sq));
5250 		flags = sq->sq_flags;
5251 		/*
5252 		 * there should be no need to recheck sq_putcounts
5253 		 * because outer_enter() has already waited for them to clear
5254 		 * after setting SQ_WRITER.
5255 		 */
5256 		count = sq->sq_count;
5257 #ifdef DEBUG
5258 		/*
5259 		 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
5260 		 * of doing an ASSERT internally. Others should do
5261 		 * something like
5262 		 *	 ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
5263 		 * without the need to #ifdef DEBUG it.
5264 		 */
5265 		SUMCHECK_SQ_PUTCOUNTS(sq, 0);
5266 #endif
5267 		while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
5268 		    (!(type & c_inner) && count != 0)) {
5269 			sq->sq_flags = flags | SQ_WANTWAKEUP;
5270 			cv_wait(&sq->sq_wait, SQLOCK(sq));
5271 			count = sq->sq_count;
5272 			flags = sq->sq_flags;
5273 		}
5274 	}
5275 
5276 	sq->sq_count++;
5277 	ASSERT(sq->sq_count != 0);	/* Wraparound */
5278 	if (!(type & c_inner)) {
5279 		/* Exclusive entry */
5280 		ASSERT(sq->sq_count == 1);
5281 		sq->sq_flags |= SQ_EXCL;
5282 		if (type & c_outer) {
5283 			SQ_PUTLOCKS_EXIT(sq);
5284 		}
5285 	}
5286 	mutex_exit(SQLOCK(sq));
5287 }
5288 
5289 /*
 * Leave a syncq. Announce to the framework that closes may proceed.
 * c_inner and c_outer specify which concurrency bits
5292  * to check.
5293  *
5294  * must never be called from driver or module put entry point.
5295  *
5296  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5297  * sq_putlocks are used.
5298  */
5299 void
5300 leavesq(syncq_t *sq, int entrypoint)
5301 {
5302 	uint16_t	flags;
5303 	uint16_t	type;
5304 	uint_t		c_outer = entrypoint & SQ_CO;
5305 #ifdef DEBUG
5306 	uint_t		c_inner = entrypoint & SQ_CI;
5307 #endif
5308 
5309 	/*
5310 	 * decrement ref count, drain the syncq if possible, and wake up
5311 	 * any waiting close.
5312 	 */
5313 	ASSERT(sq);
5314 	ASSERT(c_inner && c_outer);
5315 	mutex_enter(SQLOCK(sq));
5316 	flags = sq->sq_flags;
5317 	type = sq->sq_type;
5318 	if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {
5319 
5320 		if (flags & SQ_WANTWAKEUP) {
5321 			flags &= ~SQ_WANTWAKEUP;
5322 			cv_broadcast(&sq->sq_wait);
5323 		}
5324 		if (flags & SQ_WANTEXWAKEUP) {
5325 			flags &= ~SQ_WANTEXWAKEUP;
5326 			cv_broadcast(&sq->sq_exitwait);
5327 		}
5328 
5329 		if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
5330 			/*
5331 			 * The syncq needs to be drained. "Exit" the syncq
5332 			 * before calling drain_syncq.
5333 			 */
5334 			ASSERT(sq->sq_count != 0);
5335 			sq->sq_count--;
5336 			ASSERT((flags & SQ_EXCL) || (type & c_inner));
5337 			sq->sq_flags = flags & ~SQ_EXCL;
5338 			drain_syncq(sq);
5339 			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
5340 			/* Check if we need to exit the outer perimeter */
5341 			/* XXX will this ever be true? */
5342 			if (!(type & c_outer))
5343 				outer_exit(sq->sq_outer);
5344 			return;
5345 		}
5346 	}
5347 	ASSERT(sq->sq_count != 0);
5348 	sq->sq_count--;
5349 	ASSERT((flags & SQ_EXCL) || (type & c_inner));
5350 	sq->sq_flags = flags & ~SQ_EXCL;
5351 	mutex_exit(SQLOCK(sq));
5352 
5353 	/* Check if we need to exit the outer perimeter */
5354 	if (!(sq->sq_type & c_outer))
5355 		outer_exit(sq->sq_outer);
5356 }
5357 
5358 /*
5359  * Prevent q_next from changing in this stream by incrementing sq_count.
5360  *
5361  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5362  * sq_putlocks are used.
5363  */
5364 void
5365 claimq(queue_t *qp)
5366 {
5367 	syncq_t	*sq = qp->q_syncq;
5368 
5369 	mutex_enter(SQLOCK(sq));
5370 	sq->sq_count++;
5371 	ASSERT(sq->sq_count != 0);	/* Wraparound */
5372 	mutex_exit(SQLOCK(sq));
5373 }
5374 
5375 /*
5376  * Undo claimq.
5377  *
5378  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5379  * sq_putlocks are used.
5380  */
5381 void
5382 releaseq(queue_t *qp)
5383 {
5384 	syncq_t	*sq = qp->q_syncq;
5385 	uint16_t flags;
5386 
5387 	mutex_enter(SQLOCK(sq));
5388 	ASSERT(sq->sq_count > 0);
5389 	sq->sq_count--;
5390 
5391 	flags = sq->sq_flags;
5392 	if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
5393 		if (flags & SQ_WANTWAKEUP) {
5394 			flags &= ~SQ_WANTWAKEUP;
5395 			cv_broadcast(&sq->sq_wait);
5396 		}
5397 		sq->sq_flags = flags;
5398 		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5399 			/*
5400 			 * To prevent potential recursive invocation of
5401 			 * drain_syncq we do not call drain_syncq if count is
5402 			 * non-zero.
5403 			 */
5404 			if (sq->sq_count == 0) {
5405 				drain_syncq(sq);
5406 				return;
5407 			} else
5408 				sqenable(sq);
5409 		}
5410 	}
5411 	mutex_exit(SQLOCK(sq));
5412 }
5413 
5414 /*
5415  * Prevent q_next from changing in this stream by incrementing sd_refcnt.
5416  */
5417 void
5418 claimstr(queue_t *qp)
5419 {
5420 	struct stdata *stp = STREAM(qp);
5421 
5422 	mutex_enter(&stp->sd_reflock);
5423 	stp->sd_refcnt++;
5424 	ASSERT(stp->sd_refcnt != 0);	/* Wraparound */
5425 	mutex_exit(&stp->sd_reflock);
5426 }
5427 
5428 /*
5429  * Undo claimstr.
5430  */
5431 void
5432 releasestr(queue_t *qp)
5433 {
5434 	struct stdata *stp = STREAM(qp);
5435 
5436 	mutex_enter(&stp->sd_reflock);
5437 	ASSERT(stp->sd_refcnt != 0);
5438 	stp->sd_refcnt--;
5439 	cv_broadcast(&stp->sd_monitor);
5440 	mutex_exit(&stp->sd_reflock);
5441 }
5442 
5443 static syncq_t *
5444 new_syncq(void)
5445 {
5446 	return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
5447 }
5448 
5449 static void
5450 free_syncq(syncq_t *sq)
5451 {
5452 	ASSERT(sq->sq_head == NULL);
5453 	ASSERT(sq->sq_outer == NULL);
5454 	ASSERT(sq->sq_callbpend == NULL);
5455 	ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
5456 	    (sq->sq_onext == sq && sq->sq_oprev == sq));
5457 
5458 	if (sq->sq_ciputctrl != NULL) {
5459 		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
5460 		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
5461 		    sq->sq_nciputctrl, 0);
5462 		ASSERT(ciputctrl_cache != NULL);
5463 		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
5464 	}
5465 
5466 	sq->sq_tail = NULL;
5467 	sq->sq_evhead = NULL;
5468 	sq->sq_evtail = NULL;
5469 	sq->sq_ciputctrl = NULL;
5470 	sq->sq_nciputctrl = 0;
5471 	sq->sq_count = 0;
5472 	sq->sq_rmqcount = 0;
5473 	sq->sq_callbflags = 0;
5474 	sq->sq_cancelid = 0;
5475 	sq->sq_next = NULL;
5476 	sq->sq_needexcl = 0;
5477 	sq->sq_svcflags = 0;
5478 	sq->sq_nqueues = 0;
5479 	sq->sq_pri = 0;
5480 	sq->sq_onext = NULL;
5481 	sq->sq_oprev = NULL;
5482 	sq->sq_flags = 0;
5483 	sq->sq_type = 0;
5484 	sq->sq_servcount = 0;
5485 
5486 	kmem_cache_free(syncq_cache, sq);
5487 }
5488 
5489 /* Outer perimeter code */
5490 
5491 /*
5492  * The outer syncq uses the fields and flags in the syncq slightly
5493  * differently from the inner syncqs.
5494  *	sq_count	Incremented when there are pending or running
5495  *			writers at the outer perimeter to prevent the set of
5496  *			inner syncqs that belong to the outer perimeter from
5497  *			changing.
5498  *	sq_head/tail	List of deferred qwriter(OUTER) operations.
5499  *
 *	SQ_BLOCKED	Set to prevent traversing of sq_next/sq_prev while
5501  *			inner syncqs are added to or removed from the
5502  *			outer perimeter.
 *	SQ_QUEUED	sq_head/tail has messages or events queued.
5504  *
5505  *	SQ_WRITER	A thread is currently traversing all the inner syncqs
5506  *			setting the SQ_WRITER flag.
5507  */
5508 
5509 /*
5510  * Get write access at the outer perimeter.
5511  * Note that read access is done by entersq, putnext, and put by simply
5512  * incrementing sq_count in the inner syncq.
5513  *
5514  * Waits until "flags" is no longer set in the outer to prevent multiple
5515  * threads from having write access at the same time. SQ_WRITER has to be part
5516  * of "flags".
5517  *
5518  * Increases sq_count on the outer syncq to keep away outer_insert/remove
5519  * until the outer_exit is finished.
5520  *
5521  * outer_enter is vulnerable to starvation since it does not prevent new
5522  * threads from entering the inner syncqs while it is waiting for sq_count to
5523  * go to zero.
5524  */
5525 void
5526 outer_enter(syncq_t *outer, uint16_t flags)
5527 {
5528 	syncq_t	*sq;
5529 	int	wait_needed;
5530 	uint16_t	count;
5531 
5532 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5533 	    outer->sq_oprev != NULL);
5534 	ASSERT(flags & SQ_WRITER);
5535 
5536 retry:
5537 	mutex_enter(SQLOCK(outer));
5538 	while (outer->sq_flags & flags) {
5539 		outer->sq_flags |= SQ_WANTWAKEUP;
5540 		cv_wait(&outer->sq_wait, SQLOCK(outer));
5541 	}
5542 
5543 	ASSERT(!(outer->sq_flags & SQ_WRITER));
5544 	outer->sq_flags |= SQ_WRITER;
5545 	outer->sq_count++;
5546 	ASSERT(outer->sq_count != 0);	/* wraparound */
5547 	wait_needed = 0;
5548 	/*
5549 	 * Set SQ_WRITER on all the inner syncqs while holding
5550 	 * the SQLOCK on the outer syncq. This ensures that the changing
5551 	 * of SQ_WRITER is atomic under the outer SQLOCK.
5552 	 */
5553 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5554 		mutex_enter(SQLOCK(sq));
5555 		count = sq->sq_count;
5556 		SQ_PUTLOCKS_ENTER(sq);
5557 		sq->sq_flags |= SQ_WRITER;
5558 		SUM_SQ_PUTCOUNTS(sq, count);
5559 		if (count != 0)
5560 			wait_needed = 1;
5561 		SQ_PUTLOCKS_EXIT(sq);
5562 		mutex_exit(SQLOCK(sq));
5563 	}
5564 	mutex_exit(SQLOCK(outer));
5565 
5566 	/*
5567 	 * Get everybody out of the syncqs sequentially.
5568 	 * Note that we don't actually need to acquire the PUTLOCKS, since
5569 	 * we have already cleared the fastbit, and set SQ_WRITER.  By
5570 	 * definition, the count cannot increase since putnext will
5571 	 * take the slowlock path (and the purpose of acquiring the
5572 	 * putlocks was to make sure it didn't increase while we were
5573 	 * waiting).
5574 	 *
5575 	 * Note that we still acquire the PUTLOCKS to be safe.
5576 	 */
5577 	if (wait_needed) {
5578 		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5579 			mutex_enter(SQLOCK(sq));
5580 			count = sq->sq_count;
5581 			SQ_PUTLOCKS_ENTER(sq);
5582 			SUM_SQ_PUTCOUNTS(sq, count);
5583 			while (count != 0) {
5584 				sq->sq_flags |= SQ_WANTWAKEUP;
5585 				SQ_PUTLOCKS_EXIT(sq);
5586 				cv_wait(&sq->sq_wait, SQLOCK(sq));
5587 				count = sq->sq_count;
5588 				SQ_PUTLOCKS_ENTER(sq);
5589 				SUM_SQ_PUTCOUNTS(sq, count);
5590 			}
5591 			SQ_PUTLOCKS_EXIT(sq);
5592 			mutex_exit(SQLOCK(sq));
5593 		}
5594 		/*
5595 		 * Verify that none of the flags got set while we
5596 		 * were waiting for the sq_counts to drop.
5597 		 * If this happens we exit and retry entering the
5598 		 * outer perimeter.
5599 		 */
5600 		mutex_enter(SQLOCK(outer));
5601 		if (outer->sq_flags & (flags & ~SQ_WRITER)) {
5602 			mutex_exit(SQLOCK(outer));
5603 			outer_exit(outer);
5604 			goto retry;
5605 		}
5606 		mutex_exit(SQLOCK(outer));
5607 	}
5608 }
5609 
5610 /*
5611  * Drop the write access at the outer perimeter.
5612  * Read access is dropped implicitly (by putnext, put, and leavesq) by
5613  * decrementing sq_count.
5614  */
5615 void
5616 outer_exit(syncq_t *outer)
5617 {
5618 	syncq_t	*sq;
5619 	int	 drain_needed;
5620 	uint16_t flags;
5621 
5622 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5623 	    outer->sq_oprev != NULL);
5624 	ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));
5625 
5626 	/*
5627 	 * Atomically (from the perspective of threads calling become_writer)
5628 	 * drop the write access at the outer perimeter by holding
5629 	 * SQLOCK(outer) across all the dropsq calls and the resetting of
5630 	 * SQ_WRITER.
5631 	 * This defines a locking order between the outer perimeter
5632 	 * SQLOCK and the inner perimeter SQLOCKs.
5633 	 */
5634 	mutex_enter(SQLOCK(outer));
5635 	flags = outer->sq_flags;
5636 	ASSERT(outer->sq_flags & SQ_WRITER);
5637 	if (flags & SQ_QUEUED) {
5638 		write_now(outer);
5639 		flags = outer->sq_flags;
5640 	}
5641 
5642 	/*
5643 	 * sq_onext is stable since sq_count has not yet been decreased.
5644 	 * Reset the SQ_WRITER flags in all syncqs.
5645 	 * After dropping SQ_WRITER on the outer syncq we empty all the
5646 	 * inner syncqs.
5647 	 */
5648 	drain_needed = 0;
5649 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5650 		drain_needed += dropsq(sq, SQ_WRITER);
5651 	ASSERT(!(outer->sq_flags & SQ_QUEUED));
5652 	flags &= ~SQ_WRITER;
5653 	if (drain_needed) {
5654 		outer->sq_flags = flags;
5655 		mutex_exit(SQLOCK(outer));
5656 		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5657 			emptysq(sq);
5658 		mutex_enter(SQLOCK(outer));
5659 		flags = outer->sq_flags;
5660 	}
5661 	if (flags & SQ_WANTWAKEUP) {
5662 		flags &= ~SQ_WANTWAKEUP;
5663 		cv_broadcast(&outer->sq_wait);
5664 	}
5665 	outer->sq_flags = flags;
5666 	ASSERT(outer->sq_count > 0);
5667 	outer->sq_count--;
5668 	mutex_exit(SQLOCK(outer));
5669 }
5670 
5671 /*
5672  * Add another syncq to an outer perimeter.
5673  * Block out all other access to the outer perimeter while it is being
5674  * changed using blocksq.
5675  * Assumes that the caller has *not* done an outer_enter.
5676  *
5677  * Vulnerable to starvation in blocksq.
5678  */
5679 static void
5680 outer_insert(syncq_t *outer, syncq_t *sq)
5681 {
5682 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5683 	    outer->sq_oprev != NULL);
5684 	ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
5685 	    sq->sq_oprev == NULL);	/* Can't be in an outer perimeter */
5686 
5687 	/* Get exclusive access to the outer perimeter list */
5688 	blocksq(outer, SQ_BLOCKED, 0);
5689 	ASSERT(outer->sq_flags & SQ_BLOCKED);
5690 	ASSERT(!(outer->sq_flags & SQ_WRITER));
5691 
5692 	mutex_enter(SQLOCK(sq));
5693 	sq->sq_outer = outer;
5694 	outer->sq_onext->sq_oprev = sq;
5695 	sq->sq_onext = outer->sq_onext;
5696 	outer->sq_onext = sq;
5697 	sq->sq_oprev = outer;
5698 	mutex_exit(SQLOCK(sq));
5699 	unblocksq(outer, SQ_BLOCKED, 1);
5700 }
5701 
5702 /*
5703  * Remove a syncq from an outer perimeter.
5704  * Block out all other access to the outer perimeter while it is being
5705  * changed using blocksq.
5706  * Assumes that the caller has *not* done an outer_enter.
5707  *
5708  * Vulnerable to starvation in blocksq.
5709  */
5710 static void
5711 outer_remove(syncq_t *outer, syncq_t *sq)
5712 {
5713 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5714 	    outer->sq_oprev != NULL);
5715 	ASSERT(sq->sq_outer == outer);
5716 
5717 	/* Get exclusive access to the outer perimeter list */
5718 	blocksq(outer, SQ_BLOCKED, 0);
5719 	ASSERT(outer->sq_flags & SQ_BLOCKED);
5720 	ASSERT(!(outer->sq_flags & SQ_WRITER));
5721 
5722 	mutex_enter(SQLOCK(sq));
5723 	sq->sq_outer = NULL;
5724 	sq->sq_onext->sq_oprev = sq->sq_oprev;
5725 	sq->sq_oprev->sq_onext = sq->sq_onext;
5726 	sq->sq_oprev = sq->sq_onext = NULL;
5727 	mutex_exit(SQLOCK(sq));
5728 	unblocksq(outer, SQ_BLOCKED, 1);
5729 }
5730 
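/*
 * Note that the inner syncqs of an outer perimeter form a circular,
 * doubly linked ring through sq_onext/sq_oprev, with the outer syncq
 * itself acting as the list head.  That is why the traversals in
 * outer_enter, outer_exit, and qwriter_outer all take the form
 * (a sketch; visit is a hypothetical operation):
 *
 *	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
 *		visit(sq);
 */
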
5731 /*
5732  * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
5733  * If this is the first callback for this outer perimeter then add
5734  * this outer perimeter to the list of outer perimeters that
5735  * the qwriter_outer_thread will process.
5736  *
5737  * Increments sq_count in the outer syncq to prevent the membership
5738  * of the outer perimeter (in terms of inner syncqs) to change while
5739  * the callback is pending.
5740  */
5741 static void
5742 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5743 {
5744 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
5745 
5746 	mp->b_prev = (mblk_t *)func;
5747 	mp->b_queue = q;
5748 	mp->b_next = NULL;
5749 	outer->sq_count++;	/* Decremented when dequeued */
5750 	ASSERT(outer->sq_count != 0);	/* Wraparound */
5751 	if (outer->sq_evhead == NULL) {
5752 		/* First message. */
5753 		outer->sq_evhead = outer->sq_evtail = mp;
5754 		outer->sq_flags |= SQ_EVENTS;
5755 		mutex_exit(SQLOCK(outer));
5756 		STRSTAT(qwr_outer);
5757 		(void) taskq_dispatch(streams_taskq,
5758 		    (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5759 	} else {
5760 		ASSERT(outer->sq_flags & SQ_EVENTS);
5761 		outer->sq_evtail->b_next = mp;
5762 		outer->sq_evtail = mp;
5763 		mutex_exit(SQLOCK(outer));
5764 	}
5765 }
5766 
5767 /*
5768  * Try and upgrade to write access at the outer perimeter. If this can
5769  * not be done without blocking then queue the callback to be done
5770  * by the qwriter_outer_thread.
5771  *
5772  * This routine can only be called from put or service procedures plus
5773  * asynchronous callback routines that have properly entered the
5774  * queue (with entersq).  Thus qwriter(OUTER) assumes the caller has one claim
5775  * on the syncq associated with q.
5776  */
5777 void
5778 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5779 {
5780 	syncq_t	*osq, *sq, *outer;
5781 	int	failed;
5782 	uint16_t flags;
5783 
5784 	osq = q->q_syncq;
5785 	outer = osq->sq_outer;
5786 	if (outer == NULL)
5787 		panic("qwriter(PERIM_OUTER): no outer perimeter");
5788 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5789 	    outer->sq_oprev != NULL);
5790 
5791 	mutex_enter(SQLOCK(outer));
5792 	flags = outer->sq_flags;
5793 	/*
5794 	 * If some thread is traversing sq_next, or if we are blocked by
5795 	 * outer_insert or outer_remove, or if we already have queued
5796 	 * callbacks, then queue this callback for later processing.
5797 	 *
5798 	 * Also queue the qwriter for an interrupt thread in order
5799 	 * to reduce the time spent running at high IPL.
5801 	 */
5802 	if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
5803 		/*
5804 		 * Queue the become_writer request.
5805 		 * The queueing is atomic under SQLOCK(outer) in order
5806 		 * to synchronize with outer_exit.
5807 		 * queue_writer will drop the outer SQLOCK
5808 		 */
5809 		if (flags & SQ_BLOCKED) {
5810 			/* Must set SQ_WRITER on inner perimeter */
5811 			mutex_enter(SQLOCK(osq));
5812 			osq->sq_flags |= SQ_WRITER;
5813 			mutex_exit(SQLOCK(osq));
5814 		} else {
5815 			if (!(flags & SQ_WRITER)) {
5816 				/*
5817 				 * The outer could have been SQ_BLOCKED thus
5818 				 * SQ_WRITER might not be set on the inner.
5819 				 */
5820 				mutex_enter(SQLOCK(osq));
5821 				osq->sq_flags |= SQ_WRITER;
5822 				mutex_exit(SQLOCK(osq));
5823 			}
5824 			ASSERT(osq->sq_flags & SQ_WRITER);
5825 		}
5826 		queue_writer(outer, func, q, mp);
5827 		return;
5828 	}
5829 	/*
5830 	 * We are half-way to exclusive access to the outer perimeter.
5831 	 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
5832 	 * while the inner syncqs are traversed.
5833 	 */
5834 	outer->sq_count++;
5835 	ASSERT(outer->sq_count != 0);	/* wraparound */
5836 	flags |= SQ_WRITER;
5837 	/*
5838 	 * Check if we can run the function immediately. Mark all
5839 	 * syncqs with the writer flag to prevent new entries into
5840 	 * put and service procedures.
5841 	 *
5842 	 * Set SQ_WRITER on all the inner syncqs while holding
5843 	 * the SQLOCK on the outer syncq. This ensures that the changing
5844 	 * of SQ_WRITER is atomic under the outer SQLOCK.
5845 	 */
5846 	failed = 0;
5847 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5848 		uint16_t count;
5849 		uint_t	maxcnt = (sq == osq) ? 1 : 0;
5850 
5851 		mutex_enter(SQLOCK(sq));
5852 		count = sq->sq_count;
5853 		SQ_PUTLOCKS_ENTER(sq);
5854 		SUM_SQ_PUTCOUNTS(sq, count);
5855 		if (sq->sq_count > maxcnt)
5856 			failed = 1;
5857 		sq->sq_flags |= SQ_WRITER;
5858 		SQ_PUTLOCKS_EXIT(sq);
5859 		mutex_exit(SQLOCK(sq));
5860 	}
5861 	if (failed) {
5862 		/*
5863 		 * Some other thread has a read claim on the outer perimeter.
5864 		 * Queue the callback for deferred processing.
5865 		 *
5866 		 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
5867 		 * so that other qwriter(OUTER) calls will queue their
5868 		 * callbacks as well. queue_writer increments sq_count so we
5869 		 * decrement to compensate for our increment.
5870 		 *
5871 		 * Dropping SQ_WRITER enables the writer thread to work
5872 		 * on this outer perimeter.
5873 		 */
5874 		outer->sq_flags = flags;
5875 		queue_writer(outer, func, q, mp);
5876 		/* queue_writer dropped the lock */
5877 		mutex_enter(SQLOCK(outer));
5878 		ASSERT(outer->sq_count > 0);
5879 		outer->sq_count--;
5880 		ASSERT(outer->sq_flags & SQ_WRITER);
5881 		flags = outer->sq_flags;
5882 		flags &= ~SQ_WRITER;
5883 		if (flags & SQ_WANTWAKEUP) {
5884 			flags &= ~SQ_WANTWAKEUP;
5885 			cv_broadcast(&outer->sq_wait);
5886 		}
5887 		outer->sq_flags = flags;
5888 		mutex_exit(SQLOCK(outer));
5889 		return;
5890 	} else {
5891 		outer->sq_flags = flags;
5892 		mutex_exit(SQLOCK(outer));
5893 	}
5894 
5895 	/* Can run it immediately */
5896 	(*func)(q, mp);
5897 
5898 	outer_exit(outer);
5899 }
5900 
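/*
 * Illustrative sketch (hypothetical module, not part of this file):
 * a put procedure that must update state shared across the whole
 * outer perimeter defers the work with qwriter(9F), which for
 * PERIM_OUTER resolves to qwriter_outer() above.  The callback runs
 * immediately if exclusive access can be had, otherwise later via
 * the streams taskq:
 *
 *	static void
 *	xx_reconfig(queue_t *q, mblk_t *mp)
 *	{
 *		struct xxstate *xsp = q->q_ptr;	(hypothetical state)
 *
 *		xsp->xx_generation++;	(exclusive: safe to update)
 *		miocack(q, mp, 0, 0);
 *	}
 *
 *	static int
 *	xxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (DB_TYPE(mp) == M_IOCTL) {
 *			qwriter(q, mp, xx_reconfig, PERIM_OUTER);
 *			return (0);
 *		}
 *		putnext(q, mp);
 *		return (0);
 *	}
 */
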
5901 /*
5902  * Dequeue all writer callbacks from the outer perimeter and run them.
5903  */
5904 static void
5905 write_now(syncq_t *outer)
5906 {
5907 	mblk_t		*mp;
5908 	queue_t		*q;
5909 	void	(*func)();
5910 
5911 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
5912 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5913 	    outer->sq_oprev != NULL);
5914 	while ((mp = outer->sq_evhead) != NULL) {
5915 		/*
5916 		 * queues cannot be placed on the queuelist on the outer
5917 		 * perimeter.
5918 		 */
5919 		ASSERT(!(outer->sq_flags & SQ_MESSAGES));
5920 		ASSERT((outer->sq_flags & SQ_EVENTS));
5921 
5922 		outer->sq_evhead = mp->b_next;
5923 		if (outer->sq_evhead == NULL) {
5924 			outer->sq_evtail = NULL;
5925 			outer->sq_flags &= ~SQ_EVENTS;
5926 		}
5927 		ASSERT(outer->sq_count != 0);
5928 		outer->sq_count--;	/* Incremented when enqueued. */
5929 		mutex_exit(SQLOCK(outer));
5930 		/*
5931 		 * Drop the message if the queue is closing.
5932 		 * Make sure that the queue is "claimed" when the callback
5933 		 * is run in order to satisfy various ASSERTs.
5934 		 */
5935 		q = mp->b_queue;
5936 		func = (void (*)())mp->b_prev;
5937 		ASSERT(func != NULL);
5938 		mp->b_next = mp->b_prev = NULL;
5939 		if (q->q_flag & QWCLOSE) {
5940 			freemsg(mp);
5941 		} else {
5942 			claimq(q);
5943 			(*func)(q, mp);
5944 			releaseq(q);
5945 		}
5946 		mutex_enter(SQLOCK(outer));
5947 	}
5948 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
5949 }
5950 
5951 /*
5952  * The list of messages on the inner syncq is effectively hashed
5953  * by destination queue.  These destination queues are doubly
5954  * linked lists (hopefully) in priority order.  Messages are then
5955  * put on the queue referenced by the q_sqhead/q_sqtail elements.
5956  * Additional messages are linked together by the b_next/b_prev
5957  * elements in the mblk, with (similar to putq()) the first message
5958  * having a NULL b_prev and the last message having a NULL b_next.
5959  *
5960  * Events, such as qwriter callbacks, are put onto a list in FIFO
5961  * order referenced by sq_evhead, and sq_evtail.  This is a singly
5962  * linked list, and messages here MUST be processed in the order queued.
5963  */
5964 
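/*
 * Illustrative sketch (not compiled): given the structure just
 * described, visiting every queued message on an inner syncq is a
 * walk of the queue list via q_sqnext and of each queue's message
 * chain via b_next.  Locking (SQLOCK and the QLOCKs) is omitted;
 * visit is a hypothetical operation:
 *
 *	queue_t	*q;
 *	mblk_t	*mp;
 *
 *	for (q = sq->sq_head; q != NULL; q = q->q_sqnext) {
 *		for (mp = q->q_sqhead; mp != NULL; mp = mp->b_next)
 *			visit(q, mp);
 *	}
 */
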
5965 /*
5966  * Run the events on the syncq event list (sq_evhead).
5967  * Assumes there is only one claim on the syncq, it is
5968  * already exclusive (SQ_EXCL set), and the SQLOCK held.
5969  * Messages here are processed in order, with the SQ_EXCL bit
5970  * held all the way through till the last message is processed.
5971  */
5972 void
5973 sq_run_events(syncq_t *sq)
5974 {
5975 	mblk_t		*bp;
5976 	queue_t		*qp;
5977 	uint16_t	flags = sq->sq_flags;
5978 	void		(*func)();
5979 
5980 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
5981 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
5982 		sq->sq_oprev == NULL) ||
5983 		(sq->sq_outer != NULL && sq->sq_onext != NULL &&
5984 		sq->sq_oprev != NULL));
5985 
5986 	ASSERT(flags & SQ_EXCL);
5987 	ASSERT(sq->sq_count == 1);
5988 
5989 	/*
5990 	 * We need to process all of the events on this list.  It
5991 	 * is possible that new events will be added while we are
5992 	 * away processing a callback, so on every loop we re-read
5993 	 * sq_evhead and start back at the beginning of the list.
5994 	 */
6000 	for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
6001 		ASSERT(bp->b_queue->q_syncq == sq);
6002 		ASSERT(sq->sq_flags & SQ_EVENTS);
6003 
6004 		qp = bp->b_queue;
6005 		func = (void (*)())bp->b_prev;
6006 		ASSERT(func != NULL);
6007 
6008 		/*
6009 		 * Messages from the event queue must be taken off in
6010 		 * FIFO order.
6011 		 */
6012 		ASSERT(sq->sq_evhead == bp);
6013 		sq->sq_evhead = bp->b_next;
6014 
6015 		if (bp->b_next == NULL) {
6016 			/* Deleting last */
6017 			ASSERT(sq->sq_evtail == bp);
6018 			sq->sq_evtail = NULL;
6019 			sq->sq_flags &= ~SQ_EVENTS;
6020 		}
6021 		bp->b_prev = bp->b_next = NULL;
6022 		ASSERT(bp->b_datap->db_ref != 0);
6023 
6024 		mutex_exit(SQLOCK(sq));
6025 
6026 		(*func)(qp, bp);
6027 
6028 		mutex_enter(SQLOCK(sq));
6029 		/*
6030 		 * re-read the flags, since they could have changed.
6031 		 */
6032 		flags = sq->sq_flags;
6033 		ASSERT(flags & SQ_EXCL);
6034 	}
6035 	ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6036 	ASSERT(!(sq->sq_flags & SQ_EVENTS));
6037 
6038 	if (flags & SQ_WANTWAKEUP) {
6039 		flags &= ~SQ_WANTWAKEUP;
6040 		cv_broadcast(&sq->sq_wait);
6041 	}
6042 	if (flags & SQ_WANTEXWAKEUP) {
6043 		flags &= ~SQ_WANTEXWAKEUP;
6044 		cv_broadcast(&sq->sq_exitwait);
6045 	}
6046 	sq->sq_flags = flags;
6047 }
6048 
6049 /*
6050  * Put messages on the event list.
6051  * If we can go exclusive now, do so and process the event list, otherwise
6052  * let the last claim service this list (or wake the sqthread).
6053  * This procedure assumes SQLOCK is held.  To run the event list, it
6054  * must be called with no claims.
6055  */
6056 static void
6057 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6058 {
6059 	uint16_t count;
6060 
6061 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6062 	ASSERT(func != NULL);
6063 
6064 	/*
6065 	 * This is a callback.  Add it to the list of callbacks
6066 	 * and see about upgrading.
6067 	 */
6068 	mp->b_prev = (mblk_t *)func;
6069 	mp->b_queue = q;
6070 	mp->b_next = NULL;
6071 	if (sq->sq_evhead == NULL) {
6072 		sq->sq_evhead = sq->sq_evtail = mp;
6073 		sq->sq_flags |= SQ_EVENTS;
6074 	} else {
6075 		ASSERT(sq->sq_evtail != NULL);
6076 		ASSERT(sq->sq_evtail->b_next == NULL);
6077 		ASSERT(sq->sq_flags & SQ_EVENTS);
6078 		sq->sq_evtail->b_next = mp;
6079 		sq->sq_evtail = mp;
6080 	}
6081 	/*
6082 	 * We have set SQ_EVENTS, so threads will have to
6083 	 * unwind out of the perimeter, and new entries will
6084 	 * not grab a putlock.  But we still need to know
6085 	 * how many threads have already made a claim to the
6086 	 * syncq, so grab the putlocks, and sum the counts.
6087 	 * If there are no claims on the syncq, we can upgrade
6088 	 * to exclusive, and run the event list.
6089 	 * NOTE: We hold the SQLOCK, so we can just grab the
6090 	 * putlocks.
6091 	 */
6092 	count = sq->sq_count;
6093 	SQ_PUTLOCKS_ENTER(sq);
6094 	SUM_SQ_PUTCOUNTS(sq, count);
6095 	/*
6096 	 * This thread holds no claim of its own (at least not on this
6097 	 * entry), so we check whether any other thread does.  If the
6098 	 * summed count is zero, there are no other claims and we can
6099 	 * upgrade to exclusive access and run the event list.  If some
6100 	 * other thread does hold a claim, that thread is responsible
6101 	 * for draining the syncq, so we just leave.
6102 	 */
6104 	if (count > 0) {
6105 		/*
6106 		 * Can't upgrade - other threads inside.
6107 		 */
6108 		SQ_PUTLOCKS_EXIT(sq);
6109 		mutex_exit(SQLOCK(sq));
6110 		return;
6111 	}
6112 	/*
6113 	 * Need to set SQ_EXCL and make a claim on the syncq.
6114 	 */
6115 	ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6116 	sq->sq_flags |= SQ_EXCL;
6117 	ASSERT(sq->sq_count == 0);
6118 	sq->sq_count++;
6119 	SQ_PUTLOCKS_EXIT(sq);
6120 
6121 	/* Process the events list */
6122 	sq_run_events(sq);
6123 
6124 	/*
6125 	 * Release our claim...
6126 	 */
6127 	sq->sq_count--;
6128 
6129 	/*
6130 	 * And release SQ_EXCL.
6131 	 * We don't need to acquire the putlocks to release
6132 	 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6133 	 */
6134 	sq->sq_flags &= ~SQ_EXCL;
6135 
6136 	/*
6137 	 * SQ_EXCL is now clear; we just dropped it above.
6138 	 */
6139 	ASSERT(!(sq->sq_flags & SQ_EXCL));
6140 
6141 	/*
6142 	 * If anything happened while we were running the
6143 	 * events (or was there before), we need to process
6144 	 * them now.  We shouldn't be exclusive since we
6145 	 * released the perimeter above (plus, we asserted
6146 	 * for it).
6147 	 */
6148 	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6149 		drain_syncq(sq);
6150 	else
6151 		mutex_exit(SQLOCK(sq));
6152 }
6153 
6154 /*
6155  * Perform delayed processing. The caller has to make sure that it is safe
6156  * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6157  * set.)
6158  *
6159  * Assume that the caller has NO claims on the syncq.  However, a claim
6160  * on the syncq does not indicate that a thread is draining the syncq.
6161  * There may be more claims on the syncq than there are threads draining
6162  * (i.e.  #_threads_draining <= sq_count)
6163  *
6164  * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6165  * in order to preserve qwriter(OUTER) ordering constraints.
6166  *
6167  * sq_putcount only needs to be checked when dispatching the queued
6168  * writer call for CIPUT sync queue, but this is handled in sq_run_events.
6169  */
6170 void
6171 drain_syncq(syncq_t *sq)
6172 {
6173 	queue_t		*qp;
6174 	uint16_t	count;
6175 	uint16_t	type = sq->sq_type;
6176 	uint16_t	flags = sq->sq_flags;
6177 	boolean_t	bg_service = sq->sq_svcflags & SQ_SERVICE;
6178 
6179 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6180 		"drain_syncq start:%p", sq);
6181 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6182 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6183 		sq->sq_oprev == NULL) ||
6184 		(sq->sq_outer != NULL && sq->sq_onext != NULL &&
6185 		sq->sq_oprev != NULL));
6186 
6187 	/*
6188 	 * Drop SQ_SERVICE flag.
6189 	 */
6190 	if (bg_service)
6191 		sq->sq_svcflags &= ~SQ_SERVICE;
6192 
6193 	/*
6194 	 * If SQ_EXCL is set, someone else is processing this syncq - let him
6195 	 * finish the job.
6196 	 */
6197 	if (flags & SQ_EXCL) {
6198 		if (bg_service) {
6199 			ASSERT(sq->sq_servcount != 0);
6200 			sq->sq_servcount--;
6201 		}
6202 		mutex_exit(SQLOCK(sq));
6203 		return;
6204 	}
6205 
6206 	/*
6207 	 * This routine can be called by a background thread if
6208 	 * it was scheduled by a hi-priority thread.  So, if there are
6209 	 * no messages queued, return (remember, we have the SQLOCK,
6210 	 * and it cannot change until we release it).  Also wake up any waiters.
6211 	 */
6212 	if (!(flags & SQ_QUEUED)) {
6213 		if (flags & SQ_WANTWAKEUP) {
6214 			flags &= ~SQ_WANTWAKEUP;
6215 			cv_broadcast(&sq->sq_wait);
6216 		}
6217 		if (flags & SQ_WANTEXWAKEUP) {
6218 			flags &= ~SQ_WANTEXWAKEUP;
6219 			cv_broadcast(&sq->sq_exitwait);
6220 		}
6221 		sq->sq_flags = flags;
6222 		if (bg_service) {
6223 			ASSERT(sq->sq_servcount != 0);
6224 			sq->sq_servcount--;
6225 		}
6226 		mutex_exit(SQLOCK(sq));
6227 		return;
6228 	}
6229 
6230 	/*
6231 	 * If this is not a concurrent put perimeter, we need to
6232 	 * become exclusive to drain.  Also, if not CIPUT, we would
6233 	 * not have acquired a putlock, so we don't need to check
6234 	 * the putcounts.  If not entering with a claim, we test
6235 	 * for sq_count == 0.
6236 	 */
6237 	type = sq->sq_type;
6238 	if (!(type & SQ_CIPUT)) {
6239 		if (sq->sq_count > 1) {
6240 			if (bg_service) {
6241 				ASSERT(sq->sq_servcount != 0);
6242 				sq->sq_servcount--;
6243 			}
6244 			mutex_exit(SQLOCK(sq));
6245 			return;
6246 		}
6247 		sq->sq_flags |= SQ_EXCL;
6248 	}
6249 
6250 	/*
6251 	 * This is where we make a claim to the syncq.
6252 	 * This can either be done by incrementing a putlock, or
6253 	 * the sq_count.  But since we already have the SQLOCK
6254 	 * here, we just bump the sq_count.
6255 	 *
6256 	 * Note that after we make a claim, we need to let the code
6257 	 * fall through to the end of this routine to clean itself
6258 	 * up.  A return in the while loop will put the syncq in a
6259 	 * very bad state.
6260 	 */
6261 	sq->sq_count++;
6262 	ASSERT(sq->sq_count != 0);	/* wraparound */
6263 
6264 	while ((flags = sq->sq_flags) & SQ_QUEUED) {
6265 		/*
6266 		 * If we are told to stayaway or went exclusive,
6267 		 * we are done.
6268 		 */
6269 		if (flags & (SQ_STAYAWAY)) {
6270 			break;
6271 		}
6272 
6273 		/*
6274 		 * If there are events to run, do so.
6275 		 * We have one claim to the syncq, so if there are
6276 		 * more than one, other threads are running.
6277 		 */
6278 		if (sq->sq_evhead != NULL) {
6279 			ASSERT(sq->sq_flags & SQ_EVENTS);
6280 
6281 			count = sq->sq_count;
6282 			SQ_PUTLOCKS_ENTER(sq);
6283 			SUM_SQ_PUTCOUNTS(sq, count);
6284 			if (count > 1) {
6285 				SQ_PUTLOCKS_EXIT(sq);
6286 				/* Can't upgrade - other threads inside */
6287 				break;
6288 			}
6289 			ASSERT((flags & SQ_EXCL) == 0);
6290 			sq->sq_flags = flags | SQ_EXCL;
6291 			SQ_PUTLOCKS_EXIT(sq);
6292 			/*
6293 			 * we have the only claim, run the events,
6294 			 * sq_run_events will clear the SQ_EXCL flag.
6295 			 */
6296 			sq_run_events(sq);
6297 
6298 			/*
6299 		 * If this is a CIPUT perimeter, we need
6300 			 * to drop the SQ_EXCL flag so we can properly
6301 			 * continue draining the syncq.
6302 			 */
6303 			if (type & SQ_CIPUT) {
6304 				ASSERT(sq->sq_flags & SQ_EXCL);
6305 				sq->sq_flags &= ~SQ_EXCL;
6306 			}
6307 
6308 			/*
6309 			 * And go back to the beginning just in case
6310 			 * anything changed while we were away.
6311 			 */
6312 			ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6313 			continue;
6314 		}
6315 
6316 		ASSERT(sq->sq_evhead == NULL);
6317 		ASSERT(!(sq->sq_flags & SQ_EVENTS));
6318 
6319 		/*
6320 		 * Find the queue that is not draining.
6321 		 *
6322 		 * q_draining is protected by QLOCK which we do not hold.
6323 		 * But if it was set, then a thread was draining, and if it gets
6324 		 * cleared, then it was because the thread has successfully
6325 		 * drained the syncq, or a GOAWAY state occurred.  For the GOAWAY
6326 		 * state to happen, a thread needs the SQLOCK which we hold, and
6327 		 * if there was such a flag, we would have already seen it.
6328 		 */
6329 
6330 		for (qp = sq->sq_head;
6331 		    qp != NULL && (qp->q_draining ||
6332 			(qp->q_sqflags & Q_SQDRAINING));
6333 		    qp = qp->q_sqnext)
6334 			;
6335 
6336 		if (qp == NULL)
6337 			break;
6338 
6339 		/*
6340 		 * We have a queue to work on, and we hold the
6341 		 * SQLOCK and one claim, call qdrain_syncq.
6342 		 * This means we need to release the SQLOCK and
6343 		 * acquire the QLOCK (OK since we have a claim).
6344 		 * Note that qdrain_syncq will actually dequeue
6345 		 * this queue from the sq_head list when it is
6346 		 * convinced all the work is done and release
6347 		 * the QLOCK before returning.
6348 		 */
6349 		qp->q_sqflags |= Q_SQDRAINING;
6350 		mutex_exit(SQLOCK(sq));
6351 		mutex_enter(QLOCK(qp));
6352 		qdrain_syncq(sq, qp);
6353 		mutex_enter(SQLOCK(sq));
6354 
6355 		/* The queue is drained */
6356 		ASSERT(qp->q_sqflags & Q_SQDRAINING);
6357 		qp->q_sqflags &= ~Q_SQDRAINING;
6358 		/*
6359 		 * NOTE: After this point qp should not be used since it may be
6360 		 * closed.
6361 		 */
6362 	}
6363 
6364 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6365 	flags = sq->sq_flags;
6366 
6367 	/*
6368 	 * sq->sq_head cannot change because we hold the
6369 	 * sqlock. However, a thread CAN decide that it is no longer
6370 	 * going to drain that queue.  That should only be due to
6371 	 * a GOAWAY state, and we should see that here.
6372 	 *
6373 	 * This loop is not very efficient. One solution may be adding a second
6374 	 * pointer to the "draining" queue, but it is difficult to do when
6375 	 * queues are inserted in the middle due to priority ordering. Another
6376 	 * possibility is to yank the queue out of the sq list and put it onto
6377 	 * the "draining list" and then put it back if it can't be drained.
6378 	 */
6379 
6380 	ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6381 		(type & SQ_CI) || sq->sq_head->q_draining);
6382 
6383 	/* Drop SQ_EXCL for non-CIPUT perimeters */
6384 	if (!(type & SQ_CIPUT))
6385 		flags &= ~SQ_EXCL;
6386 	ASSERT((flags & SQ_EXCL) == 0);
6387 
6388 	/* Wake up any waiters. */
6389 	if (flags & SQ_WANTWAKEUP) {
6390 		flags &= ~SQ_WANTWAKEUP;
6391 		cv_broadcast(&sq->sq_wait);
6392 	}
6393 	if (flags & SQ_WANTEXWAKEUP) {
6394 		flags &= ~SQ_WANTEXWAKEUP;
6395 		cv_broadcast(&sq->sq_exitwait);
6396 	}
6397 	sq->sq_flags = flags;
6398 
6399 	ASSERT(sq->sq_count != 0);
6400 	/* Release our claim. */
6401 	sq->sq_count--;
6402 
6403 	if (bg_service) {
6404 		ASSERT(sq->sq_servcount != 0);
6405 		sq->sq_servcount--;
6406 	}
6407 
6408 	mutex_exit(SQLOCK(sq));
6409 
6410 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6411 		"drain_syncq end:%p", sq);
6412 }
6413 
6414 
6415 /*
6416  *
6417  * qdrain_syncq can be called (currently) from only one of two places:
6418  *	drain_syncq
6419  * 	putnext  (or some variation of it).
6420  * and eventually
6421  * 	qwait(_sig)
6422  *
6423  * If called from drain_syncq, we found it in the list
6424  * of queues needing service, so there is work to be done (or it
6425  * wouldn't be on the list).
6426  *
6427  * If called from some putnext variation, it was because the
6428  * perimeter is open, but messages are blocking a putnext and
6429  * there is not a thread working on it.  Now a thread could start
6430  * working on it while we are getting ready to do so ourselves, but
6431  * the thread would set the q_draining flag, and we can spin out.
6432  *
6433  * As for qwait(_sig), I think I shall let it continue to call
6434  * drain_syncq directly (after all, it will get here eventually).
6435  *
6436  * qdrain_syncq has to terminate when:
6437  * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6438  * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6439  *
6440  * ASSUMES:
6441  *	One claim
6442  * 	QLOCK held
6443  * 	SQLOCK not held
6444  *	Will release QLOCK before returning
6445  */
6446 void
6447 qdrain_syncq(syncq_t *sq, queue_t *q)
6448 {
6449 	mblk_t		*bp;
6450 	boolean_t	do_clr;
6451 #ifdef DEBUG
6452 	uint16_t	count;
6453 #endif
6454 
6455 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6456 		"drain_syncq start:%p", sq);
6457 	ASSERT(q->q_syncq == sq);
6458 	ASSERT(MUTEX_HELD(QLOCK(q)));
6459 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6460 	/*
6461 	 * For non-CIPUT perimeters, we should be called with the
6462 	 * exclusive bit set already.  For CIPUT perimeters we
6463 	 * will be doing a concurrent drain, so it better not be set.
6464 	 */
6465 	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6466 	ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6467 	ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6468 	/*
6469 	 * All outer pointers are set, or none of them are
6470 	 */
6471 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6472 		sq->sq_oprev == NULL) ||
6473 		(sq->sq_outer != NULL && sq->sq_onext != NULL &&
6474 		sq->sq_oprev != NULL));
6475 #ifdef DEBUG
6476 	count = sq->sq_count;
6477 	/*
6478 	 * This is OK without the putlocks, because we have one
6479 	 * claim either from the sq_count, or a putcount.  We could
6480 	 * get an erroneous value from other counts, but ours won't
6481 	 * change, so one way or another, we will have at least a
6482 	 * value of one.
6483 	 */
6484 	SUM_SQ_PUTCOUNTS(sq, count);
6485 	ASSERT(count >= 1);
6486 #endif /* DEBUG */
6487 
6488 	/*
6489 	 * The first thing to do here, is find out if a thread is already
6490 	 * draining this queue or the queue is closing. If so, we are done,
6491 	 * just return. Also, if there are no messages, we are done as well.
6492 	 * Note that we check the q_sqhead since there is s window of
6493 	 * Note that we check the q_sqhead since there is a window of
6494 	 * not anymore.
6495 	 */
6496 	if (q->q_draining || (q->q_sqhead == NULL)) {
6497 		mutex_exit(QLOCK(q));
6498 		return;
6499 	}
6500 
6501 	/*
6502 	 * If the perimeter is exclusive, there is nothing we can
6503 	 * do right now, go away.
6504 	 * Note that there is nothing to prevent this case from changing
6505 	 * right after this check, but the spin-out will catch it.
6506 	 */
6507 
6508 	/* Tell other threads that we are draining this queue */
6509 	q->q_draining = 1;	/* Protected by QLOCK */
6510 
6511 	for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6512 
6513 		/*
6514 		 * Because we can enter this routine just because
6515 		 * a putnext is blocked, we need to spin out if
6516 		 * the perimeter wants to go exclusive as well
6517 		 * as just blocked.  We need to spin out also if
6518 		 * events are queued on the syncq.
6519 		 * Don't check for SQ_EXCL, because non-CIPUT
6520 		 * perimeters would set it, and it can't become
6521 		 * exclusive while we hold a claim.
6522 		 */
6523 		if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6524 			break;
6525 		}
6526 
6527 #ifdef DEBUG
6528 		/*
6529 		 * Since we are in qdrain_syncq, we already know the queue,
6530 		 * but for sanity, we want to check this against the qp that
6531 		 * was passed in by bp->b_queue.
6532 		 */
6533 
6534 		ASSERT(bp->b_queue == q);
6535 		ASSERT(bp->b_queue->q_syncq == sq);
6536 		bp->b_queue = NULL;
6537 
6538 		/*
6539 		 * We would have the following check in the DEBUG code:
6540 		 *
6541 		 * if (bp->b_prev != NULL)  {
6542 		 *	ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6543 		 * }
6544 		 *
6545 		 * This can't be done, however, since IP modifies qinfo
6546 		 * structure at run-time (switching between IPv4 qinfo and IPv6
6547 		 * qinfo), invalidating the check.
6548 		 * So the assignment to func is left here, but the ASSERT itself
6549 		 * is removed until the whole issue is resolved.
6550 		 */
6551 #endif
6552 		ASSERT(q->q_sqhead == bp);
6553 		q->q_sqhead = bp->b_next;
6554 		bp->b_prev = bp->b_next = NULL;
6555 		ASSERT(q->q_syncqmsgs > 0);
6556 		mutex_exit(QLOCK(q));
6557 
6558 		ASSERT(bp->b_datap->db_ref != 0);
6559 
6560 		(void) (*q->q_qinfo->qi_putp)(q, bp);
6561 
6562 		mutex_enter(QLOCK(q));
6563 		/*
6564 		 * We should decrement q_syncqmsgs only after executing the
6565 		 * put procedure to avoid a possible race with putnext().
6566 		 * In putnext(), even though it sees Q_SQQUEUED is set, there is
6567 		 * an optimization which allows putnext to call the put
6568 		 * procedure directly if (q_syncqmsgs == 0), and thus
6569 		 * a message reordering could otherwise occur.
6570 		 */
6571 		q->q_syncqmsgs--;
6572 
6573 		/*
6574 		 * Clear QFULL in the next service procedure queue if
6575 		 * this is the last message destined to that queue.
6576 		 *
6577 		 * It would make better sense to have some sort of
6578 		 * tunable for the low water mark, but these semantics
6579 		 * are not yet defined.  So, alas, we use a constant.
6580 		 */
6581 		do_clr = (q->q_syncqmsgs == 0);
6582 		mutex_exit(QLOCK(q));
6583 
6584 		if (do_clr)
6585 			clr_qfull(q);
6586 
6587 		mutex_enter(QLOCK(q));
6588 		/*
6589 		 * Always clear SQ_EXCL when CIPUT in order to handle
6590 		 * qwriter(INNER).
6591 		 */
6592 		/*
6593 		 * The putp() can call qwriter and get exclusive access
6594 		 * IFF this is the only claim.  So, we need to test for
6595 		 * this possibility so we can acquire the mutex and clear
6596 		 * the bit.
6597 		 */
6598 		if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6599 			mutex_enter(SQLOCK(sq));
6600 			sq->sq_flags &= ~SQ_EXCL;
6601 			mutex_exit(SQLOCK(sq));
6602 		}
6603 	}
6604 
6605 	/*
6606 	 * We should either have no queues on the syncq, or we were
6607 	 * told to goaway by a waiter (which we will wake up at the
6608 	 * end of this function).
6609 	 */
6610 	ASSERT((q->q_sqhead == NULL) ||
6611 	    (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6612 
6613 	ASSERT(MUTEX_HELD(QLOCK(q)));
6614 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6615 
6616 	/*
6617 	 * Remove the q from the syncq list if all the messages are
6618 	 * drained.
6619 	 */
6620 	if (q->q_sqhead == NULL) {
6621 		mutex_enter(SQLOCK(sq));
6622 		if (q->q_sqflags & Q_SQQUEUED)
6623 			SQRM_Q(sq, q);
6624 		mutex_exit(SQLOCK(sq));
6625 		/*
6626 		 * Since the queue is removed from the list, reset its priority.
6627 		 */
6628 		q->q_spri = 0;
6629 	}
6630 
6631 	/*
6632 	 * Remember, the q_draining flag is used to let another
6633 	 * thread know that there is a thread currently draining
6634 	 * the messages for a queue.  Since we are now done with
6635 	 * this queue (even if there may be messages still there),
6636 	 * we need to clear this flag so some thread will work
6637 	 * on it if needed.
6638 	 */
6639 	ASSERT(q->q_draining);
6640 	q->q_draining = 0;
6641 
6642 	/* called with a claim, so OK to drop all locks. */
6643 	mutex_exit(QLOCK(q));
6644 
6645 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6646 		"drain_syncq end:%p", sq);
6647 }
6648 /* END OF QDRAIN_SYNCQ  */
6649 
6650 
6651 /*
6652  * This is the mate to qdrain_syncq, except that it is putting the
6653  * message onto the queue instead of draining it.  Since the
6654  * message is destined for the queue that is selected, there is
6655  * no need to identify the function because the message is
6656  * intended for the put routine for the queue.  But this
6657  * routine will do it anyway just in case (but only for debug kernels).
6658  *
6659  * After the message is enqueued on the syncq, it calls putnext_tail()
6660  * which will schedule a background thread to actually process the message.
6661  *
6662  * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6663  * SQLOCK(sq) and QLOCK(q) are not held.
6664  */
6665 void
6666 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6667 {
6668 	queue_t		*fq = NULL;
6669 
6670 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6671 	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6672 	ASSERT(sq->sq_count > 0);
6673 	ASSERT(q->q_syncq == sq);
6674 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6675 		sq->sq_oprev == NULL) ||
6676 		(sq->sq_outer != NULL && sq->sq_onext != NULL &&
6677 		sq->sq_oprev != NULL));
6678 
6679 	mutex_enter(QLOCK(q));
6680 
6681 	/*
6682 	 * Set QFULL in next service procedure queue (that cares) if not
6683 	 * already set and if there are already more messages on the syncq
6684 	 * than sq_max_size.  If sq_max_size is 0, no flow control will be
6685 	 * asserted on any syncq.
6686 	 *
6687 	 * The fq here is the next queue with a service procedure.
6688 	 * This is where we would fail canputnext, so this is where we
6689 	 * need to set QFULL.
6690 	 *
6691 	 * LOCKING HIERARCHY: In the case when fq != q we need to
6692 	 *  a) Take QLOCK(fq) to set QFULL flag and
6693 	 *  b) Take sd_reflock in the case of the hot stream to update
6694 	 *  	sd_refcnt.
6695 	 * We already have QLOCK at this point. To avoid cross-locks with
6696 	 * freezestr() which grabs all QLOCKs and with strlock() which grabs
6697 	 * both SQLOCK and sd_reflock, we need to drop respective locks first.
6698 	 */
6699 	if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
6700 	    (q->q_syncqmsgs > sq_max_size)) {
6701 		if ((fq = q->q_nfsrv) == q) {
6702 			fq->q_flag |= QFULL;
6703 		} else {
6704 			mutex_exit(QLOCK(q));
6705 			mutex_enter(QLOCK(fq));
6706 			fq->q_flag |= QFULL;
6707 			mutex_exit(QLOCK(fq));
6708 			mutex_enter(QLOCK(q));
6709 		}
6710 	}
6711 
6712 #ifdef DEBUG
6713 	/*
6714 	 * This is used for debug in the qfill_syncq/qdrain_syncq case
6715 	 * to trace the queue that the message is intended for.  Note
6716 	 * that the original use was to identify the queue and function
6717 	 * to call on the drain.  In the new syncq, we have the context
6718 	 * of the queue that we are draining, so call its putproc and
6719 	 * don't rely on the saved values.  But for debug this is still
6720 	 * useful information.
6721 	 */
6722 	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6723 	mp->b_queue = q;
6724 	mp->b_next = NULL;
6725 #endif
6726 	ASSERT(q->q_syncq == sq);
6727 	/*
6728 	 * Enqueue the message on the list.
6729 	 * SQPUT_MP() accesses q_syncqmsgs.  We are already holding QLOCK to
6730 	 * protect it.  So it's OK to acquire SQLOCK after SQPUT_MP().
6731 	 */
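	/*
	 * Conceptually (a sketch, not the macro's actual text),
	 * SQPUT_MP() appends the message to the queue's syncq chain
	 * and accounts for it:
	 *
	 *	if (q->q_sqhead == NULL)
	 *		q->q_sqhead = q->q_sqtail = mp;
	 *	else {
	 *		q->q_sqtail->b_next = mp;
	 *		q->q_sqtail = mp;
	 *	}
	 *	q->q_syncqmsgs++;
	 */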
6732 	SQPUT_MP(q, mp);
6733 	mutex_enter(SQLOCK(sq));
6734 
6735 	/*
6736 	 * And queue on syncq for scheduling, if not already queued.
6737 	 * Note that we need the SQLOCK for this, and for testing flags
6738 	 * at the end to see if we will drain.  So grab it now, and
6739 	 * release it before we call qdrain_syncq or return.
6740 	 */
6741 	if (!(q->q_sqflags & Q_SQQUEUED)) {
6742 		q->q_spri = curthread->t_pri;
6743 		SQPUT_Q(sq, q);
6744 	}
6745 #ifdef DEBUG
6746 	else {
6747 		/*
6748 		 * All of these conditions MUST be true!
6749 		 */
6750 		ASSERT(sq->sq_tail != NULL);
6751 		if (sq->sq_tail == sq->sq_head) {
6752 			ASSERT((q->q_sqprev == NULL) &&
6753 			    (q->q_sqnext == NULL));
6754 		} else {
6755 			ASSERT((q->q_sqprev != NULL) ||
6756 			    (q->q_sqnext != NULL));
6757 		}
6758 		ASSERT(sq->sq_flags & SQ_QUEUED);
6759 		ASSERT(q->q_syncqmsgs != 0);
6760 		ASSERT(q->q_sqflags & Q_SQQUEUED);
6761 	}
6762 #endif
6763 	mutex_exit(QLOCK(q));
6764 	/*
6765 	 * SQLOCK is still held, so sq_count can be safely decremented.
6766 	 */
6767 	sq->sq_count--;
6768 
6769 	putnext_tail(sq, q, 0);
6770 	/* Should not reference sq or q after this point. */
6771 }
6772 
6773 /*  End of qfill_syncq  */
6774 
6775 /*
6776  * Remove all messages from a syncq (if qp is NULL) or remove all messages
6777  * that would be put into qp by drain_syncq.
6778  * Used when deleting the syncq (qp == NULL) or when detaching
6779  * a queue (qp != NULL).
6780  * Return non-zero if one or more messages were freed.
6781  *
6782  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
6783  * sq_putlocks are used.
6784  *
6785  * NOTE: This function assumes that it is called from the close() context and
6786  * that all the queues in the syncq are going away. For this reason it doesn't
6787  * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6788  * currently valid, but it would be useful to rethink this function so that it
6789  * behaves properly in other cases.
6790  */
6791 int
6792 flush_syncq(syncq_t *sq, queue_t *qp)
6793 {
6794 	mblk_t		*bp, *mp_head, *mp_next, *mp_prev;
6795 	queue_t		*q;
6796 	int		ret = 0;
6797 
6798 	mutex_enter(SQLOCK(sq));
6799 
6800 	/*
6801 	 * Before we leave, we need to make sure there are no
6802 	 * events listed for this queue.  All events for this queue
6803 	 * will just be freed.
6804 	 */
6805 	if (qp != NULL && sq->sq_evhead != NULL) {
6806 		ASSERT(sq->sq_flags & SQ_EVENTS);
6807 
6808 		mp_prev = NULL;
6809 		for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6810 			mp_next = bp->b_next;
6811 			if (bp->b_queue == qp) {
6812 				/* Delete this message */
6813 				if (mp_prev != NULL) {
6814 					mp_prev->b_next = mp_next;
6815 					/*
6816 					 * Update sq_evtail if the last element
6817 					 * is removed.
6818 					 */
6819 					if (bp == sq->sq_evtail) {
6820 						ASSERT(mp_next == NULL);
6821 						sq->sq_evtail = mp_prev;
6822 					}
6823 				} else
6824 					sq->sq_evhead = mp_next;
6825 				if (sq->sq_evhead == NULL)
6826 					sq->sq_flags &= ~SQ_EVENTS;
6827 				bp->b_prev = bp->b_next = NULL;
6828 				freemsg(bp);
6829 				ret++;
6830 			} else {
6831 				mp_prev = bp;
6832 			}
6833 		}
6834 	}
6835 
6836 	/*
6837 	 * Walk sq_head and:
6838  *	- match qp if qp is set, remove its messages
6839 	 *	- all if qp is not set
6840 	 */
6841 	q = sq->sq_head;
6842 	while (q != NULL) {
6843 		ASSERT(q->q_syncq == sq);
6844 		if ((qp == NULL) || (qp == q)) {
6845 			/*
6846 			 * Yank the messages as a list off the queue
6847 			 */
6848 			mp_head = q->q_sqhead;
6849 			/*
6850 			 * We do not have QLOCK(q) here (which is safe due to
6851 			 * assumptions mentioned above). To obtain the lock we
6852 			 * need to release SQLOCK which may allow lots of things
6853 			 * to change upon us. This place requires more analysis.
6854 			 */
6855 			q->q_sqhead = q->q_sqtail = NULL;
6856 			ASSERT(mp_head->b_queue &&
6857 			    mp_head->b_queue->q_syncq == sq);
6858 
6859 			/*
6860 			 * Free each of the messages.
6861 			 */
6862 			for (bp = mp_head; bp != NULL; bp = mp_next) {
6863 				mp_next = bp->b_next;
6864 				bp->b_prev = bp->b_next = NULL;
6865 				freemsg(bp);
6866 				ret++;
6867 			}
6868 			/*
6869 			 * Now remove the queue from the syncq.
6870 			 */
6871 			ASSERT(q->q_sqflags & Q_SQQUEUED);
6872 			SQRM_Q(sq, q);
6873 			q->q_spri = 0;
6874 			q->q_syncqmsgs = 0;
6875 
6876 			/*
6877 			 * If qp was specified, we are done with it and are
6878 			 * going to drop SQLOCK(sq) and return. We wakeup syncq
6879 			 * waiters while we still have the SQLOCK.
6880 			 */
6881 			if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
6882 				sq->sq_flags &= ~SQ_WANTWAKEUP;
6883 				cv_broadcast(&sq->sq_wait);
6884 			}
6885 			/* Drop SQLOCK across clr_qfull */
6886 			mutex_exit(SQLOCK(sq));
6887 
6888 			/*
6889 			 * We avoid doing the test that drain_syncq does and
6890 			 * unconditionally clear qfull for every flushed
6891 			 * message. Since flush_syncq is only called during
6892 			 * close this should not be a problem.
6893 			 */
6894 			clr_qfull(q);
6895 			if (qp != NULL) {
6896 				return (ret);
6897 			} else {
6898 				mutex_enter(SQLOCK(sq));
6899 				/*
6900 				 * The head was removed by SQRM_Q above.
6901 				 * Re-read the new head and flush it.
6902 				 */
6903 				q = sq->sq_head;
6904 			}
6905 		} else {
6906 			q = q->q_sqnext;
6907 		}
6908 		ASSERT(MUTEX_HELD(SQLOCK(sq)));
6909 	}
6910 
6911 	if (sq->sq_flags & SQ_WANTWAKEUP) {
6912 		sq->sq_flags &= ~SQ_WANTWAKEUP;
6913 		cv_broadcast(&sq->sq_wait);
6914 	}
6915 
6916 	mutex_exit(SQLOCK(sq));
6917 	return (ret);
6918 }
6919 
6920 /*
6921  * Propagate all messages from a syncq to the next syncq that are associated
6922  * with the specified queue. If the queue is attached to a driver or if the
6923  * messages have been added due to a qwriter(PERIM_INNER), free the messages.
6924  *
6925  * Assumes that the stream is strlock()'ed. We don't come here if there
6926  * are no messages to propagate.
6927  *
6928  * NOTE : If the queue is attached to a driver, all the messages are freed
6929  * as there is no point in propagating the messages from the driver syncq
6930  * to the closing stream head which will in turn get freed later.
6931  */
6932 static int
6933 propagate_syncq(queue_t *qp)
6934 {
6935 	mblk_t		*bp, *head, *tail, *prev, *next;
6936 	syncq_t 	*sq;
6937 	queue_t		*nqp;
6938 	syncq_t		*nsq;
6939 	boolean_t	isdriver;
6940 	int 		moved = 0;
6941 	uint16_t	flags;
6942 	pri_t		priority = curthread->t_pri;
6943 #ifdef DEBUG
6944 	void		(*func)();
6945 #endif
6946 
6947 	sq = qp->q_syncq;
6948 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6949 	/* debug macro */
6950 	SQ_PUTLOCKS_HELD(sq);
6951 	/*
6952 	 * As entersq() does not increment the sq_count for
6953 	 * the write side, check sq_count for non-QPERQ
6954 	 * perimeters alone.
6955 	 */
6956 	ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
6957 
6958 	/*
6959 	 * propagate_syncq() can be called either because of messages on the
6960 	 * queue syncq or because of events on the queue syncq.  Do the actual
6961 	 * message propagation if there are any messages.
6962 	 */
6963 	if (qp->q_syncqmsgs) {
6964 		isdriver = (qp->q_flag & QISDRV);
6965 
6966 		if (!isdriver) {
6967 			nqp = qp->q_next;
6968 			nsq = nqp->q_syncq;
6969 			ASSERT(MUTEX_HELD(SQLOCK(nsq)));
6970 			/* debug macro */
6971 			SQ_PUTLOCKS_HELD(nsq);
6972 #ifdef DEBUG
6973 			func = (void (*)())nqp->q_qinfo->qi_putp;
6974 #endif
6975 		}
6976 
6977 		SQRM_Q(sq, qp);
6978 		priority = MAX(qp->q_spri, priority);
6979 		qp->q_spri = 0;
6980 		head = qp->q_sqhead;
6981 		tail = qp->q_sqtail;
6982 		qp->q_sqhead = qp->q_sqtail = NULL;
6983 		qp->q_syncqmsgs = 0;
6984 
6985 		/*
6986 		 * Walk the list of messages, and free them if this is a driver,
6987 		 * otherwise reset the b_prev and b_queue value to the new putp.
6988 		 * Afterward, we will just add the head to the end of the next
6989 		 * syncq, and point the tail to the end of this one.
6990 		 */
6991 
6992 		for (bp = head; bp != NULL; bp = next) {
6993 			next = bp->b_next;
6994 			if (isdriver) {
6995 				bp->b_prev = bp->b_next = NULL;
6996 				freemsg(bp);
6997 				continue;
6998 			}
6999 			/* Change the q values for this message */
7000 			bp->b_queue = nqp;
7001 #ifdef DEBUG
7002 			bp->b_prev = (mblk_t *)func;
7003 #endif
7004 			moved++;
7005 		}
7006 		/*
7007 		 * Attach list of messages to the end of the new queue (if there
7008 		 * is a list of messages).
7009 		 */
7010 
7011 		if (!isdriver && head != NULL) {
7012 			ASSERT(tail != NULL);
7013 			if (nqp->q_sqhead == NULL) {
7014 				nqp->q_sqhead = head;
7015 			} else {
7016 				ASSERT(nqp->q_sqtail != NULL);
7017 				nqp->q_sqtail->b_next = head;
7018 			}
7019 			nqp->q_sqtail = tail;
7020 			/*
7021 			 * When messages are moved from high priority queue to
7022 			 * another queue, the destination queue priority is
7023 			 * upgraded.
7024 			 */
7025 
7026 			if (priority > nqp->q_spri)
7027 				nqp->q_spri = priority;
7028 
7029 			SQPUT_Q(nsq, nqp);
7030 
7031 			nqp->q_syncqmsgs += moved;
7032 			ASSERT(nqp->q_syncqmsgs != 0);
7033 		}
7034 	}
7035 
7036 	/*
7037 	 * Before we leave, we need to make sure there are no
7038 	 * events listed for this queue.  All events for this queue
7039 	 * will just be freed.
7040 	 */
7041 	if (sq->sq_evhead != NULL) {
7042 		ASSERT(sq->sq_flags & SQ_EVENTS);
7043 		prev = NULL;
7044 		for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7045 			next = bp->b_next;
7046 			if (bp->b_queue == qp) {
7047 				/* Delete this message */
7048 				if (prev != NULL) {
7049 					prev->b_next = next;
7050 					/*
7051 					 * Update sq_evtail if the last element
7052 					 * is removed.
7053 					 */
7054 					if (bp == sq->sq_evtail) {
7055 						ASSERT(next == NULL);
7056 						sq->sq_evtail = prev;
7057 					}
7058 				} else
7059 					sq->sq_evhead = next;
7060 				if (sq->sq_evhead == NULL)
7061 					sq->sq_flags &= ~SQ_EVENTS;
7062 				bp->b_prev = bp->b_next = NULL;
7063 				freemsg(bp);
7064 			} else {
7065 				prev = bp;
7066 			}
7067 		}
7068 	}
7069 
7070 	flags = sq->sq_flags;
7071 
7072 	/* Wake up any waiter before leaving. */
7073 	if (flags & SQ_WANTWAKEUP) {
7074 		flags &= ~SQ_WANTWAKEUP;
7075 		cv_broadcast(&sq->sq_wait);
7076 	}
7077 	sq->sq_flags = flags;
7078 
7079 	return (moved);
7080 }
7081 
7082 /*
7083  * Try and upgrade to exclusive access at the inner perimeter. If this can
7084  * not be done without blocking then request will be queued on the syncq
7085  * and drain_syncq will run it later.
7086  *
7087  * This routine can only be called from put or service procedures plus
7088  * asynchronous callback routines that have properly entered the
7089  * queue (with entersq).  Thus qwriter_inner assumes the caller has one claim
7090  * on the syncq associated with q.
7091  */
7092 void
7093 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
7094 {
7095 	syncq_t	*sq = q->q_syncq;
7096 	uint16_t count;
7097 
7098 	mutex_enter(SQLOCK(sq));
7099 	count = sq->sq_count;
7100 	SQ_PUTLOCKS_ENTER(sq);
7101 	SUM_SQ_PUTCOUNTS(sq, count);
7102 	ASSERT(count >= 1);
7103 	ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));
7104 
7105 	if (count == 1) {
7106 		/*
7107 		 * Can upgrade. This case also handles nested qwriter calls
7108 		 * (when the qwriter callback function calls qwriter). In that
7109 		 * case SQ_EXCL is already set.
7110 		 */
7111 		sq->sq_flags |= SQ_EXCL;
7112 		SQ_PUTLOCKS_EXIT(sq);
7113 		mutex_exit(SQLOCK(sq));
7114 		(*func)(q, mp);
7115 		/*
7116 		 * Assumes that leavesq, putnext, and drain_syncq will reset
7117 		 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
7118 		 * until putnext, leavesq, or drain_syncq drops it.
7119 		 * That way we handle nested qwriter(INNER) without dropping
7120 		 * SQ_EXCL until the outermost qwriter callback routine is
7121 		 * done.
7122 		 */
7123 		return;
7124 	}
7125 	SQ_PUTLOCKS_EXIT(sq);
7126 	sqfill_events(sq, q, mp, func);
7127 }
7128 
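/*
 * Illustrative sketch (hypothetical module): qwriter(9F) with
 * PERIM_INNER resolves to qwriter_inner() above.  Since the callback
 * may be deferred and run later from drain_syncq, the caller must
 * hand off mp and must not assume the work is done on return:
 *
 *	static void
 *	xx_set_mode(queue_t *q, mblk_t *mp)
 *	{
 *		struct xxstate *xsp = q->q_ptr;	(hypothetical state)
 *
 *		xsp->xx_mode = *mp->b_rptr;
 *		freemsg(mp);
 *	}
 *
 *	In the put procedure, with one claim held (via entersq):
 *
 *	qwriter(q, mp, xx_set_mode, PERIM_INNER);
 *	return (0);	(mp now belongs to the perimeter code)
 */
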
7129 /*
7130  * Synchronous callback support functions
7131  */
7132 
7133 /*
7134  * Allocate a callback parameter structure.
7135  * Assumes that caller initializes the flags and the id.
7136  * Acquires SQLOCK(sq) if non-NULL is returned.
7137  */
7138 callbparams_t *
7139 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
7140 {
7141 	callbparams_t *cbp;
7142 	size_t size = sizeof (callbparams_t);
7143 
7144 	cbp = kmem_alloc(size, kmflags & ~KM_PANIC);
7145 
7146 	/*
7147 	 * Only try tryhard allocation if the caller is ready to panic.
7148 	 * Otherwise just fail.
7149 	 */
7150 	if (cbp == NULL) {
7151 		if (kmflags & KM_PANIC)
7152 			cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
7153 			    &size, kmflags);
7154 		else
7155 			return (NULL);
7156 	}
7157 
7158 	ASSERT(size >= sizeof (callbparams_t));
7159 	cbp->cbp_size = size;
7160 	cbp->cbp_sq = sq;
7161 	cbp->cbp_func = func;
7162 	cbp->cbp_arg = arg;
7163 	mutex_enter(SQLOCK(sq));
7164 	cbp->cbp_next = sq->sq_callbpend;
7165 	sq->sq_callbpend = cbp;
7166 	return (cbp);
7167 }
7168 
7169 void
7170 callbparams_free(syncq_t *sq, callbparams_t *cbp)
7171 {
7172 	callbparams_t **pp, *p;
7173 
7174 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7175 
7176 	for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7177 		if (p == cbp) {
7178 			*pp = p->cbp_next;
7179 			kmem_free(p, p->cbp_size);
7180 			return;
7181 		}
7182 	}
7183 	(void) (STRLOG(0, 0, 0, SL_CONSOLE,
7184 	    "callbparams_free: not found\n"));
7185 }
7186 
7187 void
7188 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
7189 {
7190 	callbparams_t **pp, *p;
7191 
7192 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7193 
7194 	for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7195 		if (p->cbp_id == id && p->cbp_flags == flag) {
7196 			*pp = p->cbp_next;
7197 			kmem_free(p, p->cbp_size);
7198 			return;
7199 		}
7200 	}
7201 	(void) (STRLOG(0, 0, 0, SL_CONSOLE,
7202 	    "callbparams_free_id: not found\n"));
7203 }
7204 
7205 /*
7206  * Callback wrapper function used by once-only callbacks that can be
7207  * cancelled (qtimeout and qbufcall)
7208  * Contains inline version of entersq(sq, SQ_CALLBACK) that can be
7209  * cancelled by the qun* functions.
7210  */
7211 void
7212 qcallbwrapper(void *arg)
7213 {
7214 	callbparams_t *cbp = arg;
7215 	syncq_t	*sq;
7216 	uint16_t count = 0;
7217 	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7218 	uint16_t type;
7219 
7220 	sq = cbp->cbp_sq;
7221 	mutex_enter(SQLOCK(sq));
7222 	type = sq->sq_type;
7223 	if (!(type & SQ_CICB)) {
7224 		count = sq->sq_count;
7225 		SQ_PUTLOCKS_ENTER(sq);
7226 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7227 		SUM_SQ_PUTCOUNTS(sq, count);
7228 		sq->sq_needexcl++;
7229 		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
7230 		waitflags |= SQ_MESSAGES;
7231 	}
7232 	/* Can not handle exclusive entry at outer perimeter */
7233 	ASSERT(type & SQ_COCB);
7234 
7235 	while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7236 		if ((sq->sq_callbflags & cbp->cbp_flags) &&
7237 		    (sq->sq_cancelid == cbp->cbp_id)) {
7238 			/* timeout has been cancelled */
7239 			sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7240 			callbparams_free(sq, cbp);
7241 			if (!(type & SQ_CICB)) {
7242 				ASSERT(sq->sq_needexcl > 0);
7243 				sq->sq_needexcl--;
7244 				if (sq->sq_needexcl == 0) {
7245 					SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7246 				}
7247 				SQ_PUTLOCKS_EXIT(sq);
7248 			}
7249 			mutex_exit(SQLOCK(sq));
7250 			return;
7251 		}
7252 		sq->sq_flags |= SQ_WANTWAKEUP;
7253 		if (!(type & SQ_CICB)) {
7254 			SQ_PUTLOCKS_EXIT(sq);
7255 		}
7256 		cv_wait(&sq->sq_wait, SQLOCK(sq));
7257 		if (!(type & SQ_CICB)) {
7258 			count = sq->sq_count;
7259 			SQ_PUTLOCKS_ENTER(sq);
7260 			SUM_SQ_PUTCOUNTS(sq, count);
7261 		}
7262 	}
7263 
7264 	sq->sq_count++;
7265 	ASSERT(sq->sq_count != 0);	/* Wraparound */
7266 	if (!(type & SQ_CICB)) {
7267 		ASSERT(count == 0);
7268 		sq->sq_flags |= SQ_EXCL;
7269 		ASSERT(sq->sq_needexcl > 0);
7270 		sq->sq_needexcl--;
7271 		if (sq->sq_needexcl == 0) {
7272 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7273 		}
7274 		SQ_PUTLOCKS_EXIT(sq);
7275 	}
7276 
7277 	mutex_exit(SQLOCK(sq));
7278 
7279 	cbp->cbp_func(cbp->cbp_arg);
7280 
7281 	/*
7282 	 * We drop the lock only for leavesq to re-acquire it.
7283 	 * A possible optimization would be to inline leavesq.
7284 	 */
7285 	mutex_enter(SQLOCK(sq));
7286 	callbparams_free(sq, cbp);
7287 	mutex_exit(SQLOCK(sq));
7288 	leavesq(sq, SQ_CALLBACK);
7289 }
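
/*
 * An illustrative sketch of the module-level view of this machinery:
 * qbufcall(9F) registrations made from a service routine are dispatched
 * through qcallbwrapper() above, which is what makes them perimeter-safe
 * and cancellable by qunbufcall(9F).  STREAMS_EXAMPLES is a hypothetical,
 * never-defined guard and all example_* names (and the q_ptr layout) are
 * hypothetical.
 */
#ifdef STREAMS_EXAMPLES
struct example_state {
	bufcall_id_t	es_bufcall;	/* id to pass to qunbufcall() */
};

static void
example_bufcall(void *arg)
{
	queue_t *q = arg;

	/* Runs via qcallbwrapper(), inside the perimeter. */
	((struct example_state *)q->q_ptr)->es_bufcall = 0;
	qenable(q);			/* retry the service routine */
}

static int
example_wsrv(queue_t *q)
{
	struct example_state *esp = q->q_ptr;
	mblk_t *mp;

	if ((mp = allocb(1024, BPRI_MED)) == NULL) {
		/* Out of memory: ask to be called back when 1K is free. */
		esp->es_bufcall = qbufcall(q, 1024, BPRI_MED,
		    example_bufcall, q);
		return (0);
	}
	freeb(mp);
	return (0);
}
#endif	/* STREAMS_EXAMPLES */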
7290 
7291 /*
7292  * No need to grab sq_putlocks here.  See the comment in strsubr.h that
7293  * explains when sq_putlocks are used.
7294  *
7295  * sq_count (or one of the sq_putcounts) has already been
7296  * decremented by the caller, and if SQ_QUEUED is set, we need to call
7297  * drain_syncq (the global syncq drain).
7298  * If putnext_tail is called with the SQ_EXCL bit set, we are in one of
7299  * two states: either we are on a non-CIPUT perimeter, or we went
7300  * exclusive in the put procedure.  In either case,
7301  * we want to clear the bit now, and it is probably easier to do
7302  * this at the beginning of this function (remember, we hold
7303  * the SQLOCK).  Lastly, if there are other messages queued
7304  * on the syncq (and not for our destination), enable the syncq
7305  * for background work.
7306  */
7307 
7308 /* ARGSUSED */
7309 void
7310 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
7311 {
7312 	uint16_t	flags = sq->sq_flags;
7313 
7314 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7315 	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
7316 
7317 	/* Clear SQ_EXCL if set in passflags */
7318 	if (passflags & SQ_EXCL) {
7319 		flags &= ~SQ_EXCL;
7320 	}
7321 	if (flags & SQ_WANTWAKEUP) {
7322 		flags &= ~SQ_WANTWAKEUP;
7323 		cv_broadcast(&sq->sq_wait);
7324 	}
7325 	if (flags & SQ_WANTEXWAKEUP) {
7326 		flags &= ~SQ_WANTEXWAKEUP;
7327 		cv_broadcast(&sq->sq_exitwait);
7328 	}
7329 	sq->sq_flags = flags;
7330 
7331 	/*
7332 	 * We have cleared SQ_EXCL if we were asked to, and started
7333 	 * the wakeup process for waiters.  If there are no writers
7334 	 * then we need to drain the syncq if we were told to, or
7335 	 * enable the background thread to do it.
7336 	 */
7337 	if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
7338 		if ((passflags & SQ_QUEUED) ||
7339 		    (sq->sq_svcflags & SQ_DISABLED)) {
7340 			/* drain_syncq will take care of events in the list */
7341 			drain_syncq(sq);
7342 			return;
7343 		} else if (flags & SQ_QUEUED) {
7344 			sqenable(sq);
7345 		}
7346 	}
7347 	/* Drop the SQLOCK on exit */
7348 	mutex_exit(SQLOCK(sq));
7349 	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
7350 		"putnext_end:(%p, %p, %p) done", NULL, qp, sq);
7351 }
7352 
7353 void
7354 set_qend(queue_t *q)
7355 {
7356 	mutex_enter(QLOCK(q));
7357 	if (!O_SAMESTR(q))
7358 		q->q_flag |= QEND;
7359 	else
7360 		q->q_flag &= ~QEND;
7361 	mutex_exit(QLOCK(q));
7362 	q = _OTHERQ(q);
7363 	mutex_enter(QLOCK(q));
7364 	if (!O_SAMESTR(q))
7365 		q->q_flag |= QEND;
7366 	else
7367 		q->q_flag &= ~QEND;
7368 	mutex_exit(QLOCK(q));
7369 }
7370 
7371 
7372 void
7373 clr_qfull(queue_t *q)
7374 {
7375 	queue_t	*oq = q;
7376 
7377 	q = q->q_nfsrv;
7378 	/* Fast check if there is any work to do before getting the lock. */
7379 	if ((q->q_flag & (QFULL|QWANTW)) == 0) {
7380 		return;
7381 	}
7382 
7383 	/*
7384 	 * Do not reset QFULL (and backenable) if the q_count is the reason
7385 	 * for QFULL being set.
7386 	 */
7387 	mutex_enter(QLOCK(q));
7388 	/*
7389 	 * Clear QFULL only if both q_count and q_mblkcnt are below hiwat.
7390 	 */
7391 	if ((q->q_count < q->q_hiwat) && (q->q_mblkcnt < q->q_hiwat)) {
7392 		q->q_flag &= ~QFULL;
7393 		/*
7394 	 * More precisely, the backenable rule is:
7395 		 * if someone wants to write,
7396 		 * AND
7397 		 *    both counts are less than the lowat mark
7398 		 *    OR
7399 		 *    the lowat mark is zero
7400 		 * THEN
7401 		 * backenable
7402 		 */
7403 		if ((q->q_flag & QWANTW) &&
7404 		    (((q->q_count < q->q_lowat) &&
7405 		    (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7406 			q->q_flag &= ~QWANTW;
7407 			mutex_exit(QLOCK(q));
7408 			backenable(oq, 0);
7409 		} else
7410 			mutex_exit(QLOCK(q));
7411 	} else
7412 		mutex_exit(QLOCK(q));
7413 }
7414 
7415 /*
7416  * Set the forward service procedure pointer.
7417  *
7418  * Called at insert-time to cache a queue's next forward service procedure in
7419  * q_nfsrv; used by canput() and canputnext().  If the queue to be inserted
7420  * has a service procedure then q_nfsrv points to itself.  If the queue to be
7421  * inserted does not have a service procedure, then q_nfsrv points to the next
7422  * queue forward that has a service procedure.  If the queue is at the logical
7423  * end of the stream (driver for write side, stream head for the read side)
7424  * and does not have a service procedure, then q_nfsrv also points to itself.
7425  */
7426 void
7427 set_nfsrv_ptr(
7428 	queue_t  *rnew,		/* read queue pointer to new module */
7429 	queue_t  *wnew,		/* write queue pointer to new module */
7430 	queue_t  *prev_rq,	/* read queue pointer to the module above */
7431 	queue_t  *prev_wq)	/* write queue pointer to the module above */
7432 {
7433 	queue_t *qp;
7434 
7435 	if (prev_wq->q_next == NULL) {
7436 		/*
7437 		 * Insert the driver, initialize the driver and stream head.
7438 		 * In this case, prev_rq/prev_wq should be the stream head.
7439 		 * _I_INSERT does not allow inserting a driver.  Make sure
7440 		 * that it is not an insertion.
7441 		 */
7442 		ASSERT(!(rnew->q_flag & _QINSERTING));
7443 		wnew->q_nfsrv = wnew;
7444 		if (rnew->q_qinfo->qi_srvp)
7445 			rnew->q_nfsrv = rnew;
7446 		else
7447 			rnew->q_nfsrv = prev_rq;
7448 		prev_rq->q_nfsrv = prev_rq;
7449 		prev_wq->q_nfsrv = prev_wq;
7450 	} else {
7451 		/*
7452 		 * set up read side q_nfsrv pointer.  This MUST be done
7453 		 * before setting the write side, because the setting of
7454 		 * the write side for a fifo may depend on it.
7455 		 *
7456 		 * Suppose we have a fifo that only has pipemod pushed.
7457 		 * pipemod has no read or write service procedures, so
7458 		 * nfsrv for both pipemod queues points to prev_rq (the
7459 		 * stream read head).  Now push bufmod (which has only a
7460 		 * read service procedure).  Doing the write side first,
7461 		 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7462 		 * is WRONG; the next queue forward from wnew with a
7463 		 * service procedure will be rnew, not the stream read head.
7464 		 * Since the downstream queue (which in the case of a fifo
7465 		 * is the read queue rnew) can affect upstream queues, it
7466 		 * needs to be done first.  Setting up the read side first
7467 		 * sets nfsrv for both pipemod queues to rnew and then
7468 		 * when the write side is set up, wnew->q_nfsrv will also
7469 		 * point to rnew.
7470 		 */
7471 		if (rnew->q_qinfo->qi_srvp) {
7472 			/*
7473 			 * Use _OTHERQ() because, if this is a pipe, the next
7474 			 * module may have been pushed from the other end and
7475 			 * q_next could be a read queue.
7476 			 */
7477 			qp = _OTHERQ(prev_wq->q_next);
7478 			while (qp && qp->q_nfsrv != qp) {
7479 				qp->q_nfsrv = rnew;
7480 				qp = backq(qp);
7481 			}
7482 			rnew->q_nfsrv = rnew;
7483 		} else
7484 			rnew->q_nfsrv = prev_rq->q_nfsrv;
7485 
7486 		/* set up write side q_nfsrv pointer */
7487 		if (wnew->q_qinfo->qi_srvp) {
7488 			wnew->q_nfsrv = wnew;
7489 
7490 			/*
7491 			 * For insertion, need to update nfsrv of the modules
7492 			 * above which do not have a service routine.
7493 			 */
7494 			if (rnew->q_flag & _QINSERTING) {
7495 				for (qp = prev_wq;
7496 				    qp != NULL && qp->q_nfsrv != qp;
7497 				    qp = backq(qp)) {
7498 					qp->q_nfsrv = wnew->q_nfsrv;
7499 				}
7500 			}
7501 		} else {
7502 			if (prev_wq->q_next == prev_rq)
7503 				/*
7504 				 * Since prev_wq/prev_rq are the middle of a
7505 				 * fifo, wnew/rnew will also be the middle of
7506 				 * a fifo and wnew's nfsrv is same as rnew's.
7507 				 */
7508 				wnew->q_nfsrv = rnew->q_nfsrv;
7509 			else
7510 				wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7511 		}
7512 	}
7513 }
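
/*
 * An illustrative sketch of what the q_nfsrv cache buys us.  This is not
 * the real canputnext(9F) (which also takes QLOCK and sets QWANTW for
 * backenabling); it only shows the pointer chase that the cache
 * eliminates.  The example_canputnext name and STREAMS_EXAMPLES guard
 * are hypothetical.
 */
#ifdef STREAMS_EXAMPLES
static int
example_canputnext(queue_t *q)
{
	/* Nearest forward queue that will actually drain messages. */
	queue_t *fq = q->q_next->q_nfsrv;

	return ((fq->q_flag & QFULL) == 0);
}
#endif	/* STREAMS_EXAMPLES */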
7514 
7515 /*
7516  * Reset the forward service procedure pointer; called at remove-time.
7517  */
7518 void
7519 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7520 {
7521 	queue_t *tmp_qp;
7522 
7523 	/* Reset the write side q_nfsrv pointer for _I_REMOVE */
7524 	if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7525 		for (tmp_qp = backq(wqp);
7526 		    tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7527 		    tmp_qp = backq(tmp_qp)) {
7528 			tmp_qp->q_nfsrv = wqp->q_nfsrv;
7529 		}
7530 	}
7531 
7532 	/* reset the read side q_nfsrv pointer */
7533 	if (rqp->q_qinfo->qi_srvp) {
7534 		if (wqp->q_next) {	/* non-driver case */
7535 			tmp_qp = _OTHERQ(wqp->q_next);
7536 			while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7537 				/* Note that rqp->q_next cannot be NULL */
7538 				ASSERT(rqp->q_next != NULL);
7539 				tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7540 				tmp_qp = backq(tmp_qp);
7541 			}
7542 		}
7543 	}
7544 }
7545 
7546 /*
7547  * This routine should be called after all stream geometry changes to update
7548  * the stream head cached struio() rd/wr queue pointers.  Note that it
7549  * must be called with the stream streamlock()ed.
7550  *
7551  * Note: only enables Synchronous STREAMS for a side of a Stream which has
7552  *	 an explicit synchronous barrier module queue. That is, a queue that
7553  *	 has specified a struio() type.
7554  */
7555 static void
7556 strsetuio(stdata_t *stp)
7557 {
7558 	queue_t *wrq;
7559 
7560 	if (stp->sd_flag & STPLEX) {
7561 		/*
7562 		 * Not a stream head, but a mux, so no Synchronous STREAMS.
7563 		 */
7564 		stp->sd_struiowrq = NULL;
7565 		stp->sd_struiordq = NULL;
7566 		return;
7567 	}
7568 	/*
7569 	 * Scan down the write queues, while they remain synchronous,
7570 	 * until we find a queue whose qinfo specifies a uio type.
7571 	 */
7572 	wrq = stp->sd_wrq->q_next;
7573 	while (wrq) {
7574 		if (wrq->q_struiot == STRUIOT_NONE) {
7575 			wrq = 0;
7576 			break;
7577 		}
7578 		if (wrq->q_struiot != STRUIOT_DONTCARE)
7579 			break;
7580 		if (! _SAMESTR(wrq)) {
7581 			wrq = 0;
7582 			break;
7583 		}
7584 		wrq = wrq->q_next;
7585 	}
7586 	stp->sd_struiowrq = wrq;
7587 	/*
7588 	 * Scan down the read queues, while they remain synchronous,
7589 	 * until we find a queue whose qinfo specifies a uio type.
7590 	 */
7591 	wrq = stp->sd_wrq->q_next;
7592 	while (wrq) {
7593 		if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7594 			wrq = 0;
7595 			break;
7596 		}
7597 		if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7598 			break;
7599 		if (! _SAMESTR(wrq)) {
7600 			wrq = 0;
7601 			break;
7602 		}
7603 		wrq = wrq->q_next;
7604 	}
7605 	stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7606 }
7607 
7608 /*
7609  * pass_wput unblocks the passthru queues so that
7610  * messages can arrive at the mux's lower read queue before
7611  * the I_LINK/I_UNLINK is acked/nacked.
7612  */
7613 static void
7614 pass_wput(queue_t *q, mblk_t *mp)
7615 {
7616 	syncq_t *sq;
7617 
7618 	sq = _RD(q)->q_syncq;
7619 	if (sq->sq_flags & SQ_BLOCKED)
7620 		unblocksq(sq, SQ_BLOCKED, 0);
7621 	putnext(q, mp);
7622 }
7623 
7624 /*
7625  * Set up queues for the link/unlink.
7626  * Create a new queue, block it, and then insert it
7627  * below the stream head on the lower stream.
7628  * This prevents any messages from arriving during the setq
7629  * as well as while the mux is processing the I_LINK/I_UNLINK.
7630  * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7631  * been acked or nacked or if a message is generated and sent
7632  * down the mux's write put procedure.
7633  * See pass_wput().
7634  *
7635  * After the new queue is inserted, all messages coming from below are
7636  * blocked. The call to strlock will ensure that all activity in the stream head
7637  * read queue syncq is stopped (sq_count drops to zero).
7638  */
7639 static queue_t *
7640 link_addpassthru(stdata_t *stpdown)
7641 {
7642 	queue_t *passq;
7643 	sqlist_t sqlist;
7644 
7645 	passq = allocq();
7646 	STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7647 	/* setq might sleep in allocator - avoid holding locks. */
7648 	setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7649 	    SQ_CI|SQ_CO, B_FALSE);
7650 	claimq(passq);
7651 	blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7652 	insertq(STREAM(passq), passq);
7653 
7654 	/*
7655 	 * Use strlock() to wait for the stream head sq_count to drop to zero
7656 	 * since we are going to change q_ptr in the stream head.  Note that
7657 	 * insertq() doesn't wait for any syncq counts to drop to zero.
7658 	 */
7659 	sqlist.sqlist_head = NULL;
7660 	sqlist.sqlist_index = 0;
7661 	sqlist.sqlist_size = sizeof (sqlist_t);
7662 	sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7663 	strlock(stpdown, &sqlist);
7664 	strunlock(stpdown, &sqlist);
7665 
7666 	releaseq(passq);
7667 	return (passq);
7668 }
7669 
7670 /*
7671  * Let messages flow up into the mux by removing
7672  * the passq.
7673  */
7674 static void
7675 link_rempassthru(queue_t *passq)
7676 {
7677 	claimq(passq);
7678 	removeq(passq);
7679 	releaseq(passq);
7680 	freeq(passq);
7681 }
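
/*
 * An illustrative sketch of how the two routines above bracket mux link
 * processing (mlink() uses them this way; example_link_ioctl and the
 * STREAMS_EXAMPLES guard are hypothetical and elide all of the real
 * ioctl handling).
 */
#ifdef STREAMS_EXAMPLES
static void
example_link_ioctl(stdata_t *stpdown)
{
	queue_t *passq;

	passq = link_addpassthru(stpdown);
	/*
	 * ... send the I_LINK/I_UNLINK down to the mux and wait for the
	 * ack/nak; pass_wput() unblocks the passthru syncq as soon as
	 * the mux replies ...
	 */
	link_rempassthru(passq);
}
#endif	/* STREAMS_EXAMPLES */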
7682 
7683 /*
7684  * Wait for an event with an optional timeout and an optional return if
7685  * a signal is sent to the thread.
7686  * tim:  -1 : no timeout
7687  *       otherwise the value is the relative time in milliseconds to wait
7688  * nosigs: if non-zero then signals will be ignored, otherwise a signal
7689  *       will terminate the wait
7690  * returns >0 on success, 0 if a signal was encountered, -1 if the
7691  * timeout was reached.
7692  */
7693 clock_t
7694 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7695 {
7696 	clock_t ret, now, tick;
7697 
7698 	if (tim < 0) {
7699 		if (nosigs) {
7700 			cv_wait(cvp, mp);
7701 			ret = 1;
7702 		} else {
7703 			ret = cv_wait_sig(cvp, mp);
7704 		}
7705 	} else if (tim > 0) {
7706 		/*
7707 		 * convert milliseconds to clock ticks
7708 		 */
7709 		tick = MSEC_TO_TICK_ROUNDUP(tim);
7710 		time_to_wait(&now, tick);
7711 		if (nosigs) {
7712 			ret = cv_timedwait(cvp, mp, now);
7713 		} else {
7714 			ret = cv_timedwait_sig(cvp, mp, now);
7715 		}
7716 	} else {
7717 		ret = -1;
7718 	}
7719 	return (ret);
7720 }
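
/*
 * An illustrative sketch of handling all three str_cv_wait() outcomes
 * (strwaitmark() below is a real in-file caller; example_wait, the
 * 100ms timeout, and the STREAMS_EXAMPLES guard are hypothetical).
 */
#ifdef STREAMS_EXAMPLES
static int
example_wait(stdata_t *stp)
{
	clock_t rval;

	mutex_enter(&stp->sd_lock);
	rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, 100, 0);
	mutex_exit(&stp->sd_lock);

	if (rval > 0)
		return (0);	/* condition was signalled */
	if (rval == 0)
		return (EINTR);	/* a signal terminated the wait */
	return (ETIME);		/* -1: the 100ms timeout expired */
}
#endif	/* STREAMS_EXAMPLES */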
7721 
7722 /*
7723  * Wait until the stream head can determine if it is at the mark but
7724  * don't wait forever to prevent a race condition between the "mark" state
7725  * in the stream head and any mark state in the caller/user of this routine.
7726  *
7727  * This is used by sockets and for a socket it would be incorrect
7728  * to return a failure for SIOCATMARK when there is no data in the receive
7729  * queue and the marked urgent data is traveling up the stream.
7730  *
7731  * This routine waits until the mark is known by waiting for one of these
7732  * three events:
7733  *	The stream head read queue becoming non-empty (including an EOF)
7734  *	The STRATMARK flag being set. (Due to a MSGMARKNEXT message.)
7735  *	The STRNOTATMARK flag being set (which indicates that the transport
7736  *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
7737  *	the mark).
7738  *
7739  * The routine returns 1 if the stream is at the mark; 0 if it can
7740  * be determined that the stream is not at the mark.
7741  * If the wait times out and it still can't be determined
7742  * whether or not the stream is at the mark, the routine returns -1.
7743  *
7744  * Note: This routine should only be used when a mark is pending i.e.,
7745  * in the socket case the SIGURG has been posted.
7746  * Note2: This cannot wake up just because synchronous streams indicate
7747  * that data is available since it is not possible to use the synchronous
7748  * streams interfaces to determine the b_flag value for the data queued below
7749  * the stream head.
7750  */
7751 int
7752 strwaitmark(vnode_t *vp)
7753 {
7754 	struct stdata *stp = vp->v_stream;
7755 	queue_t *rq = _RD(stp->sd_wrq);
7756 	int mark;
7757 
7758 	mutex_enter(&stp->sd_lock);
7759 	while (rq->q_first == NULL &&
7760 	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7761 		stp->sd_flag |= RSLEEP;
7762 
7763 		/* Wait for 100 milliseconds for any state change. */
7764 		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7765 			mutex_exit(&stp->sd_lock);
7766 			return (-1);
7767 		}
7768 	}
7769 	if (stp->sd_flag & STRATMARK)
7770 		mark = 1;
7771 	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7772 		mark = 1;
7773 	else
7774 		mark = 0;
7775 
7776 	mutex_exit(&stp->sd_lock);
7777 	return (mark);
7778 }
7779 
7780 /*
7781  * Set a read side error. If persist is set change the socket error
7782  * to persistent. If errfunc is set install the function as the exported
7783  * error handler.
7784  */
7785 void
7786 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7787 {
7788 	struct stdata *stp = vp->v_stream;
7789 
7790 	mutex_enter(&stp->sd_lock);
7791 	stp->sd_rerror = error;
7792 	if (error == 0 && errfunc == NULL)
7793 		stp->sd_flag &= ~STRDERR;
7794 	else
7795 		stp->sd_flag |= STRDERR;
7796 	if (persist) {
7797 		stp->sd_flag &= ~STRDERRNONPERSIST;
7798 	} else {
7799 		stp->sd_flag |= STRDERRNONPERSIST;
7800 	}
7801 	stp->sd_rderrfunc = errfunc;
7802 	if (error != 0 || errfunc != NULL) {
7803 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
7804 		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
7805 		cv_broadcast(&stp->sd_monitor);			/* ioctllers */
7806 
7807 		mutex_exit(&stp->sd_lock);
7808 		pollwakeup(&stp->sd_pollist, POLLERR);
7809 		mutex_enter(&stp->sd_lock);
7810 
7811 		if (stp->sd_sigflags & S_ERROR)
7812 			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
7813 	}
7814 	mutex_exit(&stp->sd_lock);
7815 }
7816 
7817 /*
7818  * Set a write side error. If persist is set change the socket error
7819  * to persistent. If errfunc is set install it as the error handler.
7820  */
7821 void
7822 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7823 {
7824 	struct stdata *stp = vp->v_stream;
7825 
7826 	mutex_enter(&stp->sd_lock);
7827 	stp->sd_werror = error;
7828 	if (error == 0 && errfunc == NULL)
7829 		stp->sd_flag &= ~STWRERR;
7830 	else
7831 		stp->sd_flag |= STWRERR;
7832 	if (persist) {
7833 		stp->sd_flag &= ~STWRERRNONPERSIST;
7834 	} else {
7835 		stp->sd_flag |= STWRERRNONPERSIST;
7836 	}
7837 	stp->sd_wrerrfunc = errfunc;
7838 	if (error != 0 || errfunc != NULL) {
7839 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
7840 		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
7841 		cv_broadcast(&stp->sd_monitor);			/* ioctllers */
7842 
7843 		mutex_exit(&stp->sd_lock);
7844 		pollwakeup(&stp->sd_pollist, POLLERR);
7845 		mutex_enter(&stp->sd_lock);
7846 
7847 		if (stp->sd_sigflags & S_ERROR)
7848 			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
7849 	}
7850 	mutex_exit(&stp->sd_lock);
7851 }
7852 
7853 /*
7854  * Make the stream return 0 (EOF) when all data has been read.
7855  * No effect on write side.
7856  */
7857 void
7858 strseteof(vnode_t *vp, int eof)
7859 {
7860 	struct stdata *stp = vp->v_stream;
7861 
7862 	mutex_enter(&stp->sd_lock);
7863 	if (!eof) {
7864 		stp->sd_flag &= ~STREOF;
7865 		mutex_exit(&stp->sd_lock);
7866 		return;
7867 	}
7868 	stp->sd_flag |= STREOF;
7869 	if (stp->sd_flag & RSLEEP) {
7870 		stp->sd_flag &= ~RSLEEP;
7871 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
7872 	}
7873 
7874 	mutex_exit(&stp->sd_lock);
7875 	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
7876 	mutex_enter(&stp->sd_lock);
7877 
7878 	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
7879 		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
7880 	mutex_exit(&stp->sd_lock);
7881 }
7882 
7883 void
7884 strflushrq(vnode_t *vp, int flag)
7885 {
7886 	struct stdata *stp = vp->v_stream;
7887 
7888 	mutex_enter(&stp->sd_lock);
7889 	flushq(_RD(stp->sd_wrq), flag);
7890 	mutex_exit(&stp->sd_lock);
7891 }
7892 
7893 void
7894 strsetrputhooks(vnode_t *vp, uint_t flags,
7895 		msgfunc_t protofunc, msgfunc_t miscfunc)
7896 {
7897 	struct stdata *stp = vp->v_stream;
7898 
7899 	mutex_enter(&stp->sd_lock);
7900 
7901 	if (protofunc == NULL)
7902 		stp->sd_rprotofunc = strrput_proto;
7903 	else
7904 		stp->sd_rprotofunc = protofunc;
7905 
7906 	if (miscfunc == NULL)
7907 		stp->sd_rmiscfunc = strrput_misc;
7908 	else
7909 		stp->sd_rmiscfunc = miscfunc;
7910 
7911 	if (flags & SH_CONSOL_DATA)
7912 		stp->sd_rput_opt |= SR_CONSOL_DATA;
7913 	else
7914 		stp->sd_rput_opt &= ~SR_CONSOL_DATA;
7915 
7916 	if (flags & SH_SIGALLDATA)
7917 		stp->sd_rput_opt |= SR_SIGALLDATA;
7918 	else
7919 		stp->sd_rput_opt &= ~SR_SIGALLDATA;
7920 
7921 	if (flags & SH_IGN_ZEROLEN)
7922 		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
7923 	else
7924 		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
7925 
7926 	mutex_exit(&stp->sd_lock);
7927 }
7928 
7929 void
7930 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
7931 {
7932 	struct stdata *stp = vp->v_stream;
7933 
7934 	mutex_enter(&stp->sd_lock);
7935 	stp->sd_closetime = closetime;
7936 
7937 	if (flags & SH_SIGPIPE)
7938 		stp->sd_wput_opt |= SW_SIGPIPE;
7939 	else
7940 		stp->sd_wput_opt &= ~SW_SIGPIPE;
7941 	if (flags & SH_RECHECK_ERR)
7942 		stp->sd_wput_opt |= SW_RECHECK_ERR;
7943 	else
7944 		stp->sd_wput_opt &= ~SW_RECHECK_ERR;
7945 
7946 	mutex_exit(&stp->sd_lock);
7947 }
7948 
7949 /* Used within framework when the queue is already locked */
7950 void
7951 qenable_locked(queue_t *q)
7952 {
7953 	stdata_t *stp = STREAM(q);
7954 
7955 	ASSERT(MUTEX_HELD(QLOCK(q)));
7956 
7957 	if (!q->q_qinfo->qi_srvp)
7958 		return;
7959 
7960 	/*
7961 	 * Do not place on run queue if already enabled or closing.
7962 	 */
7963 	if (q->q_flag & (QWCLOSE|QENAB))
7964 		return;
7965 
7966 	/*
7967 	 * Mark the queue enabled and place it on the run list if it is not
7968 	 * already being serviced. If it is serviced, the runservice() function
7969 	 * will detect that QENAB is set and call the service procedure before
7970 	 * clearing the QINSERVICE flag.
7971 	 */
7972 	q->q_flag |= QENAB;
7973 	if (q->q_flag & QINSERVICE)
7974 		return;
7975 
7976 	/* Record the time of qenable */
7977 	q->q_qtstamp = lbolt;
7978 
7979 	/*
7980 	 * Put the queue in the stp list and schedule it for background
7981 	 * processing if it is not already scheduled and if the stream head
7982 	 * does not intend to process it in the foreground later (indicated
7983 	 * by the STRS_WILLSERVICE flag).
7984 	 */
7985 	mutex_enter(&stp->sd_qlock);
7986 	/*
7987 	 * If there is already something on the list, the stp flags should
7988 	 * show an intention to drain it.
7989 	 */
7990 	IMPLY(STREAM_NEEDSERVICE(stp),
7991 	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));
7992 
7993 	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
7994 	stp->sd_nqueues++;
7995 
7996 	/*
7997 	 * If no one will drain this stream, we are the first producer and
7998 	 * need to schedule it for the background thread.
7999 	 */
8000 	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
8001 		/*
8002 		 * No one will service this stream later, so we have to
8003 		 * schedule it now.
8004 		 */
8005 		STRSTAT(stenables);
8006 		stp->sd_svcflags |= STRS_SCHEDULED;
8007 		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
8008 		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);
8009 
8010 		if (stp->sd_servid == NULL) {
8011 			/*
8012 			 * Task queue failed so fail over to the backup
8013 			 * servicing thread.
8014 			 */
8015 			STRSTAT(taskqfails);
8016 			/*
8017 			 * It is safe to clear STRS_SCHEDULED flag because it
8018 			 * was set by this thread above.
8019 			 */
8020 			stp->sd_svcflags &= ~STRS_SCHEDULED;
8021 
8022 			/*
8023 			 * Failover scheduling is protected by service_queue
8024 			 * lock.
8025 			 */
8026 			mutex_enter(&service_queue);
8027 			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
8028 			ASSERT(q->q_link == NULL);
8029 			/*
8030 			 * Append the queue to qhead/qtail list.
8031 			 */
8032 			if (qhead == NULL)
8033 				qhead = q;
8034 			else
8035 				qtail->q_link = q;
8036 			qtail = q;
8037 			/*
8038 			 * Clear stp queue list.
8039 			 */
8040 			stp->sd_qhead = stp->sd_qtail = NULL;
8041 			stp->sd_nqueues = 0;
8042 			/*
8043 			 * Wakeup background queue processing thread.
8044 			 */
8045 			cv_signal(&services_to_run);
8046 			mutex_exit(&service_queue);
8047 		}
8048 	}
8049 	mutex_exit(&stp->sd_qlock);
8050 }
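
/*
 * An illustrative sketch of the usual entry point into the code above:
 * drivers call qenable(9F), which takes QLOCK and then runs
 * qenable_locked().  A typical use is rescheduling a service routine
 * from interrupt context once a resource shortage clears (example_intr
 * and the STREAMS_EXAMPLES guard are hypothetical).
 */
#ifdef STREAMS_EXAMPLES
static uint_t
example_intr(caddr_t arg)
{
	queue_t *rq = (queue_t *)arg;	/* the driver's read queue */

	qenable(rq);	/* schedule rq's service routine to run */
	return (DDI_INTR_CLAIMED);
}
#endif	/* STREAMS_EXAMPLES */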
8051 
8052 static void
8053 queue_service(queue_t *q)
8054 {
8055 	/*
8056 	 * The queue in the list should have
8057 	 * QENAB flag set and should not have
8058 	 * QINSERVICE flag set. QINSERVICE is
8059 	 * set when the queue is dequeued and
8060 	 * qenable_locked doesn't enqueue a
8061 	 * queue with QINSERVICE set.
8062 	 */
8063 
8064 	ASSERT(!(q->q_flag & QINSERVICE));
8065 	ASSERT((q->q_flag & QENAB));
8066 	mutex_enter(QLOCK(q));
8067 	q->q_flag &= ~QENAB;
8068 	q->q_flag |= QINSERVICE;
8069 	mutex_exit(QLOCK(q));
8070 	runservice(q);
8071 }
8072 
8073 static void
8074 syncq_service(syncq_t *sq)
8075 {
8076 	STRSTAT(syncqservice);
8077 	mutex_enter(SQLOCK(sq));
8078 	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
8079 	ASSERT(sq->sq_servcount != 0);
8080 	ASSERT(sq->sq_next == NULL);
8081 
8082 	/* if we came here from the background thread, clear the flag */
8083 	if (sq->sq_svcflags & SQ_BGTHREAD)
8084 		sq->sq_svcflags &= ~SQ_BGTHREAD;
8085 
8086 	/* let drain_syncq know that it's being called in the background */
8087 	sq->sq_svcflags |= SQ_SERVICE;
8088 	drain_syncq(sq);
8089 }
8090 
8091 static void
8092 qwriter_outer_service(syncq_t *outer)
8093 {
8094 	/*
8095 	 * Note that SQ_WRITER is used on the outer perimeter
8096 	 * to signal that a qwriter(OUTER) is either investigating whether
8097 	 * it can run or is actually running a function.
8098 	 */
8099 	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);
8100 
8101 	/*
8102 	 * All inner syncqs are empty and have SQ_WRITER set
8103 	 * to block entering the outer perimeter.
8104 	 *
8105 	 * We do not need to explicitly call write_now since
8106 	 * outer_exit does it for us.
8107 	 */
8108 	outer_exit(outer);
8109 }
8110 
8111 static void
8112 mblk_free(mblk_t *mp)
8113 {
8114 	dblk_t *dbp = mp->b_datap;
8115 	frtn_t *frp = dbp->db_frtnp;
8116 
8117 	mp->b_next = NULL;
8118 	if (dbp->db_fthdr != NULL)
8119 		str_ftfree(dbp);
8120 
8121 	ASSERT(dbp->db_fthdr == NULL);
8122 	frp->free_func(frp->free_arg);
8123 	ASSERT(dbp->db_mblk == mp);
8124 
8125 	if (dbp->db_credp != NULL) {
8126 		crfree(dbp->db_credp);
8127 		dbp->db_credp = NULL;
8128 	}
8129 	dbp->db_cpid = -1;
8130 	dbp->db_struioflag = 0;
8131 	dbp->db_struioun.cksum.flags = 0;
8132 
8133 	kmem_cache_free(dbp->db_cache, dbp);
8134 }
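
/*
 * An illustrative sketch of the esballoc(9F) contract that mblk_free()
 * completes: the driver supplies a frtn_t, and free_func/free_arg are
 * invoked above when the last reference to the loaned buffer's message
 * goes away (the example_* names and the STREAMS_EXAMPLES guard are
 * hypothetical).
 */
#ifdef STREAMS_EXAMPLES
static void
example_recycle(char *buf)
{
	/* Return buf to the driver's private receive-buffer pool. */
}

static mblk_t *
example_loan(uchar_t *buf, size_t len, frtn_t *frp)
{
	frp->free_func = example_recycle;
	frp->free_arg = (caddr_t)buf;
	return (esballoc(buf, len, BPRI_MED, frp));
}
#endif	/* STREAMS_EXAMPLES */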
8135 
8136 /*
8137  * Background processing of the stream queue list.
8138  */
8139 static void
8140 stream_service(stdata_t *stp)
8141 {
8142 	queue_t *q;
8143 
8144 	mutex_enter(&stp->sd_qlock);
8145 
8146 	STR_SERVICE(stp, q);
8147 
8148 	stp->sd_svcflags &= ~STRS_SCHEDULED;
8149 	stp->sd_servid = NULL;
8150 	cv_signal(&stp->sd_qcv);
8151 	mutex_exit(&stp->sd_qlock);
8152 }
8153 
8154 /*
8155  * Foreground processing of the stream queue list.
8156  */
8157 void
8158 stream_runservice(stdata_t *stp)
8159 {
8160 	queue_t *q;
8161 
8162 	mutex_enter(&stp->sd_qlock);
8163 	STRSTAT(rservice);
8164 	/*
8165 	 * We are going to drain this stream queue list, so qenable_locked will
8166 	 * not schedule it until we finish.
8167 	 */
8168 	stp->sd_svcflags |= STRS_WILLSERVICE;
8169 
8170 	STR_SERVICE(stp, q);
8171 
8172 	stp->sd_svcflags &= ~STRS_WILLSERVICE;
8173 	mutex_exit(&stp->sd_qlock);
8174 	/*
8175 	 * Help the backup background thread drain the qhead/qtail list.
8176 	 */
8177 	while (qhead != NULL) {
8178 		STRSTAT(qhelps);
8179 		mutex_enter(&service_queue);
8180 		DQ(q, qhead, qtail, q_link);
8181 		mutex_exit(&service_queue);
8182 		if (q != NULL)
8183 			queue_service(q);
8184 	}
8185 }
8186 
8187 void
8188 stream_willservice(stdata_t *stp)
8189 {
8190 	mutex_enter(&stp->sd_qlock);
8191 	stp->sd_svcflags |= STRS_WILLSERVICE;
8192 	mutex_exit(&stp->sd_qlock);
8193 }
8194 
8195 /*
8196  * Replace the cred currently in the mblk with a different one.
8197  */
8198 void
8199 mblk_setcred(mblk_t *mp, cred_t *cr)
8200 {
8201 	cred_t *ocr = DB_CRED(mp);
8202 
8203 	ASSERT(cr != NULL);
8204 
8205 	if (cr != ocr) {
8206 		crhold(mp->b_datap->db_credp = cr);
8207 		if (ocr != NULL)
8208 			crfree(ocr);
8209 	}
8210 }
8211 
8212 int
8213 hcksum_assoc(mblk_t *mp,  multidata_t *mmd, pdesc_t *pd,
8214     uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
8215     uint32_t flags, int km_flags)
8216 {
8217 	int rc = 0;
8218 
8219 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8220 	if (mp->b_datap->db_type == M_DATA) {
8221 		/* Associate values for M_DATA type */
8222 		mp->b_datap->db_cksumstart = (intptr_t)start;
8223 		mp->b_datap->db_cksumstuff = (intptr_t)stuff;
8224 		mp->b_datap->db_cksumend = (intptr_t)end;
8225 		mp->b_datap->db_struioun.cksum.flags = flags;
8226 		mp->b_datap->db_cksum16 = (uint16_t)value;
8227 
8228 	} else {
8229 		pattrinfo_t pa_info;
8230 
8231 		ASSERT(mmd != NULL);
8232 
8233 		pa_info.type = PATTR_HCKSUM;
8234 		pa_info.len = sizeof (pattr_hcksum_t);
8235 
8236 		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
8237 			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;
8238 
8239 			hck->hcksum_start_offset = start;
8240 			hck->hcksum_stuff_offset = stuff;
8241 			hck->hcksum_end_offset = end;
8242 			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
8243 			hck->hcksum_flags = flags;
8244 		}
8245 	}
8246 	return (rc);
8247 }
8248 
8249 void
8250 hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8251     uint32_t *start, uint32_t *stuff, uint32_t *end,
8252     uint32_t *value, uint32_t *flags)
8253 {
8254 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8255 	if (mp->b_datap->db_type == M_DATA) {
8256 		if (flags != NULL) {
8257 			*flags = mp->b_datap->db_struioun.cksum.flags;
8258 			if (*flags & HCK_PARTIALCKSUM) {
8259 				if (start != NULL)
8260 					*start = (uint32_t)
8261 					    mp->b_datap->db_cksumstart;
8262 				if (stuff != NULL)
8263 					*stuff = (uint32_t)
8264 					    mp->b_datap->db_cksumstuff;
8265 				if (end != NULL)
8266 					*end =
8267 					    (uint32_t)mp->b_datap->db_cksumend;
8268 				if (value != NULL)
8269 					*value =
8270 					    (uint32_t)mp->b_datap->db_cksum16;
8271 			}
8272 		}
8273 	} else {
8274 		pattrinfo_t hck_attr = {PATTR_HCKSUM};
8275 
8276 		ASSERT(mmd != NULL);
8277 
8278 		/* get hardware checksum attribute */
8279 		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
8280 			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;
8281 
8282 			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
8283 			if (flags != NULL)
8284 				*flags = hck->hcksum_flags;
8285 			if (start != NULL)
8286 				*start = hck->hcksum_start_offset;
8287 			if (stuff != NULL)
8288 				*stuff = hck->hcksum_stuff_offset;
8289 			if (end != NULL)
8290 				*end = hck->hcksum_end_offset;
8291 			if (value != NULL)
8292 				*value = (uint32_t)
8293 				    hck->hcksum_cksum_val.inet_cksum;
8294 		}
8295 	}
8296 }
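
/*
 * An illustrative sketch of the producer side of the interface above: a
 * NIC driver that computed a partial checksum in hardware attaches the
 * offsets and value to an M_DATA mblk before sending it up, and IP later
 * calls hcksum_retrieve() to collect them (example_mark_partial, the
 * offsets, and the STREAMS_EXAMPLES guard are hypothetical).
 */
#ifdef STREAMS_EXAMPLES
static void
example_mark_partial(mblk_t *mp, uint32_t start, uint32_t stuff,
    uint32_t end, uint16_t partial)
{
	/* mmd/pd are only needed for M_MULTIDATA, so pass NULL here. */
	(void) hcksum_assoc(mp, NULL, NULL, start, stuff, end,
	    (uint32_t)partial, HCK_PARTIALCKSUM, KM_NOSLEEP);
}
#endif	/* STREAMS_EXAMPLES */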
8297 
8298 /*
8299  * Checksum buffer *bp for len bytes with psum partial checksum,
8300  * or 0 if none, and return the 16 bit partial checksum.
8301  */
8302 unsigned
8303 bcksum(uchar_t *bp, int len, unsigned int psum)
8304 {
8305 	int odd = len & 1;
8306 	extern unsigned int ip_ocsum();
8307 
8308 	if (((intptr_t)bp & 1) == 0 && !odd) {
8309 		/*
8310 		 * Bp is 16-bit aligned and len is a multiple of 16-bit words.
8311 		 */
8312 		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
8313 	}
8314 	if (((intptr_t)bp & 1) != 0) {
8315 		/*
8316 		 * Bp isn't 16 bit aligned.
8317 		 */
8318 		unsigned int tsum;
8319 
8320 #ifdef _LITTLE_ENDIAN
8321 		psum += *bp;
8322 #else
8323 		psum += *bp << 8;
8324 #endif
8325 		len--;
8326 		bp++;
8327 		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
8328 		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);	/* swap bytes */
8329 		if (len & 1) {
8330 			bp += len - 1;
8331 #ifdef _LITTLE_ENDIAN
8332 			psum += *bp << 8;
8333 #else
8334 			psum += *bp;
8335 #endif
8336 		}
8337 	} else {
8338 		/*
8339 		 * Bp is 16 bit aligned.
8340 		 */
8341 		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
8342 		if (odd) {
8343 			bp += len - 1;
8344 #ifdef _LITTLE_ENDIAN
8345 			psum += *bp;
8346 #else
8347 			psum += *bp << 8;
8348 #endif
8349 		}
8350 	}
8351 	/*
8352 	 * Normalize psum to 16 bits before returning the new partial
8353 	 * checksum. The max psum value before normalization is 0x3FDFE.
8354 	 */
8355 	return ((psum >> 16) + (psum & 0xFFFF));
8356 }
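
/*
 * An illustrative sketch of why the single fold at the end of bcksum()
 * is enough: with psum bounded by 0x3FDFE, the high half is at most 3,
 * so one fold yields at most 0xFDFE + 3 = 0xFE01, which already fits in
 * 16 bits (example_fold and the STREAMS_EXAMPLES guard are
 * hypothetical).
 */
#ifdef STREAMS_EXAMPLES
static unsigned
example_fold(unsigned psum)
{
	ASSERT(psum <= 0x3FDFE);
	return ((psum >> 16) + (psum & 0xFFFF));	/* <= 0xFE01 */
}
#endif	/* STREAMS_EXAMPLES */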
8357 
8358 boolean_t
8359 is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
8360 {
8361 	boolean_t rc;
8362 
8363 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8364 	if (DB_TYPE(mp) == M_DATA) {
8365 		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
8366 	} else {
8367 		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};
8368 
8369 		ASSERT(mmd != NULL);
8370 		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
8371 	}
8372 	return (rc);
8373 }
8374 
8375 void
8376 freemsgchain(mblk_t *mp)
8377 {
8378 	mblk_t	*next;
8379 
8380 	while (mp != NULL) {
8381 		next = mp->b_next;
8382 		mp->b_next = NULL;
8383 
8384 		freemsg(mp);
8385 		mp = next;
8386 	}
8387 }
8388 
8389 mblk_t *
8390 copymsgchain(mblk_t *mp)
8391 {
8392 	mblk_t	*nmp = NULL;
8393 	mblk_t	**nmpp = &nmp;
8394 
8395 	for (; mp != NULL; mp = mp->b_next) {
8396 		if ((*nmpp = copymsg(mp)) == NULL) {
8397 			freemsgchain(nmp);
8398 			return (NULL);
8399 		}
8400 
8401 		nmpp = &((*nmpp)->b_next);
8402 	}
8403 
8404 	return (nmp);
8405 }
8406 
8407 /* NOTE: Do not add code after this point. */
8408 #undef QLOCK
8409 
8410 /*
8411  * Replacement for the QLOCK macro for those that can't use it.
8412  */
8413 kmutex_t *
8414 QLOCK(queue_t *q)
8415 {
8416 	return (&(q)->q_lock);
8417 }
8418 
8419 /*
8420  * Dummy runqueues/queuerun functions for backwards compatibility.
8421  */
8422 #undef runqueues
8423 void
8424 runqueues(void)
8425 {
8426 }
8427 
8428 #undef queuerun
8429 void
8430 queuerun(void)
8431 {
8432 }
8433