xref: /titanic_52/usr/src/uts/common/os/strsubr.c (revision b6c3f7863936abeae522e48a13887dddeb691a45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
22 /*	  All Rights Reserved  	*/
23 
24 
25 /*
26  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
27  * Use is subject to license terms.
28  */
29 
30 #pragma ident	"%Z%%M%	%I%	%E% SMI"
31 
32 #include <sys/types.h>
33 #include <sys/sysmacros.h>
34 #include <sys/param.h>
35 #include <sys/errno.h>
36 #include <sys/signal.h>
37 #include <sys/proc.h>
38 #include <sys/conf.h>
39 #include <sys/cred.h>
40 #include <sys/user.h>
41 #include <sys/vnode.h>
42 #include <sys/file.h>
43 #include <sys/session.h>
44 #include <sys/stream.h>
45 #include <sys/strsubr.h>
46 #include <sys/stropts.h>
47 #include <sys/poll.h>
48 #include <sys/systm.h>
49 #include <sys/cpuvar.h>
50 #include <sys/uio.h>
51 #include <sys/cmn_err.h>
52 #include <sys/priocntl.h>
53 #include <sys/procset.h>
54 #include <sys/vmem.h>
55 #include <sys/bitmap.h>
56 #include <sys/kmem.h>
57 #include <sys/siginfo.h>
58 #include <sys/vtrace.h>
59 #include <sys/callb.h>
60 #include <sys/debug.h>
61 #include <sys/modctl.h>
62 #include <sys/vmsystm.h>
63 #include <vm/page.h>
64 #include <sys/atomic.h>
65 #include <sys/suntpi.h>
66 #include <sys/strlog.h>
67 #include <sys/promif.h>
68 #include <sys/project.h>
69 #include <sys/vm.h>
70 #include <sys/taskq.h>
71 #include <sys/sunddi.h>
72 #include <sys/sunldi_impl.h>
73 #include <sys/strsun.h>
74 #include <sys/isa_defs.h>
75 #include <sys/multidata.h>
76 #include <sys/pattr.h>
77 #include <sys/strft.h>
78 #include <sys/fs/snode.h>
79 #include <sys/zone.h>
80 #include <sys/open.h>
81 #include <sys/sunldi.h>
82 #include <sys/sad.h>
83 #include <sys/netstack.h>
84 
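/*
 * True when q has a q_next neighbor running in the same direction,
 * i.e. both queues are read queues or both are write queues.
 */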
85 #define	O_SAMESTR(q)	(((q)->q_next) && \
86 	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))
87 
88 /*
89  * WARNING:
90  * The variables and routines in this file are private, belonging
91  * to the STREAMS subsystem. These should not be used by modules
92  * or drivers. Compatibility will not be guaranteed.
93  */
94 
95 /*
96  * Id value used to distinguish between different multiplexor links.
97  */
98 static int32_t lnk_id = 0;
99 
100 #define	STREAMS_LOPRI MINCLSYSPRI
101 static pri_t streams_lopri = STREAMS_LOPRI;
102 
103 #define	STRSTAT(x)	(str_statistics.x.value.ui64++)
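/* e.g. STRSTAT(taskqfails) increments the "taskqfails" counter below */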
104 typedef struct str_stat {
105 	kstat_named_t	sqenables;
106 	kstat_named_t	stenables;
107 	kstat_named_t	syncqservice;
108 	kstat_named_t	freebs;
109 	kstat_named_t	qwr_outer;
110 	kstat_named_t	rservice;
111 	kstat_named_t	strwaits;
112 	kstat_named_t	taskqfails;
113 	kstat_named_t	bufcalls;
114 	kstat_named_t	qhelps;
115 	kstat_named_t	qremoved;
116 	kstat_named_t	sqremoved;
117 	kstat_named_t	bcwaits;
118 	kstat_named_t	sqtoomany;
119 } str_stat_t;
120 
121 static str_stat_t str_statistics = {
122 	{ "sqenables",		KSTAT_DATA_UINT64 },
123 	{ "stenables",		KSTAT_DATA_UINT64 },
124 	{ "syncqservice",	KSTAT_DATA_UINT64 },
125 	{ "freebs",		KSTAT_DATA_UINT64 },
126 	{ "qwr_outer",		KSTAT_DATA_UINT64 },
127 	{ "rservice",		KSTAT_DATA_UINT64 },
128 	{ "strwaits",		KSTAT_DATA_UINT64 },
129 	{ "taskqfails",		KSTAT_DATA_UINT64 },
130 	{ "bufcalls",		KSTAT_DATA_UINT64 },
131 	{ "qhelps",		KSTAT_DATA_UINT64 },
132 	{ "qremoved",		KSTAT_DATA_UINT64 },
133 	{ "sqremoved",		KSTAT_DATA_UINT64 },
134 	{ "bcwaits",		KSTAT_DATA_UINT64 },
135 	{ "sqtoomany",		KSTAT_DATA_UINT64 },
136 };
137 
138 static kstat_t *str_kstat;
139 
140 /*
141  * qrunflag was used previously to control background scheduling of queues. It
142  * is not used anymore, but kept here in case some module still wants to access
143  * it via the qready() and setqsched macros.
144  */
145 char qrunflag;			/*  Unused */
146 
147 /*
148  * Most of the streams scheduling is done via task queues. Task queues may fail
149  * for non-sleep dispatches, so there are two backup threads servicing failed
150  * requests for queues and syncqs. Both of these threads also service freebs
151  * requests from failed dispatches. Queues are put in the list specified by `qhead'
152  * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
153  * requests are put into `freebs_list' which has no tail pointer. All three
154  * lists are protected by a single `service_queue' lock and use
155  * `services_to_run' condition variable for signaling background threads. Use of
156  * a single lock should not be a problem because it is only used under heavy
157  * loads when task queues start to fail and at that time it may be a good idea
158  * to throttle scheduling requests.
159  *
160  * NOTE: queues and syncqs should be scheduled by two separate threads because
161  * queue servicing may be blocked waiting for a syncq which may be also
162  * scheduled for background execution. This may create a deadlock when only one
163  * thread is used for both.
164  */
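/*
 * A minimal sketch of the backup-thread pattern described above; the real
 * loops live in streams_qbkgrnd_service() and streams_sqbkgrnd_service(),
 * declared below:
 *
 *	mutex_enter(&service_queue);
 *	for (;;) {
 *		queue_t *q;
 *
 *		DQ(q, qhead, qtail, q_link);
 *		if (q == NULL) {
 *			cv_wait(&services_to_run, &service_queue);
 *			continue;
 *		}
 *		mutex_exit(&service_queue);
 *		queue_service(q);
 *		mutex_enter(&service_queue);
 *	}
 */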
165 
166 static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */
167 
168 static kmutex_t service_queue;		/* protects all of servicing vars */
169 static kcondvar_t services_to_run;	/* wake up background service thread */
170 static kcondvar_t syncqs_to_run;	/* wake up background syncq service thread */
171 
172 /*
173  * List of queues scheduled for background processing due to lack of resources
174  * in the task queues. Protected by the service_queue lock.
175  */
176 static struct queue *qhead;
177 static struct queue *qtail;
178 
179 /*
180  * Same list for syncqs
181  */
182 static syncq_t *sqhead;
183 static syncq_t *sqtail;
184 
185 static mblk_t *freebs_list;	/* list of buffers to free */
186 
187 /*
188  * Backup threads for servicing queues and syncqs
189  */
190 kthread_t *streams_qbkgrnd_thread;
191 kthread_t *streams_sqbkgrnd_thread;
192 
193 /*
194  * Bufcalls related variables.
195  */
196 struct bclist	strbcalls;	/* list of waiting bufcalls */
197 kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
198 kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
199 kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
200 kcondvar_t	bcall_cv;	/* wait 'till executing bufcall completes */
201 kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */
202 
203 kmutex_t	strresources;	/* protects global resources */
204 kmutex_t	muxifier;	/* single-threads multiplexor creation */
205 
206 static void	*str_stack_init(netstackid_t stackid, netstack_t *ns);
207 static void	str_stack_shutdown(netstackid_t stackid, void *arg);
208 static void	str_stack_fini(netstackid_t stackid, void *arg);
209 
210 extern void	time_to_wait(clock_t *, clock_t);
211 
212 /*
213  * run_queues is no longer used, but is kept in case some third-party
214  * module/driver decides to use it.
215  */
216 int run_queues = 0;
217 
218 /*
219  * sq_max_size is the depth of the syncq (in number of messages) before
220  * qfill_syncq() starts QFULL'ing destination queues. Although its primary
221  * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
222  * depend on this syncq flow control, so we prefer a large number as the
223  * default value. For potential performance gain, this value is tunable
224  * in /etc/system.
225  */
226 int sq_max_size = 10000;
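/*
 * For example, the following /etc/system line would raise the threshold
 * (the value shown is purely illustrative):
 *
 *	set sq_max_size = 25000
 */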
227 
228 /*
229  * The number of ciputctrl structures per syncq and stream we create when
230  * needed.
231  */
232 int n_ciputctrl;
233 int max_n_ciputctrl = 16;
234 /*
235  * If n_ciputctrl < min_n_ciputctrl, don't even create ciputctrl_cache.
236  */
237 int min_n_ciputctrl = 2;
238 
239 /*
240  * Per-driver/module syncqs
241  * ========================
242  *
243  * For drivers/modules that use PERMOD or outer syncqs we keep a list of
244  * perdm structures, new entries being added (and new syncqs allocated) when
245  * setq() encounters a module/driver with a streamtab that it hasn't seen
246  * before.
247  * The reason for this mechanism is that some modules and drivers share a
248  * common streamtab and it is necessary for those modules and drivers to also
249  * share a common PERMOD syncq.
250  *
251  * perdm_list --> dm_str == streamtab_1
252  *                dm_sq == syncq_1
253  *                dm_ref
254  *                dm_next --> dm_str == streamtab_2
255  *                            dm_sq == syncq_2
256  *                            dm_ref
257  *                            dm_next --> ... NULL
258  *
259  * The dm_ref field is incremented for each new driver/module that takes
260  * a reference to the perdm structure and hence shares the syncq.
261  * References are held in the fmodsw_impl_t structure for each STREAMS module
262  * or the dev_impl array (indexed by device major number) for each driver.
263  *
264  * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
265  *		     ^                 ^ ^               ^
266  *                   |  ______________/  |               |
267  *                   | /                 |               |
268  * dev_impl:     ...|x|y|...          module A	      module B
269  *
270  * When a module/driver is unloaded the reference count is decremented and,
271  * when it falls to zero, the perdm structure is removed from the list and
272  * the syncq is freed (see rele_dm()).
273  */
274 perdm_t *perdm_list = NULL;
275 static krwlock_t perdm_rwlock;
276 cdevsw_impl_t *devimpl;
277 
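/*
 * A minimal sketch (not the actual hold_dm() implementation) of how a
 * streamtab `str' is matched to an existing perdm entry under
 * perdm_rwlock:
 *
 *	perdm_t *dmp;
 *
 *	rw_enter(&perdm_rwlock, RW_READER);
 *	for (dmp = perdm_list; dmp != NULL; dmp = dmp->dm_next) {
 *		if (dmp->dm_str == str)
 *			break;		(streamtab seen before: share dm_sq)
 *	}
 *	rw_exit(&perdm_rwlock);
 */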
278 extern struct qinit strdata;
279 extern struct qinit stwdata;
280 
281 static void runservice(queue_t *);
282 static void streams_bufcall_service(void);
283 static void streams_qbkgrnd_service(void);
284 static void streams_sqbkgrnd_service(void);
285 static syncq_t *new_syncq(void);
286 static void free_syncq(syncq_t *);
287 static void outer_insert(syncq_t *, syncq_t *);
288 static void outer_remove(syncq_t *, syncq_t *);
289 static void write_now(syncq_t *);
290 static void clr_qfull(queue_t *);
291 static void enable_svc(queue_t *);
292 static void runbufcalls(void);
293 static void sqenable(syncq_t *);
294 static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
295 static void wait_q_syncq(queue_t *);
296 static void backenable_insertedq(queue_t *);
297 
298 static void queue_service(queue_t *);
299 static void stream_service(stdata_t *);
300 static void syncq_service(syncq_t *);
301 static void qwriter_outer_service(syncq_t *);
302 static void mblk_free(mblk_t *);
303 #ifdef DEBUG
304 static int qprocsareon(queue_t *);
305 #endif
306 
307 static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
308 static void reset_nfsrv_ptr(queue_t *, queue_t *);
309 
310 static void sq_run_events(syncq_t *);
311 static int propagate_syncq(queue_t *);
312 
313 static void	blocksq(syncq_t *, ushort_t, int);
314 static void	unblocksq(syncq_t *, ushort_t, int);
315 static int	dropsq(syncq_t *, uint16_t);
316 static void	emptysq(syncq_t *);
317 static sqlist_t *sqlist_alloc(struct stdata *, int);
318 static void	sqlist_free(sqlist_t *);
319 static sqlist_t	*sqlist_build(queue_t *, struct stdata *, boolean_t);
320 static void	sqlist_insert(sqlist_t *, syncq_t *);
321 static void	sqlist_insertall(sqlist_t *, queue_t *);
322 
323 static void	strsetuio(stdata_t *);
324 
325 struct kmem_cache *stream_head_cache;
326 struct kmem_cache *queue_cache;
327 struct kmem_cache *syncq_cache;
328 struct kmem_cache *qband_cache;
329 struct kmem_cache *linkinfo_cache;
330 struct kmem_cache *ciputctrl_cache = NULL;
331 
332 static linkinfo_t *linkinfo_list;
333 
334 /* global esballoc throttling queue */
335 static esb_queue_t	system_esbq;
336 
337 /*
338  * esballoc tunable parameters.
339  */
340 int		esbq_max_qlen = 0x16;	/* throttled queue length */
341 clock_t		esbq_timeout = 0x8;	/* timeout to process esb queue */
342 
343 /*
344  * routines to handle esballoc queuing.
345  */
346 static void esballoc_process_queue(esb_queue_t *);
347 static void esballoc_enqueue_mblk(mblk_t *);
348 static void esballoc_timer(void *);
349 static void esballoc_set_timer(esb_queue_t *, clock_t);
350 static void esballoc_mblk_free(mblk_t *);
351 
352 /*
353  *  Qinit and module_info structures
354  *	for the passthru read and write queues
355  */
356 
357 static void pass_wput(queue_t *, mblk_t *);
358 static queue_t *link_addpassthru(stdata_t *);
359 static void link_rempassthru(queue_t *);
360 
361 struct  module_info passthru_info = {
362 	0,
363 	"passthru",
364 	0,
365 	INFPSZ,
366 	STRHIGH,
367 	STRLOW
368 };
369 
370 struct  qinit passthru_rinit = {
371 	(int (*)())putnext,
372 	NULL,
373 	NULL,
374 	NULL,
375 	NULL,
376 	&passthru_info,
377 	NULL
378 };
379 
380 struct  qinit passthru_winit = {
381 	(int (*)()) pass_wput,
382 	NULL,
383 	NULL,
384 	NULL,
385 	NULL,
386 	&passthru_info,
387 	NULL
388 };
389 
390 /*
391  * Special form of assertion: verify that X implies Y, i.e. when X is true Y
392  * should also be true.
393  */
394 #define	IMPLY(X, Y)	ASSERT(!(X) || (Y))
395 
396 /*
397  * Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
398  */
399 #define	EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }
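/*
 * For example, EQUIV(sq->sq_head, sq->sq_tail) asserts that a list's
 * head and tail pointers are either both NULL or both non-NULL.
 */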
400 
401 /*
402  * Verify correctness of list head/tail pointers.
403  */
404 #define	LISTCHECK(head, tail, link) {				\
405 	EQUIV(head, tail);					\
406 	IMPLY(tail != NULL, tail->link == NULL);		\
407 }
408 
409 /*
410  * Enqueue a list element `el' at the end of a list denoted by `head' and `tail'
411  * using a `link' field.
412  */
413 #define	ENQUEUE(el, head, tail, link) {				\
414 	ASSERT(el->link == NULL);				\
415 	LISTCHECK(head, tail, link);				\
416 	if (head == NULL)					\
417 		head = el;					\
418 	else							\
419 		tail->link = el;				\
420 	tail = el;						\
421 }
422 
423 /*
424  * Dequeue the first element of the list denoted by `head' and `tail' pointers
425  * using a `link' field and put the result into `el'.
426  */
427 #define	DQ(el, head, tail, link) {				\
428 	LISTCHECK(head, tail, link);				\
429 	el = head;						\
430 	if (head != NULL) {					\
431 		head = head->link;				\
432 		if (head == NULL)				\
433 			tail = NULL;				\
434 		el->link = NULL;				\
435 	}							\
436 }
437 
438 /*
439  * Remove `el' from the list using `chase' and `curr' scan pointers; set
440  * `succeed' to 1 if `el' was found and removed, and to 0 otherwise.
441  */
442 #define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
443 	LISTCHECK(head, tail, link);				\
444 	chase = NULL;						\
445 	succeed = 0;						\
446 	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
447 		chase = curr;					\
448 	if (curr != NULL) {					\
449 		succeed = 1;					\
450 		ASSERT(curr == el);				\
451 		if (chase != NULL)				\
452 			chase->link = curr->link;		\
453 		else						\
454 			head = curr->link;			\
455 		curr->link = NULL;				\
456 		if (curr == tail)				\
457 			tail = chase;				\
458 	}							\
459 	LISTCHECK(head, tail, link);				\
460 }
461 
462 /* Handling of delayed messages on the inner syncq. */
463 
464 /*
465  * DEBUG kernels should use the function versions (to simplify tracing) and
466  * non-DEBUG kernels should use macro versions.
467  */
468 
469 /*
470  * Put a queue on the syncq list of queues.
471  * Assumes SQLOCK held.
472  */
473 #define	SQPUT_Q(sq, qp)							\
474 {									\
475 	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
476 	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
477 		/* The queue should not be linked anywhere */		\
478 		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
479 		/* Head and tail may only be NULL simultaneously */	\
480 		EQUIV(sq->sq_head, sq->sq_tail);			\
481 		/* Queue may only be enqueued on its syncq */		\
482 		ASSERT(sq == qp->q_syncq);				\
483 		/* Check the correctness of SQ_MESSAGES flag */		\
484 		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
485 		/* Sanity check first/last elements of the list */	\
486 		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
487 		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
488 		/*							\
489 		 * Sanity check of priority field: empty queue should	\
490 		 * have zero priority					\
491 		 * and nqueues equal to zero.				\
492 		 */							\
493 		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
494 		/* Sanity check of sq_nqueues field */			\
495 		EQUIV(sq->sq_head, sq->sq_nqueues);			\
496 		if (sq->sq_head == NULL) {				\
497 			sq->sq_head = sq->sq_tail = qp;			\
498 			sq->sq_flags |= SQ_MESSAGES;			\
499 		} else if (qp->q_spri == 0) {				\
500 			qp->q_sqprev = sq->sq_tail;			\
501 			sq->sq_tail->q_sqnext = qp;			\
502 			sq->sq_tail = qp;				\
503 		} else {						\
504 			/*						\
505 			 * Put this queue in priority order: higher	\
506 			 * priority gets closer to the head.		\
507 			 */						\
508 			queue_t **qpp = &sq->sq_tail;			\
509 			queue_t *qnext = NULL;				\
510 									\
511 			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
512 				qnext = *qpp;				\
513 				qpp = &(*qpp)->q_sqprev;		\
514 			}						\
515 			qp->q_sqnext = qnext;				\
516 			qp->q_sqprev = *qpp;				\
517 			if (*qpp != NULL) {				\
518 				(*qpp)->q_sqnext = qp;			\
519 			} else {					\
520 				sq->sq_head = qp;			\
521 				sq->sq_pri = sq->sq_head->q_spri;	\
522 			}						\
523 			*qpp = qp;					\
524 		}							\
525 		qp->q_sqflags |= Q_SQQUEUED;				\
526 		qp->q_sqtstamp = lbolt;					\
527 		sq->sq_nqueues++;					\
528 	}								\
529 }
530 
531 /*
532  * Remove a queue from the syncq list
533  * Assumes SQLOCK held.
534  */
535 #define	SQRM_Q(sq, qp)							\
536 	{								\
537 		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
538 		ASSERT(qp->q_sqflags & Q_SQQUEUED);			\
539 		ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);	\
540 		ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);		\
541 		/* Check that the queue is actually in the list */	\
542 		ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);	\
543 		ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);	\
544 		ASSERT(sq->sq_nqueues != 0);				\
545 		if (qp->q_sqprev == NULL) {				\
546 			/* First queue on list, make head q_sqnext */	\
547 			sq->sq_head = qp->q_sqnext;			\
548 		} else {						\
549 			/* Make prev->next == next */			\
550 			qp->q_sqprev->q_sqnext = qp->q_sqnext;		\
551 		}							\
552 		if (qp->q_sqnext == NULL) {				\
553 			/* Last queue on list, make tail sqprev */	\
554 			sq->sq_tail = qp->q_sqprev;			\
555 		} else {						\
556 			/* Make next->prev == prev */			\
557 			qp->q_sqnext->q_sqprev = qp->q_sqprev;		\
558 		}							\
559 		/* clear out references on this queue */		\
560 		qp->q_sqprev = qp->q_sqnext = NULL;			\
561 		qp->q_sqflags &= ~Q_SQQUEUED;				\
562 		/* If there is nothing queued, clear SQ_MESSAGES */	\
563 		if (sq->sq_head != NULL) {				\
564 			sq->sq_pri = sq->sq_head->q_spri;		\
565 		} else	{						\
566 			sq->sq_flags &= ~SQ_MESSAGES;			\
567 			sq->sq_pri = 0;					\
568 		}							\
569 		sq->sq_nqueues--;					\
570 		ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||	\
571 		    (sq->sq_flags & SQ_QUEUED) == 0);			\
572 	}
573 
574 /* Hide the definition from the header file. */
575 #ifdef SQPUT_MP
576 #undef SQPUT_MP
577 #endif
578 
579 /*
580  * Put a message on the queue syncq.
581  * Assumes QLOCK held.
582  */
583 #define	SQPUT_MP(qp, mp)						\
584 	{								\
585 		ASSERT(MUTEX_HELD(QLOCK(qp)));				\
586 		ASSERT(qp->q_sqhead == NULL ||				\
587 		    (qp->q_sqtail != NULL &&				\
588 		    qp->q_sqtail->b_next == NULL));			\
589 		qp->q_syncqmsgs++;					\
590 		ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */	\
591 		if (qp->q_sqhead == NULL) {				\
592 			qp->q_sqhead = qp->q_sqtail = mp;		\
593 		} else {						\
594 			qp->q_sqtail->b_next = mp;			\
595 			qp->q_sqtail = mp;				\
596 		}							\
597 		ASSERT(qp->q_syncqmsgs > 0);				\
598 	}
599 
600 #define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
601 		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
602 		if ((sq)->sq_ciputctrl != NULL) {			\
603 			int i;						\
604 			int nlocks = (sq)->sq_nciputctrl;		\
605 			ciputctrl_t *cip = (sq)->sq_ciputctrl;		\
606 			ASSERT((sq)->sq_type & SQ_CIPUT);		\
607 			for (i = 0; i <= nlocks; i++) {			\
608 				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
609 				cip[i].ciputctrl_count |= SQ_FASTPUT;	\
610 			}						\
611 		}							\
612 	}
613 
614 
615 #define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
616 		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
617 		if ((sq)->sq_ciputctrl != NULL) {			\
618 			int i;						\
619 			int nlocks = (sq)->sq_nciputctrl;		\
620 			ciputctrl_t *cip = (sq)->sq_ciputctrl;		\
621 			ASSERT((sq)->sq_type & SQ_CIPUT);		\
622 			for (i = 0; i <= nlocks; i++) {			\
623 				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
624 				cip[i].ciputctrl_count &= ~SQ_FASTPUT;	\
625 			}						\
626 		}							\
627 	}
628 
629 /*
630  * Run service procedures for all queues in the stream head.
631  */
632 #define	STR_SERVICE(stp, q) {						\
633 	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
634 	while (stp->sd_qhead != NULL) {					\
635 		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
636 		ASSERT(stp->sd_nqueues > 0);				\
637 		stp->sd_nqueues--;					\
638 		ASSERT(!(q->q_flag & QINSERVICE));			\
639 		mutex_exit(&stp->sd_qlock);				\
640 		queue_service(q);					\
641 		mutex_enter(&stp->sd_qlock);				\
642 	}								\
643 	ASSERT(stp->sd_nqueues == 0);					\
644 	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
645 }
646 
647 /*
648  * constructor/destructor routines for the stream head cache
649  */
650 /* ARGSUSED */
651 static int
652 stream_head_constructor(void *buf, void *cdrarg, int kmflags)
653 {
654 	stdata_t *stp = buf;
655 
656 	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
657 	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
658 	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
659 	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
660 	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
661 	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
662 	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
663 	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
664 	stp->sd_wrq = NULL;
665 
666 	return (0);
667 }
668 
669 /* ARGSUSED */
670 static void
671 stream_head_destructor(void *buf, void *cdrarg)
672 {
673 	stdata_t *stp = buf;
674 
675 	mutex_destroy(&stp->sd_lock);
676 	mutex_destroy(&stp->sd_reflock);
677 	mutex_destroy(&stp->sd_qlock);
678 	cv_destroy(&stp->sd_monitor);
679 	cv_destroy(&stp->sd_iocmonitor);
680 	cv_destroy(&stp->sd_refmonitor);
681 	cv_destroy(&stp->sd_qcv);
682 	cv_destroy(&stp->sd_zcopy_wait);
683 }
684 
685 /*
686  * constructor/destructor routines for the queue cache
687  */
688 /* ARGSUSED */
689 static int
690 queue_constructor(void *buf, void *cdrarg, int kmflags)
691 {
692 	queinfo_t *qip = buf;
693 	queue_t *qp = &qip->qu_rqueue;
694 	queue_t *wqp = &qip->qu_wqueue;
695 	syncq_t	*sq = &qip->qu_syncq;
696 
697 	qp->q_first = NULL;
698 	qp->q_link = NULL;
699 	qp->q_count = 0;
700 	qp->q_mblkcnt = 0;
701 	qp->q_sqhead = NULL;
702 	qp->q_sqtail = NULL;
703 	qp->q_sqnext = NULL;
704 	qp->q_sqprev = NULL;
705 	qp->q_sqflags = 0;
706 	qp->q_rwcnt = 0;
707 	qp->q_spri = 0;
708 
709 	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
710 	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);
711 
712 	wqp->q_first = NULL;
713 	wqp->q_link = NULL;
714 	wqp->q_count = 0;
715 	wqp->q_mblkcnt = 0;
716 	wqp->q_sqhead = NULL;
717 	wqp->q_sqtail = NULL;
718 	wqp->q_sqnext = NULL;
719 	wqp->q_sqprev = NULL;
720 	wqp->q_sqflags = 0;
721 	wqp->q_rwcnt = 0;
722 	wqp->q_spri = 0;
723 
724 	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
725 	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);
726 
727 	sq->sq_head = NULL;
728 	sq->sq_tail = NULL;
729 	sq->sq_evhead = NULL;
730 	sq->sq_evtail = NULL;
731 	sq->sq_callbpend = NULL;
732 	sq->sq_outer = NULL;
733 	sq->sq_onext = NULL;
734 	sq->sq_oprev = NULL;
735 	sq->sq_next = NULL;
736 	sq->sq_svcflags = 0;
737 	sq->sq_servcount = 0;
738 	sq->sq_needexcl = 0;
739 	sq->sq_nqueues = 0;
740 	sq->sq_pri = 0;
741 
742 	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
743 	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
744 	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
745 
746 	return (0);
747 }
748 
749 /* ARGSUSED */
750 static void
751 queue_destructor(void *buf, void *cdrarg)
752 {
753 	queinfo_t *qip = buf;
754 	queue_t *qp = &qip->qu_rqueue;
755 	queue_t *wqp = &qip->qu_wqueue;
756 	syncq_t	*sq = &qip->qu_syncq;
757 
758 	ASSERT(qp->q_sqhead == NULL);
759 	ASSERT(wqp->q_sqhead == NULL);
760 	ASSERT(qp->q_sqnext == NULL);
761 	ASSERT(wqp->q_sqnext == NULL);
762 	ASSERT(qp->q_rwcnt == 0);
763 	ASSERT(wqp->q_rwcnt == 0);
764 
765 	mutex_destroy(&qp->q_lock);
766 	cv_destroy(&qp->q_wait);
767 
768 	mutex_destroy(&wqp->q_lock);
769 	cv_destroy(&wqp->q_wait);
770 
771 	mutex_destroy(&sq->sq_lock);
772 	cv_destroy(&sq->sq_wait);
773 	cv_destroy(&sq->sq_exitwait);
774 }
775 
776 /*
777  * constructor/destructor routines for the syncq cache
778  */
779 /* ARGSUSED */
780 static int
781 syncq_constructor(void *buf, void *cdrarg, int kmflags)
782 {
783 	syncq_t	*sq = buf;
784 
785 	bzero(buf, sizeof (syncq_t));
786 
787 	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
788 	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
789 	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
790 
791 	return (0);
792 }
793 
794 /* ARGSUSED */
795 static void
796 syncq_destructor(void *buf, void *cdrarg)
797 {
798 	syncq_t	*sq = buf;
799 
800 	ASSERT(sq->sq_head == NULL);
801 	ASSERT(sq->sq_tail == NULL);
802 	ASSERT(sq->sq_evhead == NULL);
803 	ASSERT(sq->sq_evtail == NULL);
804 	ASSERT(sq->sq_callbpend == NULL);
805 	ASSERT(sq->sq_callbflags == 0);
806 	ASSERT(sq->sq_outer == NULL);
807 	ASSERT(sq->sq_onext == NULL);
808 	ASSERT(sq->sq_oprev == NULL);
809 	ASSERT(sq->sq_next == NULL);
810 	ASSERT(sq->sq_needexcl == 0);
811 	ASSERT(sq->sq_svcflags == 0);
812 	ASSERT(sq->sq_servcount == 0);
813 	ASSERT(sq->sq_nqueues == 0);
814 	ASSERT(sq->sq_pri == 0);
815 	ASSERT(sq->sq_count == 0);
816 	ASSERT(sq->sq_rmqcount == 0);
817 	ASSERT(sq->sq_cancelid == 0);
818 	ASSERT(sq->sq_ciputctrl == NULL);
819 	ASSERT(sq->sq_nciputctrl == 0);
820 	ASSERT(sq->sq_type == 0);
821 	ASSERT(sq->sq_flags == 0);
822 
823 	mutex_destroy(&sq->sq_lock);
824 	cv_destroy(&sq->sq_wait);
825 	cv_destroy(&sq->sq_exitwait);
826 }
827 
828 /* ARGSUSED */
829 static int
830 ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
831 {
832 	ciputctrl_t *cip = buf;
833 	int i;
834 
835 	for (i = 0; i < n_ciputctrl; i++) {
836 		cip[i].ciputctrl_count = SQ_FASTPUT;
837 		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
838 	}
839 
840 	return (0);
841 }
842 
843 /* ARGSUSED */
844 static void
845 ciputctrl_destructor(void *buf, void *cdrarg)
846 {
847 	ciputctrl_t *cip = buf;
848 	int i;
849 
850 	for (i = 0; i < n_ciputctrl; i++) {
851 		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
852 		mutex_destroy(&cip[i].ciputctrl_lock);
853 	}
854 }
855 
856 /*
857  * Init routine run from main at boot time.
858  */
859 void
860 strinit(void)
861 {
862 	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
863 
864 	stream_head_cache = kmem_cache_create("stream_head_cache",
865 	    sizeof (stdata_t), 0,
866 	    stream_head_constructor, stream_head_destructor, NULL,
867 	    NULL, NULL, 0);
868 
869 	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
870 	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);
871 
872 	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
873 	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);
874 
875 	qband_cache = kmem_cache_create("qband_cache",
876 	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
877 
878 	linkinfo_cache = kmem_cache_create("linkinfo_cache",
879 	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
880 
881 	n_ciputctrl = ncpus;
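	/* round ncpus up to a power of two, e.g. 6 CPUs gives n_ciputctrl == 8 */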
882 	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
883 	ASSERT(n_ciputctrl >= 1);
884 	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
885 	if (n_ciputctrl >= min_n_ciputctrl) {
886 		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
887 		    sizeof (ciputctrl_t) * n_ciputctrl,
888 		    sizeof (ciputctrl_t), ciputctrl_constructor,
889 		    ciputctrl_destructor, NULL, NULL, NULL, 0);
890 	}
891 
892 	streams_taskq = system_taskq;
893 
894 	if (streams_taskq == NULL)
895 		panic("strinit: no memory for streams taskq!");
896 
897 	bc_bkgrnd_thread = thread_create(NULL, 0,
898 	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);
899 
900 	streams_qbkgrnd_thread = thread_create(NULL, 0,
901 	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
902 
903 	streams_sqbkgrnd_thread = thread_create(NULL, 0,
904 	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
905 
906 	/*
907 	 * Create STREAMS kstats.
908 	 */
909 	str_kstat = kstat_create("streams", 0, "strstat",
910 	    "net", KSTAT_TYPE_NAMED,
911 	    sizeof (str_statistics) / sizeof (kstat_named_t),
912 	    KSTAT_FLAG_VIRTUAL);
913 
914 	if (str_kstat != NULL) {
915 		str_kstat->ks_data = &str_statistics;
916 		kstat_install(str_kstat);
917 	}
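	/*
	 * These counters can be inspected from userland with kstat(1M),
	 * e.g. "kstat -m streams -n strstat".
	 */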
918 
919 	/*
920 	 * TPI support routine initialisation.
921 	 */
922 	tpi_init();
923 
924 	/*
925 	 * Keep autopush and persistent link information on a per-zone
926 	 * basis.
927 	 * Note: uses shutdown hook instead of destroy hook so that the
928 	 * persistent links can be torn down before the destroy hooks
929 	 * in the TCP/IP stack are called.
930 	 */
931 	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
932 	    str_stack_fini);
933 }
934 
935 void
936 str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
937 {
938 	struct stdata *stp;
939 
940 	ASSERT(vp->v_stream);
941 	stp = vp->v_stream;
942 	/* Have to hold sd_lock to prevent siglist from changing */
943 	mutex_enter(&stp->sd_lock);
944 	if (stp->sd_sigflags & event)
945 		strsendsig(stp->sd_siglist, event, band, error);
946 	mutex_exit(&stp->sd_lock);
947 }
948 
949 /*
950  * Send the "sevent" set of signals to a process.
951  * This might send more than one signal if the process is registered
952  * for multiple events. The caller should pass in an sevent that only
953  * includes the events for which the process has registered.
954  */
955 static void
956 dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
957 	uchar_t band, int error)
958 {
959 	ASSERT(MUTEX_HELD(&proc->p_lock));
960 
961 	info->si_band = 0;
962 	info->si_errno = 0;
963 
964 	if (sevent & S_ERROR) {
965 		sevent &= ~S_ERROR;
966 		info->si_code = POLL_ERR;
967 		info->si_errno = error;
968 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
969 		    "strsendsig:proc %p info %p", proc, info);
970 		sigaddq(proc, NULL, info, KM_NOSLEEP);
971 		info->si_errno = 0;
972 	}
973 	if (sevent & S_HANGUP) {
974 		sevent &= ~S_HANGUP;
975 		info->si_code = POLL_HUP;
976 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
977 		    "strsendsig:proc %p info %p", proc, info);
978 		sigaddq(proc, NULL, info, KM_NOSLEEP);
979 	}
980 	if (sevent & S_HIPRI) {
981 		sevent &= ~S_HIPRI;
982 		info->si_code = POLL_PRI;
983 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
984 		    "strsendsig:proc %p info %p", proc, info);
985 		sigaddq(proc, NULL, info, KM_NOSLEEP);
986 	}
987 	if (sevent & S_RDBAND) {
988 		sevent &= ~S_RDBAND;
989 		if (events & S_BANDURG)
990 			sigtoproc(proc, NULL, SIGURG);
991 		else
992 			sigtoproc(proc, NULL, SIGPOLL);
993 	}
994 	if (sevent & S_WRBAND) {
995 		sevent &= ~S_WRBAND;
996 		sigtoproc(proc, NULL, SIGPOLL);
997 	}
998 	if (sevent & S_INPUT) {
999 		sevent &= ~S_INPUT;
1000 		info->si_code = POLL_IN;
1001 		info->si_band = band;
1002 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1003 		    "strsendsig:proc %p info %p", proc, info);
1004 		sigaddq(proc, NULL, info, KM_NOSLEEP);
1005 		info->si_band = 0;
1006 	}
1007 	if (sevent & S_OUTPUT) {
1008 		sevent &= ~S_OUTPUT;
1009 		info->si_code = POLL_OUT;
1010 		info->si_band = band;
1011 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1012 		    "strsendsig:proc %p info %p", proc, info);
1013 		sigaddq(proc, NULL, info, KM_NOSLEEP);
1014 		info->si_band = 0;
1015 	}
1016 	if (sevent & S_MSG) {
1017 		sevent &= ~S_MSG;
1018 		info->si_code = POLL_MSG;
1019 		info->si_band = band;
1020 		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1021 		    "strsendsig:proc %p info %p", proc, info);
1022 		sigaddq(proc, NULL, info, KM_NOSLEEP);
1023 		info->si_band = 0;
1024 	}
1025 	if (sevent & S_RDNORM) {
1026 		sevent &= ~S_RDNORM;
1027 		sigtoproc(proc, NULL, SIGPOLL);
1028 	}
1029 	if (sevent != 0) {
1030 		panic("strsendsig: unknown event(s) %x", sevent);
1031 	}
1032 }
1033 
1034 /*
1035  * Send SIGPOLL/SIGURG signal to all processes and process groups
1036  * registered on the given signal list that want a signal for at
1037  * least one of the specified events.
1038  *
1039  * Must be called with exclusive access to siglist (caller holding sd_lock).
1040  *
1041  * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
1042  * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
1043  * while it is in the siglist.
1044  *
1045  * For performance reasons (MP scalability) the code drops pidlock
1046  * when sending signals to a single process.
1047  * When sending to a process group the code holds
1048  * pidlock to prevent the membership in the process group from changing
1049  * while walking the p_pglink list.
1050  */
1051 void
1052 strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
1053 {
1054 	strsig_t *ssp;
1055 	k_siginfo_t info;
1056 	struct pid *pidp;
1057 	proc_t  *proc;
1058 
1059 	info.si_signo = SIGPOLL;
1060 	info.si_errno = 0;
1061 	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
1062 		int sevent;
1063 
1064 		sevent = ssp->ss_events & event;
1065 		if (sevent == 0)
1066 			continue;
1067 
1068 		if ((pidp = ssp->ss_pidp) == NULL) {
1069 			/* pid was released but still on event list */
1070 			continue;
1071 		}
1072 
1073 
1074 		if (ssp->ss_pid > 0) {
1075 			/*
1076 			 * XXX This unfortunately still generates
1077 			 * a signal when a fd is closed but
1078 			 * the proc is active.
1079 			 */
1080 			ASSERT(ssp->ss_pid == pidp->pid_id);
1081 
1082 			mutex_enter(&pidlock);
1083 			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
1084 			if (proc == NULL) {
1085 				mutex_exit(&pidlock);
1086 				continue;
1087 			}
1088 			mutex_enter(&proc->p_lock);
1089 			mutex_exit(&pidlock);
1090 			dosendsig(proc, ssp->ss_events, sevent, &info,
1091 			    band, error);
1092 			mutex_exit(&proc->p_lock);
1093 		} else {
1094 			/*
1095 			 * Send to process group. Hold pidlock across
1096 			 * calls to dosendsig().
1097 			 */
1098 			pid_t pgrp = -ssp->ss_pid;
1099 
1100 			mutex_enter(&pidlock);
1101 			proc = pgfind_zone(pgrp, ALL_ZONES);
1102 			while (proc != NULL) {
1103 				mutex_enter(&proc->p_lock);
1104 				dosendsig(proc, ssp->ss_events, sevent,
1105 				    &info, band, error);
1106 				mutex_exit(&proc->p_lock);
1107 				proc = proc->p_pglink;
1108 			}
1109 			mutex_exit(&pidlock);
1110 		}
1111 	}
1112 }
1113 
1114 /*
1115  * Attach a stream device or module.
1116  * qp is a read queue; the new queue is inserted so that its
1117  * next read ptr is qp, and the write queue corresponding
1118  * to qp points to this queue. Return 0 on success,
1119  * or a non-zero errno on failure.
1120  */
1121 int
1122 qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
1123     boolean_t is_insert)
1124 {
1125 	major_t			major;
1126 	cdevsw_impl_t		*dp;
1127 	struct streamtab	*str;
1128 	queue_t			*rq;
1129 	queue_t			*wrq;
1130 	uint32_t		qflag;
1131 	uint32_t		sqtype;
1132 	perdm_t			*dmp;
1133 	int			error;
1134 	int			sflag;
1135 
1136 	rq = allocq();
1137 	wrq = _WR(rq);
1138 	STREAM(rq) = STREAM(wrq) = STREAM(qp);
1139 
1140 	if (fp != NULL) {
1141 		str = fp->f_str;
1142 		qflag = fp->f_qflag;
1143 		sqtype = fp->f_sqtype;
1144 		dmp = fp->f_dmp;
1145 		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
1146 		sflag = MODOPEN;
1147 
1148 		/*
1149 		 * stash away a pointer to the module structure so we can
1150 		 * unref it in qdetach.
1151 		 */
1152 		rq->q_fp = fp;
1153 	} else {
1154 		ASSERT(!is_insert);
1155 
1156 		major = getmajor(*devp);
1157 		dp = &devimpl[major];
1158 
1159 		str = dp->d_str;
1160 		ASSERT(str == STREAMSTAB(major));
1161 
1162 		qflag = dp->d_qflag;
1163 		ASSERT(qflag & QISDRV);
1164 		sqtype = dp->d_sqtype;
1165 
1166 		/* create perdm_t if needed */
1167 		if (NEED_DM(dp->d_dmp, qflag))
1168 			dp->d_dmp = hold_dm(str, qflag, sqtype);
1169 
1170 		dmp = dp->d_dmp;
1171 		sflag = 0;
1172 	}
1173 
1174 	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
1175 	    "qattach:qflag == %X(%X)", qflag, *devp);
1176 
1177 	/* setq might sleep in allocator - avoid holding locks. */
1178 	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);
1179 
1180 	/*
1181 	 * Before calling the module's open routine, set up the q_next
1182 	 * pointer for inserting a module in the middle of a stream.
1183 	 *
1184 	 * Note that we can always set _QINSERTING and set up q_next
1185 	 * pointer for both inserting and pushing a module.  Then there
1186 	 * is no need for the is_insert parameter.  In insertq(), called
1187 	 * by qprocson(), assume that q_next of the new module always points
1188 	 * to the correct queue and use it for insertion.  Everything should
1189 	 * work out fine.  But in the first release of _I_INSERT, we
1190 	 * distinguish between inserting and pushing to make sure that
1191 	 * pushing a module follows the same code path as before.
1192 	 */
1193 	if (is_insert) {
1194 		rq->q_flag |= _QINSERTING;
1195 		rq->q_next = qp;
1196 	}
1197 
1198 	/*
1199 	 * If there is an outer perimeter get exclusive access during
1200 	 * the open procedure.  Bump up the reference count on the queue.
1201 	 */
1202 	entersq(rq->q_syncq, SQ_OPENCLOSE);
1203 	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
1204 	if (error != 0)
1205 		goto failed;
1206 	leavesq(rq->q_syncq, SQ_OPENCLOSE);
1207 	ASSERT(qprocsareon(rq));
1208 	return (0);
1209 
1210 failed:
1211 	rq->q_flag &= ~_QINSERTING;
1212 	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
1213 		qprocsoff(rq);
1214 	leavesq(rq->q_syncq, SQ_OPENCLOSE);
1215 	rq->q_next = wrq->q_next = NULL;
1216 	qdetach(rq, 0, 0, crp, B_FALSE);
1217 	return (error);
1218 }
1219 
1220 /*
1221  * Handle second open of stream. For modules, set the
1222  * last argument to MODOPEN and do not pass any open flags.
1223  * Ignore dummydev since this is not the first open.
1224  */
1225 int
1226 qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
1227 {
1228 	int	error;
1229 	dev_t dummydev;
1230 	queue_t *wqp = _WR(qp);
1231 
1232 	ASSERT(qp->q_flag & QREADR);
1233 	entersq(qp->q_syncq, SQ_OPENCLOSE);
1234 
1235 	dummydev = *devp;
1236 	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
1237 	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
1238 		leavesq(qp->q_syncq, SQ_OPENCLOSE);
1239 		mutex_enter(&STREAM(qp)->sd_lock);
1240 		qp->q_stream->sd_flag |= STREOPENFAIL;
1241 		mutex_exit(&STREAM(qp)->sd_lock);
1242 		return (error);
1243 	}
1244 	leavesq(qp->q_syncq, SQ_OPENCLOSE);
1245 
1246 	/*
1247 	 * successful open should have done qprocson()
1248 	 */
1249 	ASSERT(qprocsareon(_RD(qp)));
1250 	return (0);
1251 }
1252 
1253 /*
1254  * Detach a stream module or device.
1255  * If clmode == 1 then the module or driver was opened and its
1256  * close routine must be called. If clmode == 0, the module
1257  * or driver was never opened or the open failed, and so its close
1258  * should not be called.
1259  */
1260 void
1261 qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
1262 {
1263 	queue_t *wqp = _WR(qp);
1264 	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));
1265 
1266 	if (STREAM_NEEDSERVICE(STREAM(qp)))
1267 		stream_runservice(STREAM(qp));
1268 
1269 	if (clmode) {
1270 		/*
1271 		 * Make sure that all the messages on the write side syncq are
1272 		 * processed and nothing is left. Since we are closing, no new
1273 		 * messages may appear there.
1274 		 */
1275 		wait_q_syncq(wqp);
1276 
1277 		entersq(qp->q_syncq, SQ_OPENCLOSE);
1278 		if (is_remove) {
1279 			mutex_enter(QLOCK(qp));
1280 			qp->q_flag |= _QREMOVING;
1281 			mutex_exit(QLOCK(qp));
1282 		}
1283 		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
1284 		/*
1285 		 * Check that qprocsoff() was actually called.
1286 		 */
1287 		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));
1288 
1289 		leavesq(qp->q_syncq, SQ_OPENCLOSE);
1290 	} else {
1291 		disable_svc(qp);
1292 	}
1293 
1294 	/*
1295 	 * Allow any threads blocked in entersq to proceed and discover
1296 	 * that QWCLOSE is set.
1297 	 * Note: This assumes that all users of entersq check QWCLOSE.
1298 	 * Currently runservice is the only entersq that can happen
1299 	 * after removeq has finished.
1300 	 * Removeq will have discarded all messages destined to the closing
1301 	 * pair of queues from the syncq.
1302 	 * NOTE: Calling a function inside an assert is unconventional.
1303 	 * However, it does not cause any problem since flush_syncq() does
1304 	 * not change any state except when it returns non-zero i.e.
1305 	 * when the assert will trigger.
1306 	 */
1307 	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
1308 	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
1309 	ASSERT((qp->q_flag & QPERMOD) ||
1310 	    ((qp->q_syncq->sq_head == NULL) &&
1311 	    (wqp->q_syncq->sq_head == NULL)));
1312 
1313 	/* release any fmodsw_impl_t structure held on behalf of the queue */
1314 	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
1315 	if (qp->q_fp != NULL)
1316 		fmodsw_rele(qp->q_fp);
1317 
1318 	/* freeq removes us from the outer perimeter if any */
1319 	freeq(qp);
1320 }
1321 
1322 /* Prevent service procedures from being called */
1323 void
1324 disable_svc(queue_t *qp)
1325 {
1326 	queue_t *wqp = _WR(qp);
1327 
1328 	ASSERT(qp->q_flag & QREADR);
1329 	mutex_enter(QLOCK(qp));
1330 	qp->q_flag |= QWCLOSE;
1331 	mutex_exit(QLOCK(qp));
1332 	mutex_enter(QLOCK(wqp));
1333 	wqp->q_flag |= QWCLOSE;
1334 	mutex_exit(QLOCK(wqp));
1335 }
1336 
1337 /* allow service procedures to be called again */
1338 void
1339 enable_svc(queue_t *qp)
1340 {
1341 	queue_t *wqp = _WR(qp);
1342 
1343 	ASSERT(qp->q_flag & QREADR);
1344 	mutex_enter(QLOCK(qp));
1345 	qp->q_flag &= ~QWCLOSE;
1346 	mutex_exit(QLOCK(qp));
1347 	mutex_enter(QLOCK(wqp));
1348 	wqp->q_flag &= ~QWCLOSE;
1349 	mutex_exit(QLOCK(wqp));
1350 }
1351 
1352 /*
1353  * Remove queue from qhead/qtail if it is enabled.
1354  * Only reset QENAB if the queue was removed from the runlist.
1355  * A queue goes through these stages:
1356  *	It is on the service list and QENAB is set.
1357  *	It is removed from the service list but QENAB is still set.
1358  *	QENAB gets changed to QINSERVICE.
1359  *	QINSERVICE is reset (when the service procedure is done).
1360  * Thus we cannot reset QENAB unless we actually removed it from the service
1361  * queue.
1362  */
1363 void
1364 remove_runlist(queue_t *qp)
1365 {
1366 	if (qp->q_flag & QENAB && qhead != NULL) {
1367 		queue_t *q_chase;
1368 		queue_t *q_curr;
1369 		int removed;
1370 
1371 		mutex_enter(&service_queue);
1372 		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
1373 		mutex_exit(&service_queue);
1374 		if (removed) {
1375 			STRSTAT(qremoved);
1376 			qp->q_flag &= ~QENAB;
1377 		}
1378 	}
1379 }
1380 
1381 
1382 /*
1383  * Wait for any pending service processing to complete.
1384  * The removal of queues from the runlist is not atomic with the
1385  * clearing of the QENAB flag and the setting of the QINSERVICE flag.
1386  * Consequently it is possible for remove_runlist in strclose
1387  * to not find the queue on the runlist even though QENAB is still set
1388  * and QINSERVICE is not yet set - hence wait_svc needs to check QENAB
1389  * as well as QINSERVICE.
1390  */
1391 void
1392 wait_svc(queue_t *qp)
1393 {
1394 	queue_t *wqp = _WR(qp);
1395 
1396 	ASSERT(qp->q_flag & QREADR);
1397 
1398 	/*
1399 	 * Try to remove queues from qhead/qtail list.
1400 	 */
1401 	if (qhead != NULL) {
1402 		remove_runlist(qp);
1403 		remove_runlist(wqp);
1404 	}
1405 	/*
1406 	 * Wait until the syncqs associated with the queue
1407 	 * disappear from the background processing list.
1408 	 * This only needs to be done for non-PERMOD perimeters since
1409 	 * for PERMOD perimeters the syncq may be shared and will only be freed
1410 	 * when the last module/driver is unloaded.
1411 	 * If for PERMOD perimeters queue was on the syncq list, removeq()
1412 	 * should call propagate_syncq() or drain_syncq() for it. Both of these
1413 	 * functions remove the queue from its syncq list, so sqthread will not
1414 	 * try to access the queue.
1415 	 */
1416 	if (!(qp->q_flag & QPERMOD)) {
1417 		syncq_t *rsq = qp->q_syncq;
1418 		syncq_t *wsq = wqp->q_syncq;
1419 
1420 		/*
1421 		 * Disable rsq and wsq and wait for any background processing of
1422 		 * syncq to complete.
1423 		 */
1424 		wait_sq_svc(rsq);
1425 		if (wsq != rsq)
1426 			wait_sq_svc(wsq);
1427 	}
1428 
1429 	mutex_enter(QLOCK(qp));
1430 	while (qp->q_flag & (QINSERVICE|QENAB))
1431 		cv_wait(&qp->q_wait, QLOCK(qp));
1432 	mutex_exit(QLOCK(qp));
1433 	mutex_enter(QLOCK(wqp));
1434 	while (wqp->q_flag & (QINSERVICE|QENAB))
1435 		cv_wait(&wqp->q_wait, QLOCK(wqp));
1436 	mutex_exit(QLOCK(wqp));
1437 }
1438 
1439 /*
1440  * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
1441  * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
1442  * also be set, and is passed through to allocb_cred_wait().
1443  *
1444  * Returns errno on failure, zero on success.
1445  */
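/*
 * The data is appended to `bp' as a chain of b_cont blocks, each holding
 * at most MAXIOCBSZ bytes; e.g. a payload of 2.5 * MAXIOCBSZ bytes ends
 * up in three mblks linked off bp->b_cont.
 */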
1446 int
1447 putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
1448 {
1449 	mblk_t *tmp;
1450 	ssize_t  count;
1451 	size_t n;
1452 	int error = 0;
1453 
1454 	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
1455 	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);
1456 
1457 	if (bp->b_datap->db_type == M_IOCTL) {
1458 		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1459 	} else {
1460 		ASSERT(bp->b_datap->db_type == M_COPYIN);
1461 		count = ((struct copyreq *)bp->b_rptr)->cq_size;
1462 	}
1463 	/*
1464 	 * strdoioctl validates ioc_count, so if this assert fails it
1465 	 * cannot be due to user error.
1466 	 */
1467 	ASSERT(count >= 0);
1468 
1469 	while (count > 0) {
1470 		n = MIN(MAXIOCBSZ, count);
1471 		if ((tmp = allocb_cred_wait(n, (flag & STR_NOSIG), &error,
1472 		    cr)) == NULL) {
1473 			return (error);
1474 		}
1475 		error = strcopyin(arg, tmp->b_wptr, n, flag & (U_TO_K|K_TO_K));
1476 		if (error != 0) {
1477 			freeb(tmp);
1478 			return (error);
1479 		}
1480 		arg += n;
1481 		DB_CPID(tmp) = curproc->p_pid;
1482 		tmp->b_wptr += n;
1483 		count -= n;
1484 		bp = (bp->b_cont = tmp);
1485 	}
1486 
1487 	return (0);
1488 }
1489 
1490 /*
1491  * Copy ioctl data to user-land. Return non-zero errno on failure,
1492  * 0 for success.
1493  */
1494 int
1495 getiocd(mblk_t *bp, char *arg, int copymode)
1496 {
1497 	ssize_t count;
1498 	size_t  n;
1499 	int	error;
1500 
1501 	if (bp->b_datap->db_type == M_IOCACK)
1502 		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1503 	else {
1504 		ASSERT(bp->b_datap->db_type == M_COPYOUT);
1505 		count = ((struct copyreq *)bp->b_rptr)->cq_size;
1506 	}
1507 	ASSERT(count >= 0);
1508 
1509 	for (bp = bp->b_cont; bp && count;
1510 	    count -= n, bp = bp->b_cont, arg += n) {
1511 		n = MIN(count, bp->b_wptr - bp->b_rptr);
1512 		error = strcopyout(bp->b_rptr, arg, n, copymode);
1513 		if (error)
1514 			return (error);
1515 	}
1516 	ASSERT(count == 0);
1517 	return (0);
1518 }
1519 
1520 /*
1521  * Allocate a linkinfo entry given the write queue of the
1522  * bottom module of the top stream and the write queue of the
1523  * stream head of the bottom stream.
1524  */
1525 linkinfo_t *
1526 alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
1527 {
1528 	linkinfo_t *linkp;
1529 
1530 	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);
1531 
1532 	linkp->li_lblk.l_qtop = qup;
1533 	linkp->li_lblk.l_qbot = qdown;
1534 	linkp->li_fpdown = fpdown;
1535 
1536 	mutex_enter(&strresources);
1537 	linkp->li_next = linkinfo_list;
1538 	linkp->li_prev = NULL;
1539 	if (linkp->li_next)
1540 		linkp->li_next->li_prev = linkp;
1541 	linkinfo_list = linkp;
1542 	linkp->li_lblk.l_index = ++lnk_id;
1543 	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
1544 	mutex_exit(&strresources);
1545 
1546 	return (linkp);
1547 }
1548 
1549 /*
1550  * Free a linkinfo entry.
1551  */
1552 void
1553 lbfree(linkinfo_t *linkp)
1554 {
1555 	mutex_enter(&strresources);
1556 	if (linkp->li_next)
1557 		linkp->li_next->li_prev = linkp->li_prev;
1558 	if (linkp->li_prev)
1559 		linkp->li_prev->li_next = linkp->li_next;
1560 	else
1561 		linkinfo_list = linkp->li_next;
1562 	mutex_exit(&strresources);
1563 
1564 	kmem_cache_free(linkinfo_cache, linkp);
1565 }
1566 
1567 /*
1568  * Check for a potential linking cycle.
1569  * Return 1 if a link will result in a cycle,
1570  * and 0 otherwise.
1571  */
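/*
 * The loop below is an iterative depth-first walk of the mux graph:
 * MUX_CLEAR()/MUX_VISIT() mark visited nodes, mn_startp remembers a
 * node's next unexplored edge, and mn_originp serves as the parent
 * pointer used for backtracking.  A cycle is reported when the walk
 * starting at the lower stream's node reaches the upper stream's major
 * number (mn_imaj == upmaj).
 */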
1572 int
1573 linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
1574 {
1575 	struct mux_node *np;
1576 	struct mux_edge *ep;
1577 	int i;
1578 	major_t lomaj;
1579 	major_t upmaj;
1580 	/*
1581 	 * If the lower stream is a pipe/FIFO, return, since link
1582 	 * cycles cannot happen on pipes/FIFOs.
1583 	 */
1584 	if (lostp->sd_vnode->v_type == VFIFO)
1585 		return (0);
1586 
1587 	for (i = 0; i < ss->ss_devcnt; i++) {
1588 		np = &ss->ss_mux_nodes[i];
1589 		MUX_CLEAR(np);
1590 	}
1591 	lomaj = getmajor(lostp->sd_vnode->v_rdev);
1592 	upmaj = getmajor(upstp->sd_vnode->v_rdev);
1593 	np = &ss->ss_mux_nodes[lomaj];
1594 	for (;;) {
1595 		if (!MUX_DIDVISIT(np)) {
1596 			if (np->mn_imaj == upmaj)
1597 				return (1);
1598 			if (np->mn_outp == NULL) {
1599 				MUX_VISIT(np);
1600 				if (np->mn_originp == NULL)
1601 					return (0);
1602 				np = np->mn_originp;
1603 				continue;
1604 			}
1605 			MUX_VISIT(np);
1606 			np->mn_startp = np->mn_outp;
1607 		} else {
1608 			if (np->mn_startp == NULL) {
1609 				if (np->mn_originp == NULL)
1610 					return (0);
1611 				else {
1612 					np = np->mn_originp;
1613 					continue;
1614 				}
1615 			}
1616 			/*
1617 			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
1618 			 * ignore the edge and move on. ep->me_nodep gets
1619 			 * set to NULL in mux_addedge() if it is a FIFO.
1620 			 *
1621 			 */
1622 			ep = np->mn_startp;
1623 			np->mn_startp = ep->me_nextp;
1624 			if (ep->me_nodep == NULL)
1625 				continue;
1626 			ep->me_nodep->mn_originp = np;
1627 			np = ep->me_nodep;
1628 		}
1629 	}
1630 }
1631 
1632 /*
1633  * Find linkinfo entry corresponding to the parameters.
1634  */
1635 linkinfo_t *
1636 findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
1637 {
1638 	linkinfo_t *linkp;
1639 	struct mux_edge *mep;
1640 	struct mux_node *mnp;
1641 	queue_t *qup;
1642 
1643 	mutex_enter(&strresources);
1644 	if ((type & LINKTYPEMASK) == LINKNORMAL) {
1645 		qup = getendq(stp->sd_wrq);
1646 		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1647 			if ((qup == linkp->li_lblk.l_qtop) &&
1648 			    (!index || (index == linkp->li_lblk.l_index))) {
1649 				mutex_exit(&strresources);
1650 				return (linkp);
1651 			}
1652 		}
1653 	} else {
1654 		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
1655 		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
1656 		mep = mnp->mn_outp;
1657 		while (mep) {
1658 			if ((index == 0) || (index == mep->me_muxid))
1659 				break;
1660 			mep = mep->me_nextp;
1661 		}
1662 		if (!mep) {
1663 			mutex_exit(&strresources);
1664 			return (NULL);
1665 		}
1666 		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1667 			if ((!linkp->li_lblk.l_qtop) &&
1668 			    (mep->me_muxid == linkp->li_lblk.l_index)) {
1669 				mutex_exit(&strresources);
1670 				return (linkp);
1671 			}
1672 		}
1673 	}
1674 	mutex_exit(&strresources);
1675 	return (NULL);
1676 }
1677 
1678 /*
1679  * Given a queue ptr, follow the chain of q_next pointers until you reach the
1680  * last queue on the chain and return it.
1681  */
1682 queue_t *
1683 getendq(queue_t *q)
1684 {
1685 	ASSERT(q != NULL);
1686 	while (_SAMESTR(q))
1687 		q = q->q_next;
1688 	return (q);
1689 }
1690 
1691 /*
1692  * Wait for the syncq count to drop to zero.
1693  * sq could be either outer or inner.
1694  */
1695 
1696 static void
1697 wait_syncq(syncq_t *sq)
1698 {
1699 	uint16_t count;
1700 
1701 	mutex_enter(SQLOCK(sq));
1702 	count = sq->sq_count;
1703 	SQ_PUTLOCKS_ENTER(sq);
1704 	SUM_SQ_PUTCOUNTS(sq, count);
1705 	while (count != 0) {
1706 		sq->sq_flags |= SQ_WANTWAKEUP;
1707 		SQ_PUTLOCKS_EXIT(sq);
1708 		cv_wait(&sq->sq_wait, SQLOCK(sq));
1709 		count = sq->sq_count;
1710 		SQ_PUTLOCKS_ENTER(sq);
1711 		SUM_SQ_PUTCOUNTS(sq, count);
1712 	}
1713 	SQ_PUTLOCKS_EXIT(sq);
1714 	mutex_exit(SQLOCK(sq));
1715 }
1716 
1717 /*
1718  * Wait while there are any messages for the queue in its syncq.
1719  */
1720 static void
1721 wait_q_syncq(queue_t *q)
1722 {
1723 	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1724 		syncq_t *sq = q->q_syncq;
1725 
1726 		mutex_enter(SQLOCK(sq));
1727 		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1728 			sq->sq_flags |= SQ_WANTWAKEUP;
1729 			cv_wait(&sq->sq_wait, SQLOCK(sq));
1730 		}
1731 		mutex_exit(SQLOCK(sq));
1732 	}
1733 }
1734 
1735 
1736 int
1737 mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
1738     int lhlink)
1739 {
1740 	struct stdata *stp;
1741 	struct strioctl strioc;
1742 	struct linkinfo *linkp;
1743 	struct stdata *stpdown;
1744 	struct streamtab *str;
1745 	queue_t *passq;
1746 	syncq_t *passyncq;
1747 	queue_t *rq;
1748 	cdevsw_impl_t *dp;
1749 	uint32_t qflag;
1750 	uint32_t sqtype;
1751 	perdm_t *dmp;
1752 	int error = 0;
1753 	netstack_t *ns;
1754 	str_stack_t *ss;
1755 
1756 	stp = vp->v_stream;
1757 	TRACE_1(TR_FAC_STREAMS_FR,
1758 	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
1759 	/*
1760 	 * Test for invalid upper stream
1761 	 */
1762 	if (stp->sd_flag & STRHUP) {
1763 		return (ENXIO);
1764 	}
1765 	if (vp->v_type == VFIFO) {
1766 		return (EINVAL);
1767 	}
1768 	if (stp->sd_strtab == NULL) {
1769 		return (EINVAL);
1770 	}
1771 	if (!stp->sd_strtab->st_muxwinit) {
1772 		return (EINVAL);
1773 	}
1774 	if (fpdown == NULL) {
1775 		return (EBADF);
1776 	}
1777 	ns = netstack_find_by_cred(crp);
1778 	ASSERT(ns != NULL);
1779 	ss = ns->netstack_str;
1780 	ASSERT(ss != NULL);
1781 
1782 	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
1783 		netstack_rele(ss->ss_netstack);
1784 		return (EINVAL);
1785 	}
1786 	mutex_enter(&muxifier);
1787 	if (stp->sd_flag & STPLEX) {
1788 		mutex_exit(&muxifier);
1789 		netstack_rele(ss->ss_netstack);
1790 		return (ENXIO);
1791 	}
1792 
1793 	/*
1794 	 * Test for invalid lower stream.
1795 	 * The check that either v_type == VFIFO or the major
1796 	 * number is < devcnt is done to avoid adding a mux_node
1797 	 * entry past the end of mux_nodes[].
1798 	 * For FIFOs we don't add an entry, so this isn't a
1799 	 * problem.
1800 	 */
1801 	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
1802 	    (stpdown == stp) || (stpdown->sd_flag &
1803 	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
1804 	    ((stpdown->sd_vnode->v_type != VFIFO) &&
1805 	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
1806 	    linkcycle(stp, stpdown, ss)) {
1807 		mutex_exit(&muxifier);
1808 		netstack_rele(ss->ss_netstack);
1809 		return (EINVAL);
1810 	}
1811 	TRACE_1(TR_FAC_STREAMS_FR,
1812 	    TR_STPDOWN, "stpdown:%p", stpdown);
1813 	rq = getendq(stp->sd_wrq);
1814 	if (cmd == I_PLINK)
1815 		rq = NULL;
1816 
1817 	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);
1818 
1819 	strioc.ic_cmd = cmd;
1820 	strioc.ic_timout = INFTIM;
1821 	strioc.ic_len = sizeof (struct linkblk);
1822 	strioc.ic_dp = (char *)&linkp->li_lblk;
1823 
1824 	/*
1825 	 * STRPLUMB protects plumbing changes and should be set before
1826 	 * link_addpassthru()/link_rempassthru() are called, so it is set here
1827 	 * and cleared at the end of mlink when the passthru queue is removed.
1828 	 * Setting STRPLUMB prevents reopens of the stream while the passthru
1829 	 * queue is in place (it is not a proper module and doesn't have an
1830 	 * open entry point).
1831 	 *
1832 	 * STPLEX prevents any threads from entering the stream from above. It
1833 	 * can't be set before the call to link_addpassthru() because putnext
1834 	 * from below may cause stream head I/O routines to be called and these
1835 	 * routines assert that STPLEX is not set. After link_addpassthru()
1836 	 * nothing may come from below since the pass queue syncq is blocked.
1837 	 * Note also that STPLEX should be cleared before the call to
1838 	 * link_rempassthru() since when messages start flowing to the stream
1839 	 * head (e.g. because of message propagation from the pass queue) stream
1840 	 * head I/O routines may be called with the STPLEX flag set.
1841 	 *
1842 	 * When STPLEX is set, nothing may come into the stream from above and
1843 	 * it is safe to do a setq which will change stream head. So, the
1844 	 * correct sequence of actions is:
1845 	 *
1846 	 * 1) Set STRPLUMB
1847 	 * 2) Call link_addpassthru()
1848 	 * 3) Set STPLEX
1849 	 * 4) Call setq and update the stream state
1850 	 * 5) Clear STPLEX
1851 	 * 6) Call link_rempassthru()
1852 	 * 7) Clear STRPLUMB
1853 	 *
1854 	 * The same sequence applies to munlink() code.
1855 	 */
1856 	mutex_enter(&stpdown->sd_lock);
1857 	stpdown->sd_flag |= STRPLUMB;
1858 	mutex_exit(&stpdown->sd_lock);
1859 	/*
1860 	 * Add passthru queue below lower mux. This will block
1861 	 * Add a passthru queue below the lower mux. This will block the
1862 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
1863 	passq = link_addpassthru(stpdown);
1864 
1865 	mutex_enter(&stpdown->sd_lock);
1866 	stpdown->sd_flag |= STPLEX;
1867 	mutex_exit(&stpdown->sd_lock);
1868 
1869 	rq = _RD(stpdown->sd_wrq);
1870 	/*
1871 	 * There may be messages in the streamhead's syncq due to messages
1872 	 * that arrived before link_addpassthru() was done. To avoid
1873 	 * background processing of the syncq happening simultaneously with
1874 	 * setq processing, we disable the streamhead syncq and wait until
1875 	 * the existing background thread finishes working on it.
1876 	 */
1877 	wait_sq_svc(rq->q_syncq);
1878 	passyncq = passq->q_syncq;
1879 	if (!(passyncq->sq_flags & SQ_BLOCKED))
1880 		blocksq(passyncq, SQ_BLOCKED, 0);
1881 
1882 	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
1883 	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
1884 	rq->q_ptr = _WR(rq)->q_ptr = NULL;
1885 
1886 	/* setq might sleep in allocator - avoid holding locks. */
1887 	/* Note: we are holding muxifier here. */
1888 
1889 	str = stp->sd_strtab;
1890 	dp = &devimpl[getmajor(vp->v_rdev)];
1891 	ASSERT(dp->d_str == str);
1892 
1893 	qflag = dp->d_qflag;
1894 	sqtype = dp->d_sqtype;
1895 
1896 	/* create perdm_t if needed */
1897 	if (NEED_DM(dp->d_dmp, qflag))
1898 		dp->d_dmp = hold_dm(str, qflag, sqtype);
1899 
1900 	dmp = dp->d_dmp;
1901 
1902 	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
1903 	    B_TRUE);
1904 
1905 	/*
1906 	 * XXX Remove any "odd" messages from the queue.
1907 	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
1908 	 */
1909 	error = strdoioctl(stp, &strioc, FNATIVE,
1910 	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
1911 	if (error != 0) {
1912 		lbfree(linkp);
1913 
1914 		if (!(passyncq->sq_flags & SQ_BLOCKED))
1915 			blocksq(passyncq, SQ_BLOCKED, 0);
1916 		/*
1917 		 * Restore the stream head queue and then remove
1918 		 * the passq. Turn off STPLEX before we turn on
1919 		 * the stream by removing the passq.
1920 		 */
1921 		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
1922 		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
1923 		    B_TRUE);
1924 
1925 		mutex_enter(&stpdown->sd_lock);
1926 		stpdown->sd_flag &= ~STPLEX;
1927 		mutex_exit(&stpdown->sd_lock);
1928 
1929 		link_rempassthru(passq);
1930 
1931 		mutex_enter(&stpdown->sd_lock);
1932 		stpdown->sd_flag &= ~STRPLUMB;
1933 		/* Wakeup anyone waiting for STRPLUMB to clear. */
1934 		cv_broadcast(&stpdown->sd_monitor);
1935 		mutex_exit(&stpdown->sd_lock);
1936 
1937 		mutex_exit(&muxifier);
1938 		netstack_rele(ss->ss_netstack);
1939 		return (error);
1940 	}
1941 	mutex_enter(&fpdown->f_tlock);
1942 	fpdown->f_count++;
1943 	mutex_exit(&fpdown->f_tlock);
1944 
1945 	/*
1946 	 * If we've made it here, the linkage is all set up, so we should
1947 	 * also set up the layered driver linkages.
1948 	 */
1949 
1950 	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1951 	if (cmd == I_LINK) {
1952 		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1953 	} else {
1954 		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1955 	}
1956 
1957 	link_rempassthru(passq);
1958 
1959 	mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);
1960 
1961 	/*
1962 	 * Mark the upper stream as having dependent links
1963 	 * so that strclose can clean it up.
1964 	 */
1965 	if (cmd == I_LINK) {
1966 		mutex_enter(&stp->sd_lock);
1967 		stp->sd_flag |= STRHASLINKS;
1968 		mutex_exit(&stp->sd_lock);
1969 	}
1970 	/*
1971 	 * Wake up any other processes that may have been
1972 	 * waiting on the lower stream. These will all
1973 	 * error out.
1974 	 */
1975 	mutex_enter(&stpdown->sd_lock);
1976 	/* The passthru module is removed so we may release STRPLUMB */
1977 	stpdown->sd_flag &= ~STRPLUMB;
1978 	cv_broadcast(&rq->q_wait);
1979 	cv_broadcast(&_WR(rq)->q_wait);
1980 	cv_broadcast(&stpdown->sd_monitor);
1981 	mutex_exit(&stpdown->sd_lock);
1982 	mutex_exit(&muxifier);
1983 	*rvalp = linkp->li_lblk.l_index;
1984 	netstack_rele(ss->ss_netstack);
1985 	return (0);
1986 }
1987 
1988 int
1989 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1990 {
1991 	int		ret;
1992 	struct file	*fpdown;
1993 
1994 	fpdown = getf(arg);
1995 	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1996 	if (fpdown != NULL)
1997 		releasef(arg);
1998 	return (ret);
1999 }
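
/*
 * For reference, the user-level sequence that arrives here via
 * strioctl(). This is only a sketch - the device paths and descriptors
 * are hypothetical - but I_LINK/I_UNLINK are the standard streamio(7I)
 * ioctls:
 *
 *	int mux = open("/dev/mux", O_RDWR);	(upper, controlling stream)
 *	int low = open("/dev/le", O_RDWR);	(lower stream to be linked)
 *	int muxid = ioctl(mux, I_LINK, low);	(reaches mlink(), cmd == I_LINK)
 *
 *	(void) ioctl(mux, I_UNLINK, muxid);	(undone via munlink())
 */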
2000 
2001 /*
2002  * Unlink a multiplexor link. Stp is the controlling stream for the
2003  * link, and linkp points to the link's entry in the linkinfo list.
2004  * The muxifier lock must be held on entry and is dropped on exit.
2005  *
2006  * NOTE: Currently it is assumed that the mux will process all the
2007  * messages sitting on its queue before ACKing the UNLINK. It is the
2008  * responsibility of the mux to handle all the messages that arrive
2009  * before UNLINK. If the mux has to send down messages on its lower
2010  * stream before ACKing I_UNLINK, then it *should* know how to handle
2011  * messages even after the UNLINK is acked (actually it should be able
2012  * to handle them until we re-block the read side of the pass queue
2013  * here). If the mux does not open up the lower stream, any messages
2014  * that arrive during UNLINK will be put in the stream head. If the
2015  * lower stream is opened up, some messages might land in the stream
2016  * head, depending on when each message arrived and when the read side
2017  * of the pass queue was re-blocked.
2018  */
2019 int
2020 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2021     str_stack_t *ss)
2022 {
2023 	struct strioctl strioc;
2024 	struct stdata *stpdown;
2025 	queue_t *rq, *wrq;
2026 	queue_t	*passq;
2027 	syncq_t *passyncq;
2028 	int error = 0;
2029 	file_t *fpdown;
2030 
2031 	ASSERT(MUTEX_HELD(&muxifier));
2032 
2033 	stpdown = linkp->li_fpdown->f_vnode->v_stream;
2034 
2035 	/*
2036 	 * See the comment in mlink_file() concerning STRPLUMB/STPLEX flags.
2037 	 */
2038 	mutex_enter(&stpdown->sd_lock);
2039 	stpdown->sd_flag |= STRPLUMB;
2040 	mutex_exit(&stpdown->sd_lock);
2041 
2042 	/*
2043 	 * Add a passthru queue below the lower mux. This will block the
2044 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2045 	 */
2046 	passq = link_addpassthru(stpdown);
2047 
2048 	if ((flag & LINKTYPEMASK) == LINKNORMAL)
2049 		strioc.ic_cmd = I_UNLINK;
2050 	else
2051 		strioc.ic_cmd = I_PUNLINK;
2052 	strioc.ic_timout = INFTIM;
2053 	strioc.ic_len = sizeof (struct linkblk);
2054 	strioc.ic_dp = (char *)&linkp->li_lblk;
2055 
2056 	error = strdoioctl(stp, &strioc, FNATIVE,
2057 	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2058 
2059 	/*
2060 	 * If there was an error and this is not called via strclose,
2061 	 * return to the user. Otherwise, pretend there was no error
2062 	 * and close the link.
2063 	 */
2064 	if (error) {
2065 		if (flag & LINKCLOSE) {
2066 			cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2067 			    "unlink ioctl, closing anyway (%d)\n", error);
2068 		} else {
2069 			link_rempassthru(passq);
2070 			mutex_enter(&stpdown->sd_lock);
2071 			stpdown->sd_flag &= ~STRPLUMB;
2072 			cv_broadcast(&stpdown->sd_monitor);
2073 			mutex_exit(&stpdown->sd_lock);
2074 			mutex_exit(&muxifier);
2075 			return (error);
2076 		}
2077 	}
2078 
2079 	mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
2080 	fpdown = linkp->li_fpdown;
2081 	lbfree(linkp);
2082 
2083 	/*
2084 	 * We go ahead and drop muxifier here--it's a nasty global lock that
2085 	 * can slow others down. It's okay to do so since attempts to mlink()
2086 	 * this stream will be stopped because STPLEX is still set in the stdata
2087 	 * structure, and munlink() is stopped because mux_rmvedge() and
2088 	 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2089 	 * respectively.  Note that we defer the closef() of fpdown until
2090 	 * after we drop muxifier since strclose() can call munlinkall().
2091 	 */
2092 	mutex_exit(&muxifier);
2093 
2094 	wrq = stpdown->sd_wrq;
2095 	rq = _RD(wrq);
2096 
2097 	/*
2098 	 * Get rid of outstanding service procedure runs, before we make
2099 	 * it a stream head, since a stream head doesn't have any service
2100 	 * procedure.
2101 	 */
2102 	disable_svc(rq);
2103 	wait_svc(rq);
2104 
2105 	/*
2106 	 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2107 	 * is queued up to be finished. The mux should take care that nothing
2108 	 * is sent down to this queue. We do this now, as we're going to block
2109 	 * passyncq if it was unblocked.
2110 	 */
2111 	if (wrq->q_flag & QPERMOD) {
2112 		syncq_t	*sq = wrq->q_syncq;
2113 
2114 		mutex_enter(SQLOCK(sq));
2115 		while (wrq->q_sqflags & Q_SQQUEUED) {
2116 			sq->sq_flags |= SQ_WANTWAKEUP;
2117 			cv_wait(&sq->sq_wait, SQLOCK(sq));
2118 		}
2119 		mutex_exit(SQLOCK(sq));
2120 	}
2121 	passyncq = passq->q_syncq;
2122 	if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2123 
2124 		syncq_t *sq, *outer;
2125 
2126 		/*
2127 		 * Messages could be flowing from underneath. We will
2128 		 * block the read side of the passq. This would be
2129 		 * sufficient for QPAIR and QPERQ muxes to ensure
2130 		 * that no data is flowing up into this queue
2131 		 * and hence no thread is active in this instance of
2132 		 * the lower mux. But for QPERMOD and QMTOUTPERIM there
2133 		 * could be messages on the inner and outer/inner
2134 		 * syncqs respectively. We will wait for them to drain.
2135 		 * Because passq is blocked, messages end up in the syncq,
2136 		 * and qfill_syncq could possibly end up setting QFULL,
2137 		 * which accesses rq->q_flag. Hence, we have to
2138 		 * acquire the QLOCK in setq.
2139 		 *
2140 		 * XXX Messages can also flow from the top into this
2141 		 * queue even though the unlink is over (e.g. some instance
2142 		 * of putnext() called from the top that has still not
2143 		 * accessed this queue, and also putq(lowerq)?).
2144 		 * Solution: how about blocking the l_qtop queue?
2145 		 * Do we really care about such pure D_MP muxes?
2146 		 */
2147 
2148 		blocksq(passyncq, SQ_BLOCKED, 0);
2149 
2150 		sq = rq->q_syncq;
2151 		if ((outer = sq->sq_outer) != NULL) {
2152 
2153 			/*
2154 			 * We just have to wait for the outer sq_count to
2155 			 * drop to zero. As this does not prevent new
2156 			 * messages from entering the outer perimeter, this
2157 			 * is subject to starvation.
2158 			 *
2159 			 * NOTE: Because of the blocksq above, messages could
2160 			 * be in the inner syncq only because some
2161 			 * thread holds the outer perimeter exclusively.
2162 			 * Hence it would be sufficient to wait for the
2163 			 * exclusive holder of the outer perimeter to drain
2164 			 * the inner and outer syncqs. But we will not depend
2165 			 * on this behavior and hence check the inner syncqs
2166 			 * separately.
2167 			 */
2168 			wait_syncq(outer);
2169 		}
2170 
2171 
2172 		/*
2173 		 * There could be messages destined for
2174 		 * this queue. Let the exclusive holder
2175 		 * drain it.
2176 		 */
2177 
2178 		wait_syncq(sq);
2179 		ASSERT((rq->q_flag & QPERMOD) ||
2180 		    ((rq->q_syncq->sq_head == NULL) &&
2181 		    (_WR(rq)->q_syncq->sq_head == NULL)));
2182 	}
2183 
2184 	/*
2185 	 * We haven't taken care of QPERMOD case yet. QPERMOD is a special
2186 	 * case as we don't disable its syncq or remove it off the syncq
2187 	 * service list.
2188 	 */
2189 	if (rq->q_flag & QPERMOD) {
2190 		syncq_t	*sq = rq->q_syncq;
2191 
2192 		mutex_enter(SQLOCK(sq));
2193 		while (rq->q_sqflags & Q_SQQUEUED) {
2194 			sq->sq_flags |= SQ_WANTWAKEUP;
2195 			cv_wait(&sq->sq_wait, SQLOCK(sq));
2196 		}
2197 		mutex_exit(SQLOCK(sq));
2198 	}
2199 
2200 	/*
2201 	 * flush_syncq changes state only when there are messages to
2202 	 * free, i.e., when it returns a non-zero value.
2203 	 */
2204 	ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2205 	ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2206 
2207 	/*
2208 	 * Nobody else should know about this queue now.
2209 	 * If the mux did not process the messages before
2210 	 * acking the I_UNLINK, free them now.
2211 	 */
2212 
2213 	flushq(rq, FLUSHALL);
2214 	flushq(_WR(rq), FLUSHALL);
2215 
2216 	/*
2217 	 * Convert the mux lower queue into a stream head queue.
2218 	 * Turn off STPLEX before we turn on the stream by removing the passq.
2219 	 */
2220 	rq->q_ptr = wrq->q_ptr = stpdown;
2221 	setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2222 
2223 	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2224 	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2225 
2226 	enable_svc(rq);
2227 
2228 	/*
2229 	 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2230 	 * needs to be set to prevent a reopen() of the stream - such a reopen
2231 	 * may try to call the non-existent pass queue open routine and panic.
2232 	 */
2233 	mutex_enter(&stpdown->sd_lock);
2234 	stpdown->sd_flag &= ~STPLEX;
2235 	mutex_exit(&stpdown->sd_lock);
2236 
2237 	ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2238 	    ((flag & LINKTYPEMASK) == LINKPERSIST));
2239 
2240 	/* clean up the layered driver linkages */
2241 	if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2242 		ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2243 	} else {
2244 		ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2245 	}
2246 
2247 	link_rempassthru(passq);
2248 
2249 	/*
2250 	 * Now all plumbing changes are finished and STRPLUMB is no
2251 	 * longer needed.
2252 	 */
2253 	mutex_enter(&stpdown->sd_lock);
2254 	stpdown->sd_flag &= ~STRPLUMB;
2255 	cv_broadcast(&stpdown->sd_monitor);
2256 	mutex_exit(&stpdown->sd_lock);
2257 
2258 	(void) closef(fpdown);
2259 	return (0);
2260 }
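
/*
 * A sketch of the persistent-link variant from user level (descriptors
 * hypothetical): an I_PLINK link survives close() of the stream used to
 * create it and is dismantled with I_PUNLINK using the saved muxid:
 *
 *	int muxid = ioctl(mux, I_PLINK, low);
 *	(void) close(mux);			(the link persists)
 *	mux = open("/dev/mux", O_RDWR);
 *	(void) ioctl(mux, I_PUNLINK, muxid);	(arrives here as LINKPERSIST)
 */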
2261 
2262 /*
2263  * Unlink all multiplexor links for which stp is the controlling stream.
2264  * Return 0, or a non-zero errno on failure.
2265  */
2266 int
2267 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss)
2268 {
2269 	linkinfo_t *linkp;
2270 	int error = 0;
2271 
2272 	mutex_enter(&muxifier);
2273 	while (linkp = findlinks(stp, 0, flag, ss)) {
2274 		/*
2275 		 * munlink() releases the muxifier lock.
2276 		 */
2277 		if (error = munlink(stp, linkp, flag, crp, rvalp, ss))
2278 			return (error);
2279 		mutex_enter(&muxifier);
2280 	}
2281 	mutex_exit(&muxifier);
2282 	return (0);
2283 }
2284 
2285 /*
2286  * A multiplexor link has been made. Add an
2287  * edge to the directed graph.
2288  */
2289 void
2290 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss)
2291 {
2292 	struct mux_node *np;
2293 	struct mux_edge *ep;
2294 	major_t upmaj;
2295 	major_t lomaj;
2296 
2297 	upmaj = getmajor(upstp->sd_vnode->v_rdev);
2298 	lomaj = getmajor(lostp->sd_vnode->v_rdev);
2299 	np = &ss->ss_mux_nodes[upmaj];
2300 	if (np->mn_outp) {
2301 		ep = np->mn_outp;
2302 		while (ep->me_nextp)
2303 			ep = ep->me_nextp;
2304 		ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2305 		ep = ep->me_nextp;
2306 	} else {
2307 		np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2308 		ep = np->mn_outp;
2309 	}
2310 	ep->me_nextp = NULL;
2311 	ep->me_muxid = muxid;
2312 	/*
2313 	 * Save the dev_t for the purposes of str_stack_shutdown.
2314 	 * str_stack_shutdown assumes that the device allows reopen, since
2315 	 * this dev_t is the one after any cloning by xx_open().
2316 	 * Would prefer finding the dev_t from before any cloning,
2317 	 * but specfs doesn't retain that.
2318 	 */
2319 	ep->me_dev = upstp->sd_vnode->v_rdev;
2320 	if (lostp->sd_vnode->v_type == VFIFO)
2321 		ep->me_nodep = NULL;
2322 	else
2323 		ep->me_nodep = &ss->ss_mux_nodes[lomaj];
2324 }
2325 
2326 /*
2327  * A multiplexor link has been removed. Remove the
2328  * edge in the directed graph.
2329  */
2330 void
2331 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
2332 {
2333 	struct mux_node *np;
2334 	struct mux_edge *ep;
2335 	struct mux_edge *pep = NULL;
2336 	major_t upmaj;
2337 
2338 	upmaj = getmajor(upstp->sd_vnode->v_rdev);
2339 	np = &ss->ss_mux_nodes[upmaj];
2340 	ASSERT(np->mn_outp != NULL);
2341 	ep = np->mn_outp;
2342 	while (ep) {
2343 		if (ep->me_muxid == muxid) {
2344 			if (pep)
2345 				pep->me_nextp = ep->me_nextp;
2346 			else
2347 				np->mn_outp = ep->me_nextp;
2348 			kmem_free(ep, sizeof (struct mux_edge));
2349 			return;
2350 		}
2351 		pep = ep;
2352 		ep = ep->me_nextp;
2353 	}
2354 	ASSERT(0);	/* should not reach here */
2355 }
2356 
2357 /*
2358  * Translate the device flags (from conf.h) to the corresponding
2359  * qflag and sq_flag (type) values.
2360  */
2361 int
2362 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
2363 	uint32_t *sqtypep)
2364 {
2365 	uint32_t qflag = 0;
2366 	uint32_t sqtype = 0;
2367 
2368 	if (devflag & _D_OLD)
2369 		goto bad;
2370 
2371 	/* Inner perimeter presence and scope */
2372 	switch (devflag & D_MTINNER_MASK) {
2373 	case D_MP:
2374 		qflag |= QMTSAFE;
2375 		sqtype |= SQ_CI;
2376 		break;
2377 	case D_MTPERQ|D_MP:
2378 		qflag |= QPERQ;
2379 		break;
2380 	case D_MTQPAIR|D_MP:
2381 		qflag |= QPAIR;
2382 		break;
2383 	case D_MTPERMOD|D_MP:
2384 		qflag |= QPERMOD;
2385 		break;
2386 	default:
2387 		goto bad;
2388 	}
2389 
2390 	/* Outer perimeter */
2391 	if (devflag & D_MTOUTPERIM) {
2392 		switch (devflag & D_MTINNER_MASK) {
2393 		case D_MP:
2394 		case D_MTPERQ|D_MP:
2395 		case D_MTQPAIR|D_MP:
2396 			break;
2397 		default:
2398 			goto bad;
2399 		}
2400 		qflag |= QMTOUTPERIM;
2401 	}
2402 
2403 	/* Inner perimeter modifiers */
2404 	if (devflag & D_MTINNER_MOD) {
2405 		switch (devflag & D_MTINNER_MASK) {
2406 		case D_MP:
2407 			goto bad;
2408 		default:
2409 			break;
2410 		}
2411 		if (devflag & D_MTPUTSHARED)
2412 			sqtype |= SQ_CIPUT;
2413 		if (devflag & _D_MTOCSHARED) {
2414 			/*
2415 			 * The code in putnext assumes that it has the
2416 			 * highest concurrency by not checking sq_count.
2417 			 * Thus _D_MTOCSHARED can only be supported when
2418 			 * D_MTPUTSHARED is set.
2419 			 */
2420 			if (!(devflag & D_MTPUTSHARED))
2421 				goto bad;
2422 			sqtype |= SQ_CIOC;
2423 		}
2424 		if (devflag & _D_MTCBSHARED) {
2425 			/*
2426 			 * The code in putnext assumes that it has the
2427 			 * highest concurrency by not checking sq_count.
2428 			 * Thus _D_MTCBSHARED can only be supported when
2429 			 * D_MTPUTSHARED is set.
2430 			 */
2431 			if (!(devflag & D_MTPUTSHARED))
2432 				goto bad;
2433 			sqtype |= SQ_CICB;
2434 		}
2435 		if (devflag & _D_MTSVCSHARED) {
2436 			/*
2437 			 * The code in putnext assumes that it has the
2438 			 * highest concurrency by not checking sq_count.
2439 			 * Thus _D_MTSVCSHARED can only be supported when
2440 			 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
2441 			 * supported only for QPERMOD.
2442 			 */
2443 			if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
2444 				goto bad;
2445 			sqtype |= SQ_CISVC;
2446 		}
2447 	}
2448 
2449 	/* Default outer perimeter concurrency */
2450 	sqtype |= SQ_CO;
2451 
2452 	/* Outer perimeter modifiers */
2453 	if (devflag & D_MTOCEXCL) {
2454 		if (!(devflag & D_MTOUTPERIM)) {
2455 			/* No outer perimeter */
2456 			goto bad;
2457 		}
2458 		sqtype &= ~SQ_COOC;
2459 	}
2460 
2461 	/* Synchronous Streams extended qinit structure */
2462 	if (devflag & D_SYNCSTR)
2463 		qflag |= QSYNCSTR;
2464 
2465 	/*
2466 	 * Private flag used by a transport module to indicate
2467 	 * to sockfs that it supports direct-access mode without
2468 	 * having to go through STREAMS.
2469 	 */
2470 	if (devflag & _D_DIRECT) {
2471 		/* Reject unless the module is fully-MT (no perimeter) */
2472 		if ((qflag & QMT_TYPEMASK) != QMTSAFE)
2473 			goto bad;
2474 		qflag |= _QDIRECT;
2475 	}
2476 
2477 	*qflagp = qflag;
2478 	*sqtypep = sqtype;
2479 	return (0);
2480 
2481 bad:
2482 	cmn_err(CE_WARN,
2483 	    "stropen: bad MT flags (0x%x) in driver '%s'",
2484 	    (int)(qflag & D_MTSAFETY_MASK),
2485 	    stp->st_rdinit->qi_minfo->mi_idname);
2486 
2487 	return (EINVAL);
2488 }
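
/*
 * For illustration, a hedged sketch of the mapping this routine performs;
 * the module name and streamtab below are hypothetical:
 *
 *	static struct fmodsw xx_fmodsw = {
 *		"xx",
 *		&xx_strtab,
 *		D_MP | D_MTPERMOD | D_MTPUTSHARED
 *	};
 *
 * For these flags devflg_to_qflag() computes qflag = QPERMOD (from
 * D_MTPERMOD|D_MP) and sqtype = SQ_CIPUT | SQ_CO (SQ_CIPUT from
 * D_MTPUTSHARED, SQ_CO as the default outer perimeter concurrency).
 */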
2489 
2490 /*
2491  * Set the interface values for a pair of queues (qinit structure,
2492  * packet sizes, water marks).
2493  * setq assumes that the caller does not have a claim (entersq or claimq)
2494  * on the queue.
2495  */
2496 void
2497 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
2498     perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
2499 {
2500 	queue_t *wq;
2501 	syncq_t	*sq, *outer;
2502 
2503 	ASSERT(rq->q_flag & QREADR);
2504 	ASSERT((qflag & QMT_TYPEMASK) != 0);
2505 	IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
2506 
2507 	wq = _WR(rq);
2508 	rq->q_qinfo = rinit;
2509 	rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
2510 	rq->q_lowat = rinit->qi_minfo->mi_lowat;
2511 	rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
2512 	rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
2513 	wq->q_qinfo = winit;
2514 	wq->q_hiwat = winit->qi_minfo->mi_hiwat;
2515 	wq->q_lowat = winit->qi_minfo->mi_lowat;
2516 	wq->q_minpsz = winit->qi_minfo->mi_minpsz;
2517 	wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;
2518 
2519 	/* Remove old syncqs */
2520 	sq = rq->q_syncq;
2521 	outer = sq->sq_outer;
2522 	if (outer != NULL) {
2523 		ASSERT(wq->q_syncq->sq_outer == outer);
2524 		outer_remove(outer, rq->q_syncq);
2525 		if (wq->q_syncq != rq->q_syncq)
2526 			outer_remove(outer, wq->q_syncq);
2527 	}
2528 	ASSERT(sq->sq_outer == NULL);
2529 	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2530 
2531 	if (sq != SQ(rq)) {
2532 		if (!(rq->q_flag & QPERMOD))
2533 			free_syncq(sq);
2534 		if (wq->q_syncq == rq->q_syncq)
2535 			wq->q_syncq = NULL;
2536 		rq->q_syncq = NULL;
2537 	}
2538 	if (wq->q_syncq != NULL && wq->q_syncq != sq &&
2539 	    wq->q_syncq != SQ(rq)) {
2540 		free_syncq(wq->q_syncq);
2541 		wq->q_syncq = NULL;
2542 	}
2543 	ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
2544 	    rq->q_syncq->sq_tail == NULL));
2545 	ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
2546 	    wq->q_syncq->sq_tail == NULL));
2547 
2548 	if (!(rq->q_flag & QPERMOD) &&
2549 	    rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
2550 		ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2551 		SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
2552 		    rq->q_syncq->sq_nciputctrl, 0);
2553 		ASSERT(ciputctrl_cache != NULL);
2554 		kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
2555 		rq->q_syncq->sq_ciputctrl = NULL;
2556 		rq->q_syncq->sq_nciputctrl = 0;
2557 	}
2558 
2559 	if (!(wq->q_flag & QPERMOD) &&
2560 	    wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
2561 		ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2562 		SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
2563 		    wq->q_syncq->sq_nciputctrl, 0);
2564 		ASSERT(ciputctrl_cache != NULL);
2565 		kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
2566 		wq->q_syncq->sq_ciputctrl = NULL;
2567 		wq->q_syncq->sq_nciputctrl = 0;
2568 	}
2569 
2570 	sq = SQ(rq);
2571 	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
2572 	ASSERT(sq->sq_outer == NULL);
2573 	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2574 
2575 	/*
2576 	 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
2577 	 * bits in sq_flag based on the sqtype.
2578 	 */
2579 	ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);
2580 
2581 	rq->q_syncq = wq->q_syncq = sq;
2582 	sq->sq_type = sqtype;
2583 	sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);
2584 
2585 	/*
2586 	 * We are zeroing sq_svcflags, resetting SQ_DISABLED in case it was
2587 	 * set by wait_svc() in the munlink path.
2590 	 */
2591 	ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
2592 	sq->sq_svcflags = 0;
2593 
2594 	/*
2595 	 * We need to acquire the lock here for the mlink and munlink case,
2596 	 * where canputnext, backenable, etc. can access q_flag.
2597 	 */
2598 	if (lock_needed) {
2599 		mutex_enter(QLOCK(rq));
2600 		rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2601 		mutex_exit(QLOCK(rq));
2602 		mutex_enter(QLOCK(wq));
2603 		wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2604 		mutex_exit(QLOCK(wq));
2605 	} else {
2606 		rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2607 		wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2608 	}
2609 
2610 	if (qflag & QPERQ) {
2611 		/* Allocate a separate syncq for the write side */
2612 		sq = new_syncq();
2613 		sq->sq_type = rq->q_syncq->sq_type;
2614 		sq->sq_flags = rq->q_syncq->sq_flags;
2615 		ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2616 		    sq->sq_oprev == NULL);
2617 		wq->q_syncq = sq;
2618 	}
2619 	if (qflag & QPERMOD) {
2620 		sq = dmp->dm_sq;
2621 
2622 		/*
2623 		 * Assert that we do have an inner perimeter syncq and that it
2624 		 * does not have an outer perimeter associated with it.
2625 		 */
2626 		ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2627 		    sq->sq_oprev == NULL);
2628 		rq->q_syncq = wq->q_syncq = sq;
2629 	}
2630 	if (qflag & QMTOUTPERIM) {
2631 		outer = dmp->dm_sq;
2632 
2633 		ASSERT(outer->sq_outer == NULL);
2634 		outer_insert(outer, rq->q_syncq);
2635 		if (wq->q_syncq != rq->q_syncq)
2636 			outer_insert(outer, wq->q_syncq);
2637 	}
2638 	ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2639 	    (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2640 	ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2641 	    (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2642 	ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));
2643 
2644 	/*
2645 	 * Initialize struio() types.
2646 	 */
2647 	rq->q_struiot =
2648 	    (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
2649 	wq->q_struiot =
2650 	    (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
2651 }
2652 
2653 perdm_t *
2654 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
2655 {
2656 	syncq_t	*sq;
2657 	perdm_t	**pp;
2658 	perdm_t	*p;
2659 	perdm_t	*dmp;
2660 
2661 	ASSERT(str != NULL);
2662 	ASSERT(qflag & (QPERMOD | QMTOUTPERIM));
2663 
2664 	rw_enter(&perdm_rwlock, RW_READER);
2665 	for (p = perdm_list; p != NULL; p = p->dm_next) {
2666 		if (p->dm_str == str) {	/* found one */
2667 			atomic_add_32(&(p->dm_ref), 1);
2668 			rw_exit(&perdm_rwlock);
2669 			return (p);
2670 		}
2671 	}
2672 	rw_exit(&perdm_rwlock);
2673 
2674 	sq = new_syncq();
2675 	if (qflag & QPERMOD) {
2676 		sq->sq_type = sqtype | SQ_PERMOD;
2677 		sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
2678 	} else {
2679 		ASSERT(qflag & QMTOUTPERIM);
2680 		sq->sq_onext = sq->sq_oprev = sq;
2681 	}
2682 
2683 	dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
2684 	dmp->dm_sq = sq;
2685 	dmp->dm_str = str;
2686 	dmp->dm_ref = 1;
2687 	dmp->dm_next = NULL;
2688 
2689 	rw_enter(&perdm_rwlock, RW_WRITER);
2690 	for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
2691 		if (p->dm_str == str) {	/* already present */
2692 			p->dm_ref++;
2693 			rw_exit(&perdm_rwlock);
2694 			free_syncq(sq);
2695 			kmem_free(dmp, sizeof (perdm_t));
2696 			return (p);
2697 		}
2698 	}
2699 
2700 	*pp = dmp;
2701 	rw_exit(&perdm_rwlock);
2702 	return (dmp);
2703 }
2704 
2705 void
2706 rele_dm(perdm_t *dmp)
2707 {
2708 	perdm_t **pp;
2709 	perdm_t *p;
2710 
2711 	rw_enter(&perdm_rwlock, RW_WRITER);
2712 	ASSERT(dmp->dm_ref > 0);
2713 
2714 	if (--dmp->dm_ref > 0) {
2715 		rw_exit(&perdm_rwlock);
2716 		return;
2717 	}
2718 
2719 	for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
2720 		if (p == dmp)
2721 			break;
2722 	ASSERT(p == dmp);
2723 	*pp = p->dm_next;
2724 	rw_exit(&perdm_rwlock);
2725 
2726 	/*
2727 	 * Wait for any background processing that relies on the
2728 	 * syncq to complete before it is freed.
2729 	 */
2730 	wait_sq_svc(p->dm_sq);
2731 	free_syncq(p->dm_sq);
2732 	kmem_free(p, sizeof (perdm_t));
2733 }
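
/*
 * Typical pairing (a sketch, not a new interface): the perdm_t is
 * created or found when a queue pair is configured, as in mlink_file()
 * above, and released when its last user goes away:
 *
 *	if (NEED_DM(dp->d_dmp, qflag))
 *		dp->d_dmp = hold_dm(str, qflag, sqtype);
 *	...
 *	rele_dm(dmp);		(drops dm_ref; frees the syncq on last release)
 */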
2734 
2735 /*
2736  * Make a protocol message given control and data buffers.
2737  * n.b., this can block; be careful of what locks you hold when calling it.
2738  *
2739  * If sd_maxblk is less than *iosize this routine can fail part way through
2740  * (due to an allocation failure). In this case, on return *iosize will
2741  * contain the amount that was consumed. Otherwise *iosize will not be
2742  * modified, i.e., it will still equal the (fully consumed) amount requested.
2743  */
2744 int
2745 strmakemsg(
2746 	struct strbuf *mctl,
2747 	ssize_t *iosize,
2748 	struct uio *uiop,
2749 	stdata_t *stp,
2750 	int32_t flag,
2751 	mblk_t **mpp)
2752 {
2753 	mblk_t *mpctl = NULL;
2754 	mblk_t *mpdata = NULL;
2755 	int error;
2756 
2757 	ASSERT(uiop != NULL);
2758 
2759 	*mpp = NULL;
2760 	/* Create control part, if any */
2761 	if ((mctl != NULL) && (mctl->len >= 0)) {
2762 		error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2763 		if (error)
2764 			return (error);
2765 	}
2766 	/* Create data part, if any */
2767 	if (*iosize >= 0) {
2768 		error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2769 		if (error) {
2770 			freemsg(mpctl);
2771 			return (error);
2772 		}
2773 	}
2774 	if (mpctl != NULL) {
2775 		if (mpdata != NULL)
2776 			linkb(mpctl, mpdata);
2777 		*mpp = mpctl;
2778 	} else {
2779 		*mpp = mpdata;
2780 	}
2781 	return (0);
2782 }
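
/*
 * strmakemsg() is the stream-head worker behind putmsg(2) and friends.
 * A hedged user-level sketch (descriptor and buffers hypothetical):
 *
 *	struct strbuf ctl, dat;
 *	ctl.len = sizeof (req);		ctl.buf = (char *)&req;
 *	dat.len = len;			dat.buf = buf;
 *	if (putmsg(fd, &ctl, &dat, 0) == -1)
 *		return (-1);
 *
 * The control part becomes an M_PROTO block (M_PCPROTO with RS_HIPRI)
 * via strmakectl(); the data part becomes one or more M_DATA blocks of
 * at most sd_maxblk bytes each via strmakedata().
 */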
2783 
2784 /*
2785  * Make the control part of a protocol message given a control buffer.
2786  * n.b., this can block; be careful of what locks you hold when calling it.
2787  */
2788 int
2789 strmakectl(
2790 	struct strbuf *mctl,
2791 	int32_t flag,
2792 	int32_t fflag,
2793 	mblk_t **mpp)
2794 {
2795 	mblk_t *bp = NULL;
2796 	unsigned char msgtype;
2797 	int error = 0;
2798 
2799 	*mpp = NULL;
2800 	/*
2801 	 * Create control part of message, if any.
2802 	 */
2803 	if ((mctl != NULL) && (mctl->len >= 0)) {
2804 		caddr_t base;
2805 		int ctlcount;
2806 		int allocsz;
2807 
2808 		if (flag & RS_HIPRI)
2809 			msgtype = M_PCPROTO;
2810 		else
2811 			msgtype = M_PROTO;
2812 
2813 		ctlcount = mctl->len;
2814 		base = mctl->buf;
2815 
2816 		/*
2817 		 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2818 		 * blocks by increasing the size to something more usable.
2819 		 */
2820 		allocsz = MAX(ctlcount, 64);
2821 
2822 		/*
2823 		 * Range checking has already been done; simply try
2824 		 * to allocate a message block for the ctl part.
2825 		 */
2826 		while (!(bp = allocb(allocsz, BPRI_MED))) {
2827 			if (fflag & (FNDELAY|FNONBLOCK))
2828 				return (EAGAIN);
2829 			if (error = strwaitbuf(allocsz, BPRI_MED))
2830 				return (error);
2831 		}
2832 
2833 		bp->b_datap->db_type = msgtype;
2834 		if (copyin(base, bp->b_wptr, ctlcount)) {
2835 			freeb(bp);
2836 			return (EFAULT);
2837 		}
2838 		bp->b_wptr += ctlcount;
2839 	}
2840 	*mpp = bp;
2841 	return (0);
2842 }
2843 
2844 /*
2845  * Make a protocol message given data buffers.
2846  * n.b., this can block; be careful of what locks you hold when calling it.
2847  *
2848  * If sd_maxblk is less than *iosize this routine can fail part way through
2849  * (due to an allocation failure). In this case, on return *iosize will
2850  * contain the amount that was consumed. Otherwise *iosize will not be
2851  * modified, i.e., it will still equal the (fully consumed) amount requested.
2852  */
2853 int
2854 strmakedata(
2855 	ssize_t   *iosize,
2856 	struct uio *uiop,
2857 	stdata_t *stp,
2858 	int32_t flag,
2859 	mblk_t **mpp)
2860 {
2861 	mblk_t *mp = NULL;
2862 	mblk_t *bp;
2863 	int wroff = (int)stp->sd_wroff;
2864 	int tail_len = (int)stp->sd_tail;
2865 	int extra = wroff + tail_len;
2866 	int error = 0;
2867 	ssize_t maxblk;
2868 	ssize_t count = *iosize;
2869 	cred_t *cr = CRED();
2870 
2871 	*mpp = NULL;
2872 	if (count < 0)
2873 		return (0);
2874 
2875 	maxblk = stp->sd_maxblk;
2876 	if (maxblk == INFPSZ)
2877 		maxblk = count;
2878 
2879 	/*
2880 	 * Create data part of message, if any.
2881 	 */
2882 	do {
2883 		ssize_t size;
2884 		dblk_t  *dp;
2885 
2886 		ASSERT(uiop);
2887 
2888 		size = MIN(count, maxblk);
2889 
2890 		while ((bp = allocb_cred(size + extra, cr)) == NULL) {
2891 			error = EAGAIN;
2892 			if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
2893 			    (error = strwaitbuf(size + extra, BPRI_MED)) != 0) {
2894 				if (count == *iosize) {
2895 					freemsg(mp);
2896 					return (error);
2897 				} else {
2898 					*iosize -= count;
2899 					*mpp = mp;
2900 					return (0);
2901 				}
2902 			}
2903 		}
2904 		dp = bp->b_datap;
2905 		dp->db_cpid = curproc->p_pid;
2906 		ASSERT(wroff <= dp->db_lim - bp->b_wptr);
2907 		bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;
2908 
2909 		if (flag & STRUIO_POSTPONE) {
2910 			/*
2911 			 * Setup the stream uio portion of the
2912 			 * dblk for subsequent use by struioget().
2913 			 */
2914 			dp->db_struioflag = STRUIO_SPEC;
2915 			dp->db_cksumstart = 0;
2916 			dp->db_cksumstuff = 0;
2917 			dp->db_cksumend = size;
2918 			*(long long *)dp->db_struioun.data = 0ll;
2919 			bp->b_wptr += size;
2920 		} else {
2921 			if (stp->sd_copyflag & STRCOPYCACHED)
2922 				uiop->uio_extflg |= UIO_COPY_CACHED;
2923 
2924 			if (size != 0) {
2925 				error = uiomove(bp->b_wptr, size, UIO_WRITE,
2926 				    uiop);
2927 				if (error != 0) {
2928 					freeb(bp);
2929 					freemsg(mp);
2930 					return (error);
2931 				}
2932 			}
2933 			bp->b_wptr += size;
2934 
2935 			if (stp->sd_wputdatafunc != NULL) {
2936 				mblk_t *newbp;
2937 
2938 				newbp = (stp->sd_wputdatafunc)(stp->sd_vnode,
2939 				    bp, NULL, NULL, NULL, NULL);
2940 				if (newbp == NULL) {
2941 					freeb(bp);
2942 					freemsg(mp);
2943 					return (ECOMM);
2944 				}
2945 				bp = newbp;
2946 			}
2947 		}
2948 
2949 		count -= size;
2950 
2951 		if (mp == NULL)
2952 			mp = bp;
2953 		else
2954 			linkb(mp, bp);
2955 	} while (count > 0);
2956 
2957 	*mpp = mp;
2958 	return (0);
2959 }
2960 
2961 /*
2962  * Wait for a buffer to become available. Return a non-zero errno
2963  * if unable to wait, 0 if a buffer is probably available.
2964  */
2965 int
2966 strwaitbuf(size_t size, int pri)
2967 {
2968 	bufcall_id_t id;
2969 
2970 	mutex_enter(&bcall_monitor);
2971 	if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
2972 	    &ttoproc(curthread)->p_flag_cv)) == 0) {
2973 		mutex_exit(&bcall_monitor);
2974 		return (ENOSR);
2975 	}
2976 	if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
2977 		unbufcall(id);
2978 		mutex_exit(&bcall_monitor);
2979 		return (EINTR);
2980 	}
2981 	unbufcall(id);
2982 	mutex_exit(&bcall_monitor);
2983 	return (0);
2984 }
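
/*
 * The bufcall(9F) mechanism used above is the same one drivers use to
 * recover from allocb() failure; a hedged driver-side sketch with
 * hypothetical names:
 *
 *	if ((mp = allocb(size, BPRI_MED)) == NULL) {
 *		xx->xx_bufcall_id = bufcall(size, BPRI_MED,
 *		    xx_reenable, (void *)xx);
 *		return;		(xx_reenable retries once memory frees up)
 *	}
 */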
2985 
2986 /*
2987  * This function waits for a read or write event to happen on a stream.
2988  * fmode can specify FNDELAY and/or FNONBLOCK.
2989  * The timeout is in ms with -1 meaning infinite.
2990  * The flag values work as follows:
2991  *	READWAIT	Check for read side errors, send M_READ
2992  *	GETWAIT		Check for read side errors, no M_READ
2993  *	WRITEWAIT	Check for write side errors.
2994  *	NOINTR		Do not return error if nonblocking or timeout.
2995  *	STR_NOERROR	Ignore all errors except STPLEX.
2996  *	STR_NOSIG	Ignore/hold signals for the duration of the call.
2997  *	STR_PEEK	Pass STR_PEEK through to strgeterr().
2998  */
2999 int
3000 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
3001     int *done)
3002 {
3003 	int slpflg, errs;
3004 	int error;
3005 	kcondvar_t *sleepon;
3006 	mblk_t *mp;
3007 	ssize_t *rd_count;
3008 	clock_t rval;
3009 
3010 	ASSERT(MUTEX_HELD(&stp->sd_lock));
3011 	if ((flag & READWAIT) || (flag & GETWAIT)) {
3012 		slpflg = RSLEEP;
3013 		sleepon = &_RD(stp->sd_wrq)->q_wait;
3014 		errs = STRDERR|STPLEX;
3015 	} else {
3016 		slpflg = WSLEEP;
3017 		sleepon = &stp->sd_wrq->q_wait;
3018 		errs = STWRERR|STRHUP|STPLEX;
3019 	}
3020 	if (flag & STR_NOERROR)
3021 		errs = STPLEX;
3022 
3023 	if (stp->sd_wakeq & slpflg) {
3024 		/*
3025 		 * A strwakeq() is pending, no need to sleep.
3026 		 */
3027 		stp->sd_wakeq &= ~slpflg;
3028 		*done = 0;
3029 		return (0);
3030 	}
3031 
3032 	if (fmode & (FNDELAY|FNONBLOCK)) {
3033 		if (!(flag & NOINTR))
3034 			error = EAGAIN;
3035 		else
3036 			error = 0;
3037 		*done = 1;
3038 		return (error);
3039 	}
3040 
3041 	if (stp->sd_flag & errs) {
3042 		/*
3043 		 * Check for errors before going to sleep since the
3044 		 * caller might not have checked this while holding
3045 		 * sd_lock.
3046 		 */
3047 		error = strgeterr(stp, errs, (flag & STR_PEEK));
3048 		if (error != 0) {
3049 			*done = 1;
3050 			return (error);
3051 		}
3052 	}
3053 
3054 	/*
3055 	 * If any module downstream has requested read notification
3056 	 * by setting SNDMREAD flag using M_SETOPTS, send a message
3057 	 * down stream.
3058 	 */
3059 	if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
3060 		mutex_exit(&stp->sd_lock);
3061 		if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
3062 		    (flag & STR_NOSIG), &error))) {
3063 			mutex_enter(&stp->sd_lock);
3064 			*done = 1;
3065 			return (error);
3066 		}
3067 		mp->b_datap->db_type = M_READ;
3068 		rd_count = (ssize_t *)mp->b_wptr;
3069 		*rd_count = count;
3070 		mp->b_wptr += sizeof (ssize_t);
3071 		/*
3072 		 * Send the number of bytes requested by the
3073 		 * read as the argument to M_READ.
3074 		 */
3075 		stream_willservice(stp);
3076 		putnext(stp->sd_wrq, mp);
3077 		stream_runservice(stp);
3078 		mutex_enter(&stp->sd_lock);
3079 
3080 		/*
3081 		 * If any data arrived due to inline processing
3082 		 * of putnext(), don't sleep.
3083 		 */
3084 		if (_RD(stp->sd_wrq)->q_first != NULL) {
3085 			*done = 0;
3086 			return (0);
3087 		}
3088 	}
3089 
3090 	stp->sd_flag |= slpflg;
3091 	TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
3092 	    "strwaitq sleeps (2):%p, %X, %lX, %X, %p",
3093 	    stp, flag, count, fmode, done);
3094 
3095 	rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
3096 	if (rval > 0) {
3097 		/* EMPTY */
3098 		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
3099 		    "strwaitq awakes(2):%X, %X, %X, %X, %X",
3100 		    stp, flag, count, fmode, done);
3101 	} else if (rval == 0) {
3102 		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
3103 		    "strwaitq interrupt #2:%p, %X, %lX, %X, %p",
3104 		    stp, flag, count, fmode, done);
3105 		stp->sd_flag &= ~slpflg;
3106 		cv_broadcast(sleepon);
3107 		if (!(flag & NOINTR))
3108 			error = EINTR;
3109 		else
3110 			error = 0;
3111 		*done = 1;
3112 		return (error);
3113 	} else {
3114 		/* timeout */
3115 		TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
3116 		    "strwaitq timeout:%p, %X, %lX, %X, %p",
3117 		    stp, flag, count, fmode, done);
3118 		*done = 1;
3119 		if (!(flag & NOINTR))
3120 			return (ETIME);
3121 		else
3122 			return (0);
3123 	}
3124 	/*
3125 	 * If the caller implements delayed errors (i.e. queued after data)
3126 	 * we cannot check for errors here since data as well as an
3127 	 * error might have arrived at the stream head. We return to
3128 	 * have the caller check the read queue before checking for errors.
3129 	 */
3130 	if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
3131 		error = strgeterr(stp, errs, (flag & STR_PEEK));
3132 		if (error != 0) {
3133 			*done = 1;
3134 			return (error);
3135 		}
3136 	}
3137 	*done = 0;
3138 	return (0);
3139 }
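
/*
 * Callers of strwaitq() retry in a loop of roughly this shape (a sketch
 * of the canonical pattern, not a quote of any particular caller):
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (the awaited condition does not hold yet) {
 *		error = strwaitq(stp, READWAIT, count, fmode, -1, &done);
 *		if (error != 0 || done)
 *			break;		(error, nonblocking or timeout)
 *	}
 *	mutex_exit(&stp->sd_lock);
 */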
3140 
3141 /*
3142  * Perform job control discipline access checks.
3143  * Return 0 for success and the errno for failure.
3144  */
3145 
3146 #define	cantsend(p, t, sig) \
3147 	(sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))
3148 
3149 int
3150 straccess(struct stdata *stp, enum jcaccess mode)
3151 {
3152 	extern kcondvar_t lbolt_cv;	/* XXX: should be in a header file */
3153 	kthread_t *t = curthread;
3154 	proc_t *p = ttoproc(t);
3155 	sess_t *sp;
3156 
3157 	ASSERT(mutex_owned(&stp->sd_lock));
3158 
3159 	if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
3160 		return (0);
3161 
3162 	mutex_enter(&p->p_lock);		/* protects p_pgidp */
3163 
3164 	for (;;) {
3165 		mutex_enter(&p->p_splock);	/* protects p->p_sessp */
3166 		sp = p->p_sessp;
3167 		mutex_enter(&sp->s_lock);	/* protects sp->* */
3168 
3169 		/*
3170 		 * If this is not the calling process's controlling terminal
3171 		 * or if the calling process is already in the foreground
3172 		 * then allow access.
3173 		 */
3174 		if (sp->s_dev != stp->sd_vnode->v_rdev ||
3175 		    p->p_pgidp == stp->sd_pgidp) {
3176 			mutex_exit(&sp->s_lock);
3177 			mutex_exit(&p->p_splock);
3178 			mutex_exit(&p->p_lock);
3179 			return (0);
3180 		}
3181 
3182 		/*
3183 		 * Check to see if controlling terminal has been deallocated.
3184 		 */
3185 		if (sp->s_vp == NULL) {
3186 			if (!cantsend(p, t, SIGHUP))
3187 				sigtoproc(p, t, SIGHUP);
3188 			mutex_exit(&sp->s_lock);
3189 			mutex_exit(&p->p_splock);
3190 			mutex_exit(&p->p_lock);
3191 			return (EIO);
3192 		}
3193 
3194 		mutex_exit(&sp->s_lock);
3195 		mutex_exit(&p->p_splock);
3196 
3197 		if (mode == JCGETP) {
3198 			mutex_exit(&p->p_lock);
3199 			return (0);
3200 		}
3201 
3202 		if (mode == JCREAD) {
3203 			if (p->p_detached || cantsend(p, t, SIGTTIN)) {
3204 				mutex_exit(&p->p_lock);
3205 				return (EIO);
3206 			}
3207 			mutex_exit(&p->p_lock);
3208 			mutex_exit(&stp->sd_lock);
3209 			pgsignal(p->p_pgidp, SIGTTIN);
3210 			mutex_enter(&stp->sd_lock);
3211 			mutex_enter(&p->p_lock);
3212 		} else {  /* mode == JCWRITE or JCSETP */
3213 			if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
3214 			    cantsend(p, t, SIGTTOU)) {
3215 				mutex_exit(&p->p_lock);
3216 				return (0);
3217 			}
3218 			if (p->p_detached) {
3219 				mutex_exit(&p->p_lock);
3220 				return (EIO);
3221 			}
3222 			mutex_exit(&p->p_lock);
3223 			mutex_exit(&stp->sd_lock);
3224 			pgsignal(p->p_pgidp, SIGTTOU);
3225 			mutex_enter(&stp->sd_lock);
3226 			mutex_enter(&p->p_lock);
3227 		}
3228 
3229 		/*
3230 		 * We call cv_wait_sig_swap() to cause the appropriate
3231 		 * action for the jobcontrol signal to take place.
3232 		 * If the signal is being caught, we will take the
3233 		 * EINTR error return.  Otherwise, the default action
3234 		 * of causing the process to stop will take place.
3235 		 * In this case, we rely on the periodic cv_broadcast() on
3236 		 * &lbolt_cv to wake us up to loop around and test again.
3237 		 * We can't get here if the signal is ignored or
3238 		 * if the current thread is blocking the signal.
3239 		 */
3240 		mutex_exit(&stp->sd_lock);
3241 		if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
3242 			mutex_exit(&p->p_lock);
3243 			mutex_enter(&stp->sd_lock);
3244 			return (EINTR);
3245 		}
3246 		mutex_exit(&p->p_lock);
3247 		mutex_enter(&stp->sd_lock);
3248 		mutex_enter(&p->p_lock);
3249 	}
3250 }
3251 
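
/*
 * The observable effect of straccess() is ordinary job control, e.g.
 * (a shell-level sketch):
 *
 *	$ cat &				(background job reads its tty)
 *	[1]+  Stopped (tty input)  cat	(the JCREAD path posted SIGTTIN)
 *	$ fg				(the read resumes in the foreground)
 */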
3252 /*
3253  * Return the size of the leading same-type blocks (bp->b_datap->db_type).
3254  */
3255 size_t
3256 xmsgsize(mblk_t *bp)
3257 {
3258 	unsigned char type;
3259 	size_t count = 0;
3260 
3261 	type = bp->b_datap->db_type;
3262 
3263 	for (; bp; bp = bp->b_cont) {
3264 		if (type != bp->b_datap->db_type)
3265 			break;
3266 		ASSERT(bp->b_wptr >= bp->b_rptr);
3267 		count += bp->b_wptr - bp->b_rptr;
3268 	}
3269 	return (count);
3270 }
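
/*
 * Example: for the chain M_PROTO(8) -> M_PROTO(12) -> M_DATA(100),
 * xmsgsize() returns 20; counting stops at the first block whose type
 * differs from that of the first block.
 */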
3271 
3272 /*
3273  * Allocate a stream head.
3274  */
3275 struct stdata *
3276 shalloc(queue_t *qp)
3277 {
3278 	stdata_t *stp;
3279 
3280 	stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);
3281 
3282 	stp->sd_wrq = _WR(qp);
3283 	stp->sd_strtab = NULL;
3284 	stp->sd_iocid = 0;
3285 	stp->sd_mate = NULL;
3286 	stp->sd_freezer = NULL;
3287 	stp->sd_refcnt = 0;
3288 	stp->sd_wakeq = 0;
3289 	stp->sd_anchor = 0;
3290 	stp->sd_struiowrq = NULL;
3291 	stp->sd_struiordq = NULL;
3292 	stp->sd_struiodnak = 0;
3293 	stp->sd_struionak = NULL;
3294 	stp->sd_t_audit_data = NULL;
3295 	stp->sd_rput_opt = 0;
3296 	stp->sd_wput_opt = 0;
3297 	stp->sd_read_opt = 0;
3298 	stp->sd_rprotofunc = strrput_proto;
3299 	stp->sd_rmiscfunc = strrput_misc;
3300 	stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
3301 	stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL;
3302 	stp->sd_ciputctrl = NULL;
3303 	stp->sd_nciputctrl = 0;
3304 	stp->sd_qhead = NULL;
3305 	stp->sd_qtail = NULL;
3306 	stp->sd_servid = NULL;
3307 	stp->sd_nqueues = 0;
3308 	stp->sd_svcflags = 0;
3309 	stp->sd_copyflag = 0;
3310 
3311 	return (stp);
3312 }
3313 
3314 /*
3315  * Free a stream head.
3316  */
3317 void
3318 shfree(stdata_t *stp)
3319 {
3320 	ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
3321 
3322 	stp->sd_wrq = NULL;
3323 
3324 	mutex_enter(&stp->sd_qlock);
3325 	while (stp->sd_svcflags & STRS_SCHEDULED) {
3326 		STRSTAT(strwaits);
3327 		cv_wait(&stp->sd_qcv, &stp->sd_qlock);
3328 	}
3329 	mutex_exit(&stp->sd_qlock);
3330 
3331 	if (stp->sd_ciputctrl != NULL) {
3332 		ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
3333 		SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
3334 		    stp->sd_nciputctrl, 0);
3335 		ASSERT(ciputctrl_cache != NULL);
3336 		kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
3337 		stp->sd_ciputctrl = NULL;
3338 		stp->sd_nciputctrl = 0;
3339 	}
3340 	ASSERT(stp->sd_qhead == NULL);
3341 	ASSERT(stp->sd_qtail == NULL);
3342 	ASSERT(stp->sd_nqueues == 0);
3343 	kmem_cache_free(stream_head_cache, stp);
3344 }
3345 
3346 /*
3347  * Allocate a pair of queues and a syncq for the pair
3348  */
3349 queue_t *
3350 allocq(void)
3351 {
3352 	queinfo_t *qip;
3353 	queue_t *qp, *wqp;
3354 	syncq_t	*sq;
3355 
3356 	qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
3357 
3358 	qp = &qip->qu_rqueue;
3359 	wqp = &qip->qu_wqueue;
3360 	sq = &qip->qu_syncq;
3361 
3362 	qp->q_last	= NULL;
3363 	qp->q_next	= NULL;
3364 	qp->q_ptr	= NULL;
3365 	qp->q_flag	= QUSE | QREADR;
3366 	qp->q_bandp	= NULL;
3367 	qp->q_stream	= NULL;
3368 	qp->q_syncq	= sq;
3369 	qp->q_nband	= 0;
3370 	qp->q_nfsrv	= NULL;
3371 	qp->q_draining	= 0;
3372 	qp->q_syncqmsgs	= 0;
3373 	qp->q_spri	= 0;
3374 	qp->q_qtstamp	= 0;
3375 	qp->q_sqtstamp	= 0;
3376 	qp->q_fp	= NULL;
3377 
3378 	wqp->q_last	= NULL;
3379 	wqp->q_next	= NULL;
3380 	wqp->q_ptr	= NULL;
3381 	wqp->q_flag	= QUSE;
3382 	wqp->q_bandp	= NULL;
3383 	wqp->q_stream	= NULL;
3384 	wqp->q_syncq	= sq;
3385 	wqp->q_nband	= 0;
3386 	wqp->q_nfsrv	= NULL;
3387 	wqp->q_draining	= 0;
3388 	wqp->q_syncqmsgs = 0;
3389 	wqp->q_qtstamp	= 0;
3390 	wqp->q_sqtstamp	= 0;
3391 	wqp->q_spri	= 0;
3392 
3393 	sq->sq_count	= 0;
3394 	sq->sq_rmqcount	= 0;
3395 	sq->sq_flags	= 0;
3396 	sq->sq_type	= 0;
3397 	sq->sq_callbflags = 0;
3398 	sq->sq_cancelid	= 0;
3399 	sq->sq_ciputctrl = NULL;
3400 	sq->sq_nciputctrl = 0;
3401 	sq->sq_needexcl = 0;
3402 	sq->sq_svcflags = 0;
3403 
3404 	return (qp);
3405 }
3406 
3407 /*
3408  * Free a pair of queues and the "attached" syncq.
3409  * Discard any messages left on the syncq(s), remove the syncq(s) from the
3410  * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
3411  */
3412 void
3413 freeq(queue_t *qp)
3414 {
3415 	qband_t *qbp, *nqbp;
3416 	syncq_t *sq, *outer;
3417 	queue_t *wqp = _WR(qp);
3418 
3419 	ASSERT(qp->q_flag & QREADR);
3420 
3421 	/*
3422 	 * If a previously dispatched taskq job is scheduled to run
3423 	 * sync_service() or a service routine is scheduled for the
3424 	 * queues about to be freed, wait here until all service is
3425 	 * done on the queue and all associated queues and syncqs.
3426 	 */
3427 	wait_svc(qp);
3428 
3429 	(void) flush_syncq(qp->q_syncq, qp);
3430 	(void) flush_syncq(wqp->q_syncq, wqp);
3431 	ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);
3432 
3433 	/*
3434 	 * Flush the queues before q_next is set to NULL. This is needed
3435 	 * in order to backenable any downstream queue before we go away.
3436 	 * Note: we are already removed from the stream so that the
3437 	 * backenabling will not cause any messages to be delivered to our
3438 	 * put procedures.
3439 	 */
3440 	flushq(qp, FLUSHALL);
3441 	flushq(wqp, FLUSHALL);
3442 
3443 	/* Tidy up - removeq only does a half-remove from stream */
3444 	qp->q_next = wqp->q_next = NULL;
3445 	ASSERT(!(qp->q_flag & QENAB));
3446 	ASSERT(!(wqp->q_flag & QENAB));
3447 
3448 	outer = qp->q_syncq->sq_outer;
3449 	if (outer != NULL) {
3450 		outer_remove(outer, qp->q_syncq);
3451 		if (wqp->q_syncq != qp->q_syncq)
3452 			outer_remove(outer, wqp->q_syncq);
3453 	}
3454 	/*
3455 	 * Free any syncqs that are outside what allocq returned.
3456 	 */
3457 	if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
3458 		free_syncq(qp->q_syncq);
3459 	if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
3460 		free_syncq(wqp->q_syncq);
3461 
3462 	ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3463 	ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3464 	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
3465 	ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
3466 	sq = SQ(qp);
3467 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
3468 	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
3469 	ASSERT(sq->sq_outer == NULL);
3470 	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
3471 	ASSERT(sq->sq_callbpend == NULL);
3472 	ASSERT(sq->sq_needexcl == 0);
3473 
3474 	if (sq->sq_ciputctrl != NULL) {
3475 		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
3476 		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
3477 		    sq->sq_nciputctrl, 0);
3478 		ASSERT(ciputctrl_cache != NULL);
3479 		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
3480 		sq->sq_ciputctrl = NULL;
3481 		sq->sq_nciputctrl = 0;
3482 	}
3483 
3484 	ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
3485 	ASSERT(qp->q_count == 0 && wqp->q_count == 0);
3486 	ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);
3487 
3488 	qp->q_flag &= ~QUSE;
3489 	wqp->q_flag &= ~QUSE;
3490 
3491 	/* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
3492 	/* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
3493 
3494 	qbp = qp->q_bandp;
3495 	while (qbp) {
3496 		nqbp = qbp->qb_next;
3497 		freeband(qbp);
3498 		qbp = nqbp;
3499 	}
3500 	qbp = wqp->q_bandp;
3501 	while (qbp) {
3502 		nqbp = qbp->qb_next;
3503 		freeband(qbp);
3504 		qbp = nqbp;
3505 	}
3506 	kmem_cache_free(queue_cache, qp);
3507 }
3508 
3509 /*
3510  * Allocate a qband structure.
3511  */
3512 qband_t *
3513 allocband(void)
3514 {
3515 	qband_t *qbp;
3516 
3517 	qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
3518 	if (qbp == NULL)
3519 		return (NULL);
3520 
3521 	qbp->qb_next	= NULL;
3522 	qbp->qb_count	= 0;
3523 	qbp->qb_mblkcnt	= 0;
3524 	qbp->qb_first	= NULL;
3525 	qbp->qb_last	= NULL;
3526 	qbp->qb_flag	= 0;
3527 
3528 	return (qbp);
3529 }
3530 
3531 /*
3532  * Free a qband structure.
3533  */
3534 void
3535 freeband(qband_t *qbp)
3536 {
3537 	kmem_cache_free(qband_cache, qbp);
3538 }
3539 
3540 /*
3541  * Just like putnextctl(9F), except that allocb_wait() is used.
3542  *
3543  * Consolidation Private, and of course only callable from the stream head or
3544  * routines that may block.
3545  */
3546 int
3547 putnextctl_wait(queue_t *q, int type)
3548 {
3549 	mblk_t *bp;
3550 	int error;
3551 
3552 	if ((datamsg(type) && (type != M_DELAY)) ||
3553 	    (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
3554 		return (0);
3555 
3556 	bp->b_datap->db_type = (unsigned char)type;
3557 	putnext(q, bp);
3558 	return (1);
3559 }
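
/*
 * A hedged usage sketch (M_START is just an example of a control
 * message type this routine accepts):
 *
 *	if (!putnextctl_wait(q, M_START))
 *		return (ENOMEM);	(zero means the message was not sent)
 */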
3560 
3561 /*
3562  * Run any possible bufcalls.
3563  */
3564 void
3565 runbufcalls(void)
3566 {
3567 	strbufcall_t *bcp;
3568 
3569 	mutex_enter(&bcall_monitor);
3570 	mutex_enter(&strbcall_lock);
3571 
3572 	if (strbcalls.bc_head) {
3573 		size_t count;
3574 		int nevent;
3575 
3576 		/*
3577 		 * Count how many events are on the list
3578 		 * now so we can avoid looping forever
3579 		 * in low-memory situations.
3580 		 */
3581 		nevent = 0;
3582 		for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
3583 			nevent++;
3584 
3585 		/*
3586 		 * Get an estimate of available memory from kmem_avail().
3587 		 * Wake all bufcall functions waiting for
3588 		 * memory whose requests could be satisfied
3589 		 * by 'count' bytes, and let them fight for it.
3590 		 */
3591 		count = kmem_avail();
3592 		while ((bcp = strbcalls.bc_head) != NULL && nevent) {
3593 			STRSTAT(bufcalls);
3594 			--nevent;
3595 			if (bcp->bc_size <= count) {
3596 				bcp->bc_executor = curthread;
3597 				mutex_exit(&strbcall_lock);
3598 				(*bcp->bc_func)(bcp->bc_arg);
3599 				mutex_enter(&strbcall_lock);
3600 				bcp->bc_executor = NULL;
3601 				cv_broadcast(&bcall_cv);
3602 				strbcalls.bc_head = bcp->bc_next;
3603 				kmem_free(bcp, sizeof (strbufcall_t));
3604 			} else {
3605 				/*
3606 				 * Too big; try again later. Note
3607 				 * that nevent was decremented above,
3608 				 * so we won't retry this one on this
3609 				 * iteration of the loop.
3610 				 */
3611 				if (bcp->bc_next != NULL) {
3612 					strbcalls.bc_head = bcp->bc_next;
3613 					bcp->bc_next = NULL;
3614 					strbcalls.bc_tail->bc_next = bcp;
3615 					strbcalls.bc_tail = bcp;
3616 				}
3617 			}
3618 		}
3619 		if (strbcalls.bc_head == NULL)
3620 			strbcalls.bc_tail = NULL;
3621 	}
3622 
3623 	mutex_exit(&strbcall_lock);
3624 	mutex_exit(&bcall_monitor);
3625 }
3626 
3627 
3628 /*
3629  * Actually run the queue's service routine.
3630  */
3631 static void
3632 runservice(queue_t *q)
3633 {
3634 	qband_t *qbp;
3635 
3636 	ASSERT(q->q_qinfo->qi_srvp);
3637 again:
3638 	entersq(q->q_syncq, SQ_SVC);
3639 	TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
3640 	    "runservice starts:%p", q);
3641 
3642 	if (!(q->q_flag & QWCLOSE))
3643 		(*q->q_qinfo->qi_srvp)(q);
3644 
3645 	TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
3646 	    "runservice ends:(%p)", q);
3647 
3648 	leavesq(q->q_syncq, SQ_SVC);
3649 
3650 	mutex_enter(QLOCK(q));
3651 	if (q->q_flag & QENAB) {
3652 		q->q_flag &= ~QENAB;
3653 		mutex_exit(QLOCK(q));
3654 		goto again;
3655 	}
3656 	q->q_flag &= ~QINSERVICE;
3657 	q->q_flag &= ~QBACK;
3658 	for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
3659 		qbp->qb_flag &= ~QB_BACK;
3660 	/*
3661 	 * Wake up threads waiting for the service procedure
3662 	 * to complete (strclose and qdetach).
3663 	 */
3664 	cv_broadcast(&q->q_wait);
3665 
3666 	mutex_exit(QLOCK(q));
3667 }
3668 
3669 /*
3670  * Background processing of bufcalls.
3671  */
3672 void
3673 streams_bufcall_service(void)
3674 {
3675 	callb_cpr_t	cprinfo;
3676 
3677 	CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
3678 	    "streams_bufcall_service");
3679 
3680 	mutex_enter(&strbcall_lock);
3681 
3682 	for (;;) {
3683 		if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
3684 			mutex_exit(&strbcall_lock);
3685 			runbufcalls();
3686 			mutex_enter(&strbcall_lock);
3687 		}
3688 		if (strbcalls.bc_head != NULL) {
3689 			clock_t wt, tick;
3690 
3691 			STRSTAT(bcwaits);
3692 			/* Wait for memory to become available */
3693 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3694 			tick = SEC_TO_TICK(60);
3695 			time_to_wait(&wt, tick);
3696 			(void) cv_timedwait(&memavail_cv, &strbcall_lock, wt);
3697 			CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3698 		}
3699 
3700 		/* Wait for new work to arrive */
3701 		if (strbcalls.bc_head == NULL) {
3702 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3703 			cv_wait(&strbcall_cv, &strbcall_lock);
3704 			CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3705 		}
3706 	}
3707 }
3708 
3709 /*
3710  * Background processing of STREAMS queue service requests and freebs
3711  * requests which failed taskq_dispatch.
3712  */
3713 static void
3714 streams_qbkgrnd_service(void)
3715 {
3716 	callb_cpr_t cprinfo;
3717 	queue_t *q;
3718 
3719 	CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3720 	    "streams_bkgrnd_service");
3721 
3722 	mutex_enter(&service_queue);
3723 
3724 	for (;;) {
3725 		/*
3726 		 * Wait for work to arrive.
3727 		 */
3728 		while ((freebs_list == NULL) && (qhead == NULL)) {
3729 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3730 			cv_wait(&services_to_run, &service_queue);
3731 			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3732 		}
3733 		/*
3734 		 * Handle all pending freebs requests to free memory.
3735 		 */
3736 		while (freebs_list != NULL) {
3737 			mblk_t *mp = freebs_list;
3738 			freebs_list = mp->b_next;
3739 			mutex_exit(&service_queue);
3740 			mblk_free(mp);
3741 			mutex_enter(&service_queue);
3742 		}
3743 		/*
3744 		 * Run pending queues.
3745 		 */
3746 		while (qhead != NULL) {
3747 			DQ(q, qhead, qtail, q_link);
3748 			ASSERT(q != NULL);
3749 			mutex_exit(&service_queue);
3750 			queue_service(q);
3751 			mutex_enter(&service_queue);
3752 		}
3753 		ASSERT(qhead == NULL && qtail == NULL);
3754 	}
3755 }
3756 
3757 /*
3758  * Background processing of streams background tasks which failed
3759  * taskq_dispatch.
3760  */
3761 static void
3762 streams_sqbkgrnd_service(void)
3763 {
3764 	callb_cpr_t cprinfo;
3765 	syncq_t *sq;
3766 
3767 	CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3768 	    "streams_sqbkgrnd_service");
3769 
3770 	mutex_enter(&service_queue);
3771 
3772 	for (;;) {
3773 		/*
3774 		 * Wait for work to arrive.
3775 		 */
3776 		while (sqhead == NULL) {
3777 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
3778 			cv_wait(&syncqs_to_run, &service_queue);
3779 			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3780 		}
3781 
3782 		/*
3783 		 * Run pending syncqs.
3784 		 */
3785 		while (sqhead != NULL) {
3786 			DQ(sq, sqhead, sqtail, sq_next);
3787 			ASSERT(sq != NULL);
3788 			ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
3789 			mutex_exit(&service_queue);
3790 			syncq_service(sq);
3791 			mutex_enter(&service_queue);
3792 		}
3793 	}
3794 }
3795 
3796 /*
3797  * Disable the syncq and wait for background syncq processing to complete.
3798  * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
3799  * list.
3800  */
3801 void
3802 wait_sq_svc(syncq_t *sq)
3803 {
3804 	mutex_enter(SQLOCK(sq));
3805 	sq->sq_svcflags |= SQ_DISABLED;
3806 	if (sq->sq_svcflags & SQ_BGTHREAD) {
3807 		syncq_t *sq_chase;
3808 		syncq_t *sq_curr;
3809 		int removed;
3810 
3811 		ASSERT(sq->sq_servcount == 1);
3812 		mutex_enter(&service_queue);
3813 		RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3814 		mutex_exit(&service_queue);
3815 		if (removed) {
3816 			sq->sq_svcflags &= ~SQ_BGTHREAD;
3817 			sq->sq_servcount = 0;
3818 			STRSTAT(sqremoved);
3819 			goto done;
3820 		}
3821 	}
3822 	while (sq->sq_servcount != 0) {
3823 		sq->sq_flags |= SQ_WANTWAKEUP;
3824 		cv_wait(&sq->sq_wait, SQLOCK(sq));
3825 	}
3826 done:
3827 	mutex_exit(SQLOCK(sq));
3828 }
3829 
3830 /*
3831  * Put a syncq on the list of syncq's to be serviced by the sqthread.
3832  * Add the argument to the end of the sqhead list and set the flag
3833  * indicating this syncq has been enabled.  If it has already been
3834  * enabled, don't do anything.
3835  * This routine assumes that SQLOCK is held.
3836  * NOTE that the lock order is to have the SQLOCK first,
3837  * so if the service_queue lock is held, we need to release it
3838  * before acquiring the SQLOCK (mostly relevant for the background
3839  * thread, and this seems to be common among the STREAMS global locks).
3840  * Note that the sq_svcflags are protected by the SQLOCK.
3841  */
3842 void
3843 sqenable(syncq_t *sq)
3844 {
3845 	/*
3846 	 * Holding SQLOCK is probably not important except for where I
3847 	 * believe this routine is being called.  At those points it is
3848 	 * already held (and it is a pain to release it just for this
3849 	 * routine, so don't do it).
3850 	 */
3851 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
3852 
3853 	IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
3854 	IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);
3855 
3856 	/*
3857 	 * Do not put on list if background thread is scheduled or
3858 	 * syncq is disabled.
3859 	 */
3860 	if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
3861 		return;
3862 
3863 	/*
3864 	 * Check whether we should enable sq at all.
3865 	 * Non-PERMOD syncqs may be drained by at most one thread.
3866 	 * PERMOD syncqs may be drained by several threads, but we limit the
3867 	 * total number of threads to the lesser of
3868 	 *	the number of queues on the syncq and
3869 	 *	the number of online CPUs.
3870 	 */
3871 	if (sq->sq_servcount != 0) {
3872 		if (((sq->sq_type & SQ_PERMOD) == 0) ||
3873 		    (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
3874 			STRSTAT(sqtoomany);
3875 			return;
3876 		}
3877 	}
3878 
3879 	sq->sq_tstamp = lbolt;
3880 	STRSTAT(sqenables);
3881 
3882 	/* Attempt a taskq dispatch */
3883 	sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
3884 	    (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
3885 	if (sq->sq_servid != NULL) {
3886 		sq->sq_servcount++;
3887 		return;
3888 	}
3889 
3890 	/*
3891 	 * This taskq dispatch failed, but a previous one may have succeeded.
3892 	 * Don't try to schedule on the background thread whilst there is
3893 	 * outstanding taskq processing.
3894 	 */
3895 	if (sq->sq_servcount != 0)
3896 		return;
3897 
3898 	/*
3899 	 * System is low on resources and can't perform a non-sleeping
3900 	 * dispatch. Schedule the syncq for a background thread and mark the
3901 	 * syncq to avoid any further taskq dispatch attempts.
3902 	 */
3903 	mutex_enter(&service_queue);
3904 	STRSTAT(taskqfails);
3905 	ENQUEUE(sq, sqhead, sqtail, sq_next);
3906 	sq->sq_svcflags |= SQ_BGTHREAD;
3907 	sq->sq_servcount = 1;
3908 	cv_signal(&syncqs_to_run);
3909 	mutex_exit(&service_queue);
3910 }
3911 
3912 /*
3913  * Note: fifo_close() depends on the mblk_t on the queue being freed
3914  * asynchronously. The asynchronous freeing of messages breaks the
3915  * recursive call chain of fifo_close() while there are I_SENDFD type of
3916  * messages referring to other file pointers on the queue. Then when
3917  * closing pipes it can avoid stack overflow in case of daisy-chained
3918  * pipes, and also avoid deadlock in case of fifonode_t pairs (which
3919  * share the same fifolock_t).
3920  */
3921 
3922 void
3923 freebs_enqueue(mblk_t *mp, dblk_t *dbp)
3924 {
3925 	esb_queue_t *eqp = &system_esbq;
3926 
3927 	ASSERT(dbp->db_mblk == mp);
3928 
3929 	/*
3930 	 * Check data sanity. The dblock should have a non-NULL free function.
3931 	 * It is better to panic here than later when the dblock is freed
3932 	 * asynchronously when the context is lost.
3933 	 */
3934 	if (dbp->db_frtnp->free_func == NULL) {
3935 		panic("freebs_enqueue: dblock %p has a NULL free callback",
3936 		    (void *)dbp);
3937 	}
3938 
3939 	mutex_enter(&eqp->eq_lock);
3940 	/* queue the new mblk on the esballoc queue */
3941 	if (eqp->eq_head == NULL) {
3942 		eqp->eq_head = eqp->eq_tail = mp;
3943 	} else {
3944 		eqp->eq_tail->b_next = mp;
3945 		eqp->eq_tail = mp;
3946 	}
3947 	eqp->eq_len++;
3948 
3949 	/* If we're the first thread to reach the threshold, process */
3950 	if (eqp->eq_len >= esbq_max_qlen &&
3951 	    !(eqp->eq_flags & ESBQ_PROCESSING))
3952 		esballoc_process_queue(eqp);
3953 
3954 	esballoc_set_timer(eqp, esbq_timeout);
3955 	mutex_exit(&eqp->eq_lock);
3956 }
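
/*
 * Illustrative sketch (editorial addition): the kind of esballoc(9F)
 * free routine that eventually runs through the machinery above; the
 * "xx" names are hypothetical.  The free_func must be non-NULL (see the
 * panic check in freebs_enqueue()), the frtn_t must stay valid until
 * the message is freed (it is typically embedded in the buffer
 * descriptor), and the routine must not reference a queue that may
 * have closed by the time it runs asynchronously.
 *
 *	static void
 *	xxfree(char *arg)
 *	{
 *		xx_buf_t *xbp = (xx_buf_t *)arg;
 *
 *		xx_return_buffer_to_pool(xbp);
 *	}
 *
 *	xbp->xb_frtn.free_func = xxfree;
 *	xbp->xb_frtn.free_arg = (char *)xbp;
 *	mp = esballoc(xbp->xb_base, xbp->xb_size, BPRI_MED, &xbp->xb_frtn);
 */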
3957 
3958 static void
3959 esballoc_process_queue(esb_queue_t *eqp)
3960 {
3961 	mblk_t	*mp;
3962 
3963 	ASSERT(MUTEX_HELD(&eqp->eq_lock));
3964 
3965 	eqp->eq_flags |= ESBQ_PROCESSING;
3966 
3967 	do {
3968 		/*
3969 		 * Detach the message chain for processing.
3970 		 */
3971 		mp = eqp->eq_head;
3972 		eqp->eq_tail->b_next = NULL;
3973 		eqp->eq_head = eqp->eq_tail = NULL;
3974 		eqp->eq_len = 0;
3975 		mutex_exit(&eqp->eq_lock);
3976 
3977 		/*
3978 		 * Process the message chain.
3979 		 */
3980 		esballoc_enqueue_mblk(mp);
3981 		mutex_enter(&eqp->eq_lock);
3982 	} while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));
3983 
3984 	eqp->eq_flags &= ~ESBQ_PROCESSING;
3985 }
3986 
3987 /*
3988  * taskq callback routine to free esballoced mblk's
3989  */
3990 static void
3991 esballoc_mblk_free(mblk_t *mp)
3992 {
3993 	mblk_t	*nextmp;
3994 
3995 	for (; mp != NULL; mp = nextmp) {
3996 		nextmp = mp->b_next;
3997 		mp->b_next = NULL;
3998 		mblk_free(mp);
3999 	}
4000 }
4001 
4002 static void
4003 esballoc_enqueue_mblk(mblk_t *mp)
4004 {
4005 
4006 	if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
4007 	    TQ_NOSLEEP) == NULL) {
4008 		mblk_t *first_mp = mp;
4009 		/*
4010 		 * System is low on resources and can't perform a non-sleeping
4011 		 * dispatch. Schedule for a background thread.
4012 		 */
4013 		mutex_enter(&service_queue);
4014 		STRSTAT(taskqfails);
4015 
4016 		while (mp->b_next != NULL)
4017 			mp = mp->b_next;
4018 
4019 		mp->b_next = freebs_list;
4020 		freebs_list = first_mp;
4021 		cv_signal(&services_to_run);
4022 		mutex_exit(&service_queue);
4023 	}
4024 }
4025 
4026 static void
4027 esballoc_timer(void *arg)
4028 {
4029 	esb_queue_t *eqp = arg;
4030 
4031 	mutex_enter(&eqp->eq_lock);
4032 	eqp->eq_flags &= ~ESBQ_TIMER;
4033 
4034 	if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
4035 	    eqp->eq_len > 0)
4036 		esballoc_process_queue(eqp);
4037 
4038 	esballoc_set_timer(eqp, esbq_timeout);
4039 	mutex_exit(&eqp->eq_lock);
4040 }
4041 
4042 static void
4043 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
4044 {
4045 	ASSERT(MUTEX_HELD(&eqp->eq_lock));
4046 
4047 	if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
4048 		(void) timeout(esballoc_timer, eqp, eq_timeout);
4049 		eqp->eq_flags |= ESBQ_TIMER;
4050 	}
4051 }
4052 
4053 void
4054 esballoc_queue_init(void)
4055 {
4056 	system_esbq.eq_len = 0;
4057 	system_esbq.eq_head = system_esbq.eq_tail = NULL;
4058 	system_esbq.eq_flags = 0;
4059 }
4060 
4061 /*
4062  * Set the QBACK or QB_BACK flag in the given queue for
4063  * the given priority band.
4064  */
4065 void
4066 setqback(queue_t *q, unsigned char pri)
4067 {
4068 	int i;
4069 	qband_t *qbp;
4070 	qband_t **qbpp;
4071 
4072 	ASSERT(MUTEX_HELD(QLOCK(q)));
4073 	if (pri != 0) {
4074 		if (pri > q->q_nband) {
4075 			qbpp = &q->q_bandp;
4076 			while (*qbpp)
4077 				qbpp = &(*qbpp)->qb_next;
4078 			while (pri > q->q_nband) {
4079 				if ((*qbpp = allocband()) == NULL) {
4080 					cmn_err(CE_WARN,
4081 					    "setqback: can't allocate qband\n");
4082 					return;
4083 				}
4084 				(*qbpp)->qb_hiwat = q->q_hiwat;
4085 				(*qbpp)->qb_lowat = q->q_lowat;
4086 				q->q_nband++;
4087 				qbpp = &(*qbpp)->qb_next;
4088 			}
4089 		}
4090 		qbp = q->q_bandp;
4091 		i = pri;
4092 		while (--i)
4093 			qbp = qbp->qb_next;
4094 		qbp->qb_flag |= QB_BACK;
4095 	} else {
4096 		q->q_flag |= QBACK;
4097 	}
4098 }
4099 
4100 int
4101 strcopyin(void *from, void *to, size_t len, int copyflag)
4102 {
4103 	if (copyflag & U_TO_K) {
4104 		ASSERT((copyflag & K_TO_K) == 0);
4105 		if (copyin(from, to, len))
4106 			return (EFAULT);
4107 	} else {
4108 		ASSERT(copyflag & K_TO_K);
4109 		bcopy(from, to, len);
4110 	}
4111 	return (0);
4112 }
4113 
4114 int
4115 strcopyout(void *from, void *to, size_t len, int copyflag)
4116 {
4117 	if (copyflag & U_TO_K) {
4118 		if (copyout(from, to, len))
4119 			return (EFAULT);
4120 	} else {
4121 		ASSERT(copyflag & K_TO_K);
4122 		bcopy(from, to, len);
4123 	}
4124 	return (0);
4125 }
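
/*
 * Illustrative sketch (editorial addition): how a caller selects the
 * copyflag for the two routines above.  An ioctl path, for example,
 * passes K_TO_K when the request originated inside the kernel (FKIOCTL)
 * and U_TO_K when the buffer lives in user space; "strioc" here stands
 * in for whatever structure is being transferred.
 *
 *	int copyflag = (flag & FKIOCTL) ? K_TO_K : U_TO_K;
 *
 *	error = strcopyin((void *)arg, &strioc, sizeof (strioc), copyflag);
 *	if (error != 0)
 *		return (error);
 */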
4126 
4127 /*
4128  * strsignal_nolock() posts a signal to the process(es) at the stream head.
4129  * It assumes that the stream head lock is already held, whereas strsignal()
4130  * acquires the lock first.  This routine was created because a few callers
4131  * release the stream head lock before calling only to re-acquire it after
4132  * it returns.
4133  */
4134 void
4135 strsignal_nolock(stdata_t *stp, int sig, int32_t band)
4136 {
4137 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4138 	switch (sig) {
4139 	case SIGPOLL:
4140 		if (stp->sd_sigflags & S_MSG)
4141 			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
4142 		break;
4143 
4144 	default:
4145 		if (stp->sd_pgidp) {
4146 			pgsignal(stp->sd_pgidp, sig);
4147 		}
4148 		break;
4149 	}
4150 }
4151 
4152 void
4153 strsignal(stdata_t *stp, int sig, int32_t band)
4154 {
4155 	TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
4156 	    "strsignal:%p, %X, %X", stp, sig, band);
4157 
4158 	mutex_enter(&stp->sd_lock);
4159 	switch (sig) {
4160 	case SIGPOLL:
4161 		if (stp->sd_sigflags & S_MSG)
4162 			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
4163 		break;
4164 
4165 	default:
4166 		if (stp->sd_pgidp) {
4167 			pgsignal(stp->sd_pgidp, sig);
4168 		}
4169 		break;
4170 	}
4171 	mutex_exit(&stp->sd_lock);
4172 }
4173 
4174 void
4175 strhup(stdata_t *stp)
4176 {
4177 	ASSERT(mutex_owned(&stp->sd_lock));
4178 	pollwakeup(&stp->sd_pollist, POLLHUP);
4179 	if (stp->sd_sigflags & S_HANGUP)
4180 		strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
4181 }
4182 
4183 /*
4184  * Backenable the first queue upstream from `q' with a service procedure.
4185  */
4186 void
4187 backenable(queue_t *q, uchar_t pri)
4188 {
4189 	queue_t	*nq;
4190 
4191 	 * Our presence might not prevent other modules in our own
4192 	 * stream from popping/pushing since the caller of getq might not
4193 	 * have a claim on the queue (some drivers do a getq on somebody
4194 	 * else's queue - they know that the queue itself is not going away
4195 	 * but the framework has to guarantee q_next in that stream).
4196 	 * but the framework has to guarantee q_next in that stream.)
4197 	 */
4198 	claimstr(q);
4199 
4200 	/* find nearest back queue with service proc */
4201 	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
4202 		ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
4203 	}
4204 
4205 	if (nq) {
4206 		kthread_t *freezer;
4207 		/*
4208 		 * backenable can be called either with no locks held
4209 		 * or with the stream frozen (the latter occurs when a module
4210 		 * calls rmvq with the stream frozen.) If the stream is frozen
4211 		 * by the caller the caller will hold all qlocks in the stream.
4212 		 * Note that a frozen stream doesn't freeze a mated stream,
4213 		 * so we explicitly check for that.
4214 		 */
4215 		freezer = STREAM(q)->sd_freezer;
4216 		if (freezer != curthread || STREAM(q) != STREAM(nq)) {
4217 			mutex_enter(QLOCK(nq));
4218 		}
4219 #ifdef DEBUG
4220 		else {
4221 			ASSERT(frozenstr(q));
4222 			ASSERT(MUTEX_HELD(QLOCK(q)));
4223 			ASSERT(MUTEX_HELD(QLOCK(nq)));
4224 		}
4225 #endif
4226 		setqback(nq, pri);
4227 		qenable_locked(nq);
4228 		if (freezer != curthread || STREAM(q) != STREAM(nq))
4229 			mutex_exit(QLOCK(nq));
4230 	}
4231 	releasestr(q);
4232 }
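
/*
 * Illustrative sketch (editorial addition): the flow-control pattern
 * that backenable() completes.  A module's write-side put procedure
 * queues a message instead of forwarding it when the downstream flow is
 * blocked; once the congestion clears, the framework backenables this
 * queue so its service procedure can resume draining.  The "xx" names
 * are hypothetical.
 *
 *	static int
 *	xxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (mp->b_datap->db_type < QPCTL && !canputnext(q))
 *			(void) putq(q, mp);
 *		else
 *			putnext(q, mp);
 *		return (0);
 *	}
 */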
4233 
4234 /*
4235  * Return the appropriate errno when one of flags_to_check is set
4236  * in sd_flags. Uses the exported error routines if they are set.
4237  * Will return 0 if no error is set (or if the exported error routines
4238  * do not return an error).
4239  *
4240  * If there is both a read and write error to check we prefer the read error.
4241  * Also, give preference to recorded errno's over the error functions.
4242  * The flags that are handled are:
4243  *	STPLEX		return EINVAL
4244  *	STRDERR		return sd_rerror (and clear if STRDERRNONPERSIST)
4245  *	STWRERR		return sd_werror (and clear if STWRERRNONPERSIST)
4246  *	STRHUP		return sd_werror
4247  *
4248  * If the caller indicates that the operation is a peek a nonpersistent error
4249  * is not cleared.
4250  */
4251 int
4252 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
4253 {
4254 	int32_t sd_flag = stp->sd_flag & flags_to_check;
4255 	int error = 0;
4256 
4257 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4258 	ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
4259 	if (sd_flag & STPLEX)
4260 		error = EINVAL;
4261 	else if (sd_flag & STRDERR) {
4262 		error = stp->sd_rerror;
4263 		if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
4264 			/*
4265 			 * Read errors are non-persistent, i.e. discarded once
4266 			 * returned to a non-peeking caller.
4267 			 */
4268 			stp->sd_rerror = 0;
4269 			stp->sd_flag &= ~STRDERR;
4270 		}
4271 		if (error == 0 && stp->sd_rderrfunc != NULL) {
4272 			int clearerr = 0;
4273 
4274 			error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
4275 			    &clearerr);
4276 			if (clearerr) {
4277 				stp->sd_flag &= ~STRDERR;
4278 				stp->sd_rderrfunc = NULL;
4279 			}
4280 		}
4281 	} else if (sd_flag & STWRERR) {
4282 		error = stp->sd_werror;
4283 		if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
4284 			/*
4285 			 * Write errors are non-persistent, i.e. discarded once
4286 			 * returned to a non-peeking caller.
4287 			 */
4288 			stp->sd_werror = 0;
4289 			stp->sd_flag &= ~STWRERR;
4290 		}
4291 		if (error == 0 && stp->sd_wrerrfunc != NULL) {
4292 			int clearerr = 0;
4293 
4294 			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
4295 			    &clearerr);
4296 			if (clearerr) {
4297 				stp->sd_flag &= ~STWRERR;
4298 				stp->sd_wrerrfunc = NULL;
4299 			}
4300 		}
4301 	} else if (sd_flag & STRHUP) {
4302 		/* sd_werror set when STRHUP */
4303 		error = stp->sd_werror;
4304 	}
4305 	return (error);
4306 }
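
/*
 * Illustrative sketch (editorial addition): the usual calling pattern
 * for strgeterr(), as used by the stream head read/write paths.  The
 * test of sd_flag is a cheap pre-check; strgeterr() itself must be
 * called with sd_lock held.
 *
 *	mutex_enter(&stp->sd_lock);
 *	if (stp->sd_flag & (STRDERR|STPLEX)) {
 *		error = strgeterr(stp, STRDERR|STPLEX, 0);
 *		if (error != 0) {
 *			mutex_exit(&stp->sd_lock);
 *			return (error);
 *		}
 *	}
 *	mutex_exit(&stp->sd_lock);
 */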
4307 
4308 
4309 /*
4310  * Single-thread open/close/push/pop,
4311  * also for twisted streams.
4312  */
4313 int
4314 strstartplumb(stdata_t *stp, int flag, int cmd)
4315 {
4316 	int waited = 1;
4317 	int error = 0;
4318 
4319 	if (STRMATED(stp)) {
4320 		struct stdata *stmatep = stp->sd_mate;
4321 
4322 		STRLOCKMATES(stp);
4323 		while (waited) {
4324 			waited = 0;
4325 			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4326 				if ((cmd == I_POP) &&
4327 				    (flag & (FNDELAY|FNONBLOCK))) {
4328 					STRUNLOCKMATES(stp);
4329 					return (EAGAIN);
4330 				}
4331 				waited = 1;
4332 				mutex_exit(&stp->sd_lock);
4333 				if (!cv_wait_sig(&stmatep->sd_monitor,
4334 				    &stmatep->sd_lock)) {
4335 					mutex_exit(&stmatep->sd_lock);
4336 					return (EINTR);
4337 				}
4338 				mutex_exit(&stmatep->sd_lock);
4339 				STRLOCKMATES(stp);
4340 			}
4341 			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4342 				if ((cmd == I_POP) &&
4343 				    (flag & (FNDELAY|FNONBLOCK))) {
4344 					STRUNLOCKMATES(stp);
4345 					return (EAGAIN);
4346 				}
4347 				waited = 1;
4348 				mutex_exit(&stmatep->sd_lock);
4349 				if (!cv_wait_sig(&stp->sd_monitor,
4350 				    &stp->sd_lock)) {
4351 					mutex_exit(&stp->sd_lock);
4352 					return (EINTR);
4353 				}
4354 				mutex_exit(&stp->sd_lock);
4355 				STRLOCKMATES(stp);
4356 			}
4357 			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4358 				error = strgeterr(stp,
4359 				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
4360 				if (error != 0) {
4361 					STRUNLOCKMATES(stp);
4362 					return (error);
4363 				}
4364 			}
4365 		}
4366 		stp->sd_flag |= STRPLUMB;
4367 		STRUNLOCKMATES(stp);
4368 	} else {
4369 		mutex_enter(&stp->sd_lock);
4370 		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4371 			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
4372 			    (flag & (FNDELAY|FNONBLOCK))) {
4373 				mutex_exit(&stp->sd_lock);
4374 				return (EAGAIN);
4375 			}
4376 			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
4377 				mutex_exit(&stp->sd_lock);
4378 				return (EINTR);
4379 			}
4380 			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4381 				error = strgeterr(stp,
4382 				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
4383 				if (error != 0) {
4384 					mutex_exit(&stp->sd_lock);
4385 					return (error);
4386 				}
4387 			}
4388 		}
4389 		stp->sd_flag |= STRPLUMB;
4390 		mutex_exit(&stp->sd_lock);
4391 	}
4392 	return (0);
4393 }
4394 
4395 /*
4396  * Complete the plumbing operation associated with stream `stp'.
4397  */
4398 void
4399 strendplumb(stdata_t *stp)
4400 {
4401 	ASSERT(MUTEX_HELD(&stp->sd_lock));
4402 	ASSERT(stp->sd_flag & STRPLUMB);
4403 	stp->sd_flag &= ~STRPLUMB;
4404 	cv_broadcast(&stp->sd_monitor);
4405 }
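
/*
 * Illustrative sketch (editorial addition): how strstartplumb() and
 * strendplumb() bracket a plumbing operation, mirroring their use by
 * the stream head ioctl paths.
 *
 *	if ((error = strstartplumb(stp, flag, cmd)) != 0)
 *		return (error);
 *	...perform the I_PUSH/I_POP/link operation...
 *	mutex_enter(&stp->sd_lock);
 *	strendplumb(stp);
 *	mutex_exit(&stp->sd_lock);
 */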
4406 
4407 /*
4408  * This describes how the STREAMS framework handles synchronization
4409  * during open/push and close/pop.
4410  * The key interfaces for open and close are qprocson and qprocsoff,
4411  * respectively. While the close case is in general harder, both open
4412  * and close have significant similarities.
4413  *
4414  * During close the STREAMS framework has to both ensure that there
4415  * are no stale references to the queue pair (and syncq) that
4416  * are being closed and also provide the guarantees that are documented
4417  * in qprocsoff(9F).
4418  * If there are stale references to the queue that is closing it can
4419  * result in kernel memory corruption or kernel panics.
4420  *
4421  * Note that it is up to the module/driver to ensure that it itself
4422  * does not have any stale references to the closing queues once its close
4423  * routine returns. This includes:
4424  *  - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
4425  *    associated with the queues. For timeout and bufcall callbacks the
4426  *    module/driver also has to ensure (or wait for) any callbacks that
4427  *    are in progress.
4428  *  - If the module/driver is using esballoc it has to ensure that any
4429  *    esballoc free functions do not refer to a queue that has closed.
4430  *    (Note that in general the close routine can not wait for the esballoc'ed
4431  *    messages to be freed since that can cause a deadlock.)
4432  *  - Cancelling any interrupts that refer to the closing queues and
4433  *    also ensuring that there are no interrupts in progress that will
4434  *    refer to the closing queues once the close routine returns.
4435  *  - For multiplexors removing any driver global state that refers to
4436  *    the closing queue and also ensuring that there are no threads in
4437  *    the multiplexor that have picked up a queue pointer but not yet
4438  *    finished using it.
4439  *
4440  * In addition, a driver/module can only reference the q_next pointer
4441  * in its open, close, put, or service procedures or in a
4442  * qtimeout/qbufcall callback procedure executing "on" the correct
4443  * stream. Thus it can not reference the q_next pointer in an interrupt
4444  * routine or a timeout, bufcall or esballoc callback routine. Likewise
4445  * it can not reference q_next of a different queue e.g. in a mux that
4446  * passes messages from one queue's put/service procedure to another queue.
4447  * In all the cases when the driver/module can not access the q_next
4448  * field it must use the *next* versions e.g. canputnext instead of
4449  * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
4450  *
4451  *
4452  * Assuming that the driver/module conforms to the above constraints
4453  * the STREAMS framework has to avoid stale references to q_next for all
4454  * the framework internal cases which include (but are not limited to):
4455  *  - Threads in canput/canputnext/backenable and elsewhere that are
4456  *    walking q_next.
4457  *  - Messages on a syncq that have a reference to the queue through b_queue.
4458  *  - Messages on an outer perimeter (syncq) that have a reference to the
4459  *    queue through b_queue.
4460  *  - Threads that use q_nfsrv (e.g. canput) to find a queue.
4461  *    Note that only canput and bcanput use q_nfsrv without any locking.
4462  *
4463  * To provide the qprocsoff(9F) guarantees, the STREAMS framework must
4464  * ensure that after qprocsoff returns no threads can enter the put or
4465  * service routines for the closing read or write-side queue.
4466  * In addition to preventing "direct" entry into the put procedures
4467  * the framework also has to prevent messages being drained from
4468  * the syncq or the outer perimeter.
4469  * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
4470  * mechanism to prevent qwriter(PERIM_OUTER) from running after
4471  * qprocsoff has returned.
4472  * Note that if a module/driver uses put(9F) on one of its own queues
4473  * it is up to the module/driver to ensure that the put() doesn't
4474  * get called when the queue is closing.
4475  *
4476  *
4477  * The framework aspects of the above "contract" are implemented by
4478  * qprocsoff, removeq, and strlock:
4479  *  - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
4480  *    entering the service procedures.
4481  *  - strlock acquires the sd_lock and sd_reflock to prevent putnext,
4482  *    canputnext, backenable etc from dereferencing the q_next that will
4483  *    soon change.
4484  *  - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
4485  *    or other q_next walker that uses claimstr/releasestr to finish.
4486  *  - optionally for every syncq in the stream strlock acquires all the
4487  *    sq_lock's and waits for all sq_counts to drop to a value that indicates
4488  *    that no thread executes in the put or service procedures and that no
4489  *    thread is draining into the module/driver. This ensures that no
4490  *    open, close, put, service, or qtimeout/qbufcall callback procedure is
4491  *    currently executing hence no such thread can end up with the old stale
4492  *    q_next value and no canput/backenable can have the old stale
4493  *    q_nfsrv/q_next.
4494  *  - qdetach (wait_svc) makes sure that any scheduled or running threads
4495  *    have either finished or observed the QWCLOSE flag and gone away.
4496  */
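
/*
 * Illustrative sketch (editorial addition): the claimstr()/releasestr()
 * bracket referred to above.  A q_next walker takes a claim so that
 * strlock() can wait, via sd_refcnt, for it to finish before q_next is
 * changed (canputnext() follows this same pattern).
 *
 *	claimstr(q);
 *	nq = q->q_next;
 *	...examine or follow nq; it cannot be unlinked while claimed...
 *	releasestr(q);
 */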
4497 
4498 
4499 /*
4500  * Get all the locks necessary to change q_next.
4501  *
4502  * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
4503  * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
4504  * the only threads inside the syncq are threads currently calling removeq().
4505  * Since threads calling removeq() are in the process of removing their queues
4506  * from the stream, we do not need to worry about them accessing a stale q_next
4507  * pointer and thus we do not need to wait for them to exit (in fact, waiting
4508  * for them can cause deadlock).
4509  *
4510  * This routine is subject to starvation since it does not set any flag to
4511  * prevent threads from entering a module in the stream (i.e. sq_count can
4512  * increase on some syncq while it is waiting on some other syncq.)
4513  *
4514  * Assumes that only one thread attempts to call strlock for a given
4515  * stream. If this is not the case the two threads would deadlock.
4516  * This assumption is guaranteed since strlock is only called by insertq
4517  * and removeq and streams plumbing changes are single-threaded for
4518  * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
4519  *
4520  * For pipes, it is not difficult to atomically designate a pair of streams
4521  * to be mated. Once mated atomically by the framework the twisted pair remain
4522  * configured that way until dismantled atomically by the framework.
4523  * When plumbing takes place on a twisted stream it is necessary to ensure that
4524  * this operation is done exclusively on the twisted stream since two such
4525  * operations, each initiated on different ends of the pipe will deadlock
4526  * waiting for each other to complete.
4527  *
4528  * On entry, no locks should be held.
4529  * The locks acquired and held by strlock depend on a few factors.
4530  * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
4531  *   and held on exit and all sq_count are at an acceptable level.
4532  * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
4533  *   sd_refcnt being zero.
4534  */
4535 
4536 static void
4537 strlock(struct stdata *stp, sqlist_t *sqlist)
4538 {
4539 	syncql_t *sql, *sql2;
4540 retry:
4541 	/*
4542 	 * Wait for any claimstr to go away.
4543 	 */
4544 	if (STRMATED(stp)) {
4545 		struct stdata *stp1, *stp2;
4546 
4547 		STRLOCKMATES(stp);
4548 		/*
4549 		 * Note that the selection of locking order is not
4550 		 * important, just that they are always acquired in
4551 		 * the same order.  To ensure this, we choose this
4552 		 * order based on the value of the pointer, and since
4553 		 * the pointer will not change for the life of this
4554 		 * pair, we will always grab the locks in the same
4555 		 * order (and hence, prevent deadlocks).
4556 		 */
4557 		if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
4558 			stp1 = stp;
4559 			stp2 = stp->sd_mate;
4560 		} else {
4561 			stp2 = stp;
4562 			stp1 = stp->sd_mate;
4563 		}
4564 		mutex_enter(&stp1->sd_reflock);
4565 		if (stp1->sd_refcnt > 0) {
4566 			STRUNLOCKMATES(stp);
4567 			cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
4568 			mutex_exit(&stp1->sd_reflock);
4569 			goto retry;
4570 		}
4571 		mutex_enter(&stp2->sd_reflock);
4572 		if (stp2->sd_refcnt > 0) {
4573 			STRUNLOCKMATES(stp);
4574 			mutex_exit(&stp1->sd_reflock);
4575 			cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
4576 			mutex_exit(&stp2->sd_reflock);
4577 			goto retry;
4578 		}
4579 		STREAM_PUTLOCKS_ENTER(stp1);
4580 		STREAM_PUTLOCKS_ENTER(stp2);
4581 	} else {
4582 		mutex_enter(&stp->sd_lock);
4583 		mutex_enter(&stp->sd_reflock);
4584 		while (stp->sd_refcnt > 0) {
4585 			mutex_exit(&stp->sd_lock);
4586 			cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
4587 			if (mutex_tryenter(&stp->sd_lock) == 0) {
4588 				mutex_exit(&stp->sd_reflock);
4589 				mutex_enter(&stp->sd_lock);
4590 				mutex_enter(&stp->sd_reflock);
4591 			}
4592 		}
4593 		STREAM_PUTLOCKS_ENTER(stp);
4594 	}
4595 
4596 	if (sqlist == NULL)
4597 		return;
4598 
4599 	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4600 		syncq_t *sq = sql->sql_sq;
4601 		uint16_t count;
4602 
4603 		mutex_enter(SQLOCK(sq));
4604 		count = sq->sq_count;
4605 		ASSERT(sq->sq_rmqcount <= count);
4606 		SQ_PUTLOCKS_ENTER(sq);
4607 		SUM_SQ_PUTCOUNTS(sq, count);
4608 		if (count == sq->sq_rmqcount)
4609 			continue;
4610 
4611 		/* Failed - drop all locks that we have acquired so far */
4612 		if (STRMATED(stp)) {
4613 			STREAM_PUTLOCKS_EXIT(stp);
4614 			STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4615 			STRUNLOCKMATES(stp);
4616 			mutex_exit(&stp->sd_reflock);
4617 			mutex_exit(&stp->sd_mate->sd_reflock);
4618 		} else {
4619 			STREAM_PUTLOCKS_EXIT(stp);
4620 			mutex_exit(&stp->sd_lock);
4621 			mutex_exit(&stp->sd_reflock);
4622 		}
4623 		for (sql2 = sqlist->sqlist_head; sql2 != sql;
4624 		    sql2 = sql2->sql_next) {
4625 			SQ_PUTLOCKS_EXIT(sql2->sql_sq);
4626 			mutex_exit(SQLOCK(sql2->sql_sq));
4627 		}
4628 
4629 		/*
4630 		 * The wait loop below may starve when there are many threads
4631 		 * claiming the syncq. This is especially a problem with permod
4632 		 * syncqs (IP). To lessen the impact of the problem we increment
4633 		 * sq_needexcl and clear fastbits so that putnexts will slow
4634 		 * down and call sqenable instead of draining right away.
4635 		 */
4636 		sq->sq_needexcl++;
4637 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4638 		while (count > sq->sq_rmqcount) {
4639 			sq->sq_flags |= SQ_WANTWAKEUP;
4640 			SQ_PUTLOCKS_EXIT(sq);
4641 			cv_wait(&sq->sq_wait, SQLOCK(sq));
4642 			count = sq->sq_count;
4643 			SQ_PUTLOCKS_ENTER(sq);
4644 			SUM_SQ_PUTCOUNTS(sq, count);
4645 		}
4646 		sq->sq_needexcl--;
4647 		if (sq->sq_needexcl == 0)
4648 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4649 		SQ_PUTLOCKS_EXIT(sq);
4650 		ASSERT(count == sq->sq_rmqcount);
4651 		mutex_exit(SQLOCK(sq));
4652 		goto retry;
4653 	}
4654 }
4655 
4656 /*
4657  * Drop all the locks that strlock acquired.
4658  */
4659 static void
4660 strunlock(struct stdata *stp, sqlist_t *sqlist)
4661 {
4662 	syncql_t *sql;
4663 
4664 	if (STRMATED(stp)) {
4665 		STREAM_PUTLOCKS_EXIT(stp);
4666 		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4667 		STRUNLOCKMATES(stp);
4668 		mutex_exit(&stp->sd_reflock);
4669 		mutex_exit(&stp->sd_mate->sd_reflock);
4670 	} else {
4671 		STREAM_PUTLOCKS_EXIT(stp);
4672 		mutex_exit(&stp->sd_lock);
4673 		mutex_exit(&stp->sd_reflock);
4674 	}
4675 
4676 	if (sqlist == NULL)
4677 		return;
4678 
4679 	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4680 		SQ_PUTLOCKS_EXIT(sql->sql_sq);
4681 		mutex_exit(SQLOCK(sql->sql_sq));
4682 	}
4683 }
4684 
4685 /*
4686  * When the module has a service procedure, we need to check whether the
4687  * next module with a service procedure is in flow control and, if so,
4688  * trigger the backenable.
4689  */
4690 static void
4691 backenable_insertedq(queue_t *q)
4692 {
4693 	qband_t	*qbp;
4694 
4695 	claimstr(q);
4696 	if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
4697 		if (q->q_next->q_nfsrv->q_flag & QWANTW)
4698 			backenable(q, 0);
4699 
4700 		qbp = q->q_next->q_nfsrv->q_bandp;
4701 		for (; qbp != NULL; qbp = qbp->qb_next)
4702 			if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
4703 				backenable(q, qbp->qb_first->b_band);
4704 	}
4705 	releasestr(q);
4706 }
4707 
4708 /*
4709  * Given two read queues, insert a new one after the other.
4710  *
4711  * This routine acquires all the necessary locks in order to change
4712  * q_next and related pointer using strlock().
4713  * It depends on the stream head ensuring that there are no concurrent
4714  * insertq or removeq on the same stream. The stream head ensures this
4715  * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
4716  *
4717  * Note that no syncq locks are held during the q_next change. This is
4718  * applied to all streams since, unlike removeq, there is no problem of stale
4719  * pointers when adding a module to the stream. Thus drivers/modules that do a
4720  * canput(rq->q_next) would never get a closed/freed queue pointer even if we
4721  * applied this optimization to all streams.
4722  */
4723 void
4724 insertq(struct stdata *stp, queue_t *new)
4725 {
4726 	queue_t	*after;
4727 	queue_t *wafter;
4728 	queue_t *wnew = _WR(new);
4729 	boolean_t have_fifo = B_FALSE;
4730 
4731 	if (new->q_flag & _QINSERTING) {
4732 		ASSERT(stp->sd_vnode->v_type != VFIFO);
4733 		after = new->q_next;
4734 		wafter = _WR(new->q_next);
4735 	} else {
4736 		after = _RD(stp->sd_wrq);
4737 		wafter = stp->sd_wrq;
4738 	}
4739 
4740 	TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
4741 	    "insertq:%p, %p", after, new);
4742 	ASSERT(after->q_flag & QREADR);
4743 	ASSERT(new->q_flag & QREADR);
4744 
4745 	strlock(stp, NULL);
4746 
4747 	/* Do we have a FIFO? */
4748 	if (wafter->q_next == after) {
4749 		have_fifo = B_TRUE;
4750 		wnew->q_next = new;
4751 	} else {
4752 		wnew->q_next = wafter->q_next;
4753 	}
4754 	new->q_next = after;
4755 
4756 	set_nfsrv_ptr(new, wnew, after, wafter);
4757 	/*
4758 	 * set_nfsrv_ptr() needs to know if this is an insertion or not,
4759 	 * so only reset this flag after calling it.
4760 	 */
4761 	new->q_flag &= ~_QINSERTING;
4762 
4763 	if (have_fifo) {
4764 		wafter->q_next = wnew;
4765 	} else {
4766 		if (wafter->q_next)
4767 			_OTHERQ(wafter->q_next)->q_next = new;
4768 		wafter->q_next = wnew;
4769 	}
4770 
4771 	set_qend(new);
4772 	/* The QEND flag might have to be updated for the upstream guy */
4773 	set_qend(after);
4774 
4775 	ASSERT(_SAMESTR(new) == O_SAMESTR(new));
4776 	ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
4777 	ASSERT(_SAMESTR(after) == O_SAMESTR(after));
4778 	ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
4779 	strsetuio(stp);
4780 
4781 	/*
4782 	 * If this was a module insertion, bump the push count.
4783 	 */
4784 	if (!(new->q_flag & QISDRV))
4785 		stp->sd_pushcnt++;
4786 
4787 	strunlock(stp, NULL);
4788 
4789 	/* check if the write Q needs backenable */
4790 	backenable_insertedq(wnew);
4791 
4792 	/* check if the read Q needs backenable */
4793 	backenable_insertedq(new);
4794 }
4795 
4796 /*
4797  * Given a read queue, unlink it from any neighbors.
4798  *
4799  * This routine acquires all the necessary locks in order to
4800  * change q_next and related pointers and also guard against
4801  * stale references (e.g. through q_next) to the queue that
4802  * is being removed. It also plays a part in ensuring
4803  * that the module's/driver's put procedure doesn't get called
4804  * after qprocsoff returns.
4805  *
4806  * Removeq depends on the stream head ensuring that there are
4807  * no concurrent insertq or removeq on the same stream. The
4808  * stream head ensures this using the flags STWOPEN, STRCLOSE and
4809  * STRPLUMB.
4810  *
4811  * The set of locks needed to remove the queue is different in
4812  * different cases:
4813  *
4814  * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
4815  * waiting for the syncq reference count to drop to 0 indicating that no
4816  * non-close threads are present anywhere in the stream. This ensures that any
4817  * module/driver can reference q_next in its open, close, put, or service
4818  * procedures.
4819  *
4820  * The sq_rmqcount counter tracks the number of threads inside removeq().
4821  * strlock() ensures that there is either no threads executing inside perimeter
4822  * or there is only a thread calling qprocsoff().
4823  *
4824  * strlock() compares the value of sq_count with the number of threads inside
4825  * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake up
4826  * any threads waiting in strlock() when the sq_rmqcount increases.
4827  */
4828 
4829 void
4830 removeq(queue_t *qp)
4831 {
4832 	queue_t *wqp = _WR(qp);
4833 	struct stdata *stp = STREAM(qp);
4834 	sqlist_t *sqlist = NULL;
4835 	boolean_t isdriver;
4836 	int moved;
4837 	syncq_t *sq = qp->q_syncq;
4838 	syncq_t *wsq = wqp->q_syncq;
4839 
4840 	ASSERT(stp);
4841 
4842 	TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
4843 	    "removeq:%p %p", qp, wqp);
4844 	ASSERT(qp->q_flag&QREADR);
4845 
4846 	/*
4847 	 * For queues using Synchronous streams, we must wait for all threads in
4848 	 * rwnext() to drain out before proceeding.
4849 	 */
4850 	if (qp->q_flag & QSYNCSTR) {
4851 		/* First, we need to wake up any threads blocked in rwnext() */
4852 		mutex_enter(SQLOCK(sq));
4853 		if (sq->sq_flags & SQ_WANTWAKEUP) {
4854 			sq->sq_flags &= ~SQ_WANTWAKEUP;
4855 			cv_broadcast(&sq->sq_wait);
4856 		}
4857 		mutex_exit(SQLOCK(sq));
4858 
4859 		if (wsq != sq) {
4860 			mutex_enter(SQLOCK(wsq));
4861 			if (wsq->sq_flags & SQ_WANTWAKEUP) {
4862 				wsq->sq_flags &= ~SQ_WANTWAKEUP;
4863 				cv_broadcast(&wsq->sq_wait);
4864 			}
4865 			mutex_exit(SQLOCK(wsq));
4866 		}
4867 
4868 		mutex_enter(QLOCK(qp));
4869 		while (qp->q_rwcnt > 0) {
4870 			qp->q_flag |= QWANTRMQSYNC;
4871 			cv_wait(&qp->q_wait, QLOCK(qp));
4872 		}
4873 		mutex_exit(QLOCK(qp));
4874 
4875 		mutex_enter(QLOCK(wqp));
4876 		while (wqp->q_rwcnt > 0) {
4877 			wqp->q_flag |= QWANTRMQSYNC;
4878 			cv_wait(&wqp->q_wait, QLOCK(wqp));
4879 		}
4880 		mutex_exit(QLOCK(wqp));
4881 	}
4882 
4883 	mutex_enter(SQLOCK(sq));
4884 	sq->sq_rmqcount++;
4885 	if (sq->sq_flags & SQ_WANTWAKEUP) {
4886 		sq->sq_flags &= ~SQ_WANTWAKEUP;
4887 		cv_broadcast(&sq->sq_wait);
4888 	}
4889 	mutex_exit(SQLOCK(sq));
4890 
4891 	isdriver = (qp->q_flag & QISDRV);
4892 
4893 	sqlist = sqlist_build(qp, stp, STRMATED(stp));
4894 	strlock(stp, sqlist);
4895 
4896 	reset_nfsrv_ptr(qp, wqp);
4897 
4898 	ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
4899 	ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
4900 	/* Do we have a FIFO? */
4901 	if (wqp->q_next == qp) {
4902 		stp->sd_wrq->q_next = _RD(stp->sd_wrq);
4903 	} else {
4904 		if (wqp->q_next)
4905 			backq(qp)->q_next = qp->q_next;
4906 		if (qp->q_next)
4907 			backq(wqp)->q_next = wqp->q_next;
4908 	}
4909 
4910 	/* The QEND flag might have to be updated for the upstream guy */
4911 	if (qp->q_next)
4912 		set_qend(qp->q_next);
4913 
4914 	ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
4915 	ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));
4916 
4917 	/*
4918 	 * Move any messages destined for the put procedures to the next
4919 	 * syncq in line. Otherwise free them.
4920 	 */
4921 	moved = 0;
4922 	/*
4923 	 * Quick check to see whether there are any messages or events.
4924 	 */
4925 	if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
4926 		moved += propagate_syncq(qp);
4927 	if (wqp->q_syncqmsgs != 0 ||
4928 	    (wqp->q_syncq->sq_flags & SQ_EVENTS))
4929 		moved += propagate_syncq(wqp);
4930 
4931 	strsetuio(stp);
4932 
4933 	/*
4934 	 * If this was a module removal, decrement the push count.
4935 	 */
4936 	if (!isdriver)
4937 		stp->sd_pushcnt--;
4938 
4939 	strunlock(stp, sqlist);
4940 	sqlist_free(sqlist);
4941 
4942 	/*
4943 	 * Make sure any messages that were propagated are drained.
4944 	 * Also clear any QFULL bit caused by messages that were propagated.
4945 	 */
4946 
4947 	if (qp->q_next != NULL) {
4948 		clr_qfull(qp);
4949 		/*
4950 		 * For the driver calling qprocsoff, propagate_syncq
4951 		 * frees all the messages instead of putting them in
4952 		 * the stream head.
4953 		 */
4954 		if (!isdriver && (moved > 0))
4955 			emptysq(qp->q_next->q_syncq);
4956 	}
4957 	if (wqp->q_next != NULL) {
4958 		clr_qfull(wqp);
4959 		/*
4960 		 * We come here for any pop of a module except for the
4961 		 * case of the driver being removed. We don't call emptysq
4962 		 * if we did not move any messages; this avoids holding
4963 		 * PERMOD syncq locks in emptysq.
4964 		 */
4965 		if (moved > 0)
4966 			emptysq(wqp->q_next->q_syncq);
4967 	}
4968 
4969 	mutex_enter(SQLOCK(sq));
4970 	sq->sq_rmqcount--;
4971 	mutex_exit(SQLOCK(sq));
4972 }
4973 
4974 /*
4975  * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
4976  * SQ_WRITER) on a syncq.
4977  * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the
4978  * sync queue and waits until sq_count reaches maxcnt.
4979  *
4980  * If maxcnt is -1, there's no need to grab sq_putlocks since the caller
4981  * does not care about putnext threads that are in the middle of calling put
4982  * entry points.
4983  *
4984  * This routine is used for both inner and outer syncqs.
4985  */
4986 static void
4987 blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
4988 {
4989 	uint16_t count = 0;
4990 
4991 	mutex_enter(SQLOCK(sq));
4992 	/*
4993 	 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
4994 	 * SQ_FROZEN will be set if there is a frozen stream that has a
4995 	 * queue which also refers to this "shared" syncq.
4996 	 * SQ_BLOCKED will be set if there is "off" queue which also
4997 	 * refers to this "shared" syncq.
4998 	 */
4999 	if (maxcnt != -1) {
5000 		count = sq->sq_count;
5001 		SQ_PUTLOCKS_ENTER(sq);
5002 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5003 		SUM_SQ_PUTCOUNTS(sq, count);
5004 	}
5005 	sq->sq_needexcl++;
5006 	ASSERT(sq->sq_needexcl != 0);	/* wraparound */
5007 
5008 	while ((sq->sq_flags & flag) ||
5009 	    (maxcnt != -1 && count > (unsigned)maxcnt)) {
5010 		sq->sq_flags |= SQ_WANTWAKEUP;
5011 		if (maxcnt != -1) {
5012 			SQ_PUTLOCKS_EXIT(sq);
5013 		}
5014 		cv_wait(&sq->sq_wait, SQLOCK(sq));
5015 		if (maxcnt != -1) {
5016 			count = sq->sq_count;
5017 			SQ_PUTLOCKS_ENTER(sq);
5018 			SUM_SQ_PUTCOUNTS(sq, count);
5019 		}
5020 	}
5021 	sq->sq_needexcl--;
5022 	sq->sq_flags |= flag;
5023 	ASSERT(maxcnt == -1 || count == maxcnt);
5024 	if (maxcnt != -1) {
5025 		if (sq->sq_needexcl == 0) {
5026 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5027 		}
5028 		SQ_PUTLOCKS_EXIT(sq);
5029 	} else if (sq->sq_needexcl == 0) {
5030 		SQ_PUTCOUNT_SETFAST(sq);
5031 	}
5032 
5033 	mutex_exit(SQLOCK(sq));
5034 }
5035 
5036 /*
5037  * Reset a flag that was set with blocksq.
5038  *
5039  * Can not use this routine to reset SQ_WRITER.
5040  *
5041  * If "isouter" is set then the syncq is assumed to be an outer perimeter
5042  * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
5043  * to handle the queued qwriter operations.
5044  *
5045  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5046  * sq_putlocks are used.
5047  */
5048 static void
5049 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
5050 {
5051 	uint16_t flags;
5052 
5053 	mutex_enter(SQLOCK(sq));
5054 	ASSERT(resetflag != SQ_WRITER);
5055 	ASSERT(sq->sq_flags & resetflag);
5056 	flags = sq->sq_flags & ~resetflag;
5057 	sq->sq_flags = flags;
5058 	if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
5059 		if (flags & SQ_WANTWAKEUP) {
5060 			flags &= ~SQ_WANTWAKEUP;
5061 			cv_broadcast(&sq->sq_wait);
5062 		}
5063 		sq->sq_flags = flags;
5064 		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5065 			if (!isouter) {
5066 				/* drain_syncq drops SQLOCK */
5067 				drain_syncq(sq);
5068 				return;
5069 			}
5070 		}
5071 	}
5072 	mutex_exit(SQLOCK(sq));
5073 }
5074 
5075 /*
5076  * Reset a flag that was set with blocksq.
5077  * Does not drain the syncq. Use emptysq() for that.
5078  * Returns 1 if SQ_QUEUED is set. Otherwise 0.
5079  *
5080  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5081  * sq_putlocks are used.
5082  */
5083 static int
5084 dropsq(syncq_t *sq, uint16_t resetflag)
5085 {
5086 	uint16_t flags;
5087 
5088 	mutex_enter(SQLOCK(sq));
5089 	ASSERT(sq->sq_flags & resetflag);
5090 	flags = sq->sq_flags & ~resetflag;
5091 	if (flags & SQ_WANTWAKEUP) {
5092 		flags &= ~SQ_WANTWAKEUP;
5093 		cv_broadcast(&sq->sq_wait);
5094 	}
5095 	sq->sq_flags = flags;
5096 	mutex_exit(SQLOCK(sq));
5097 	if (flags & SQ_QUEUED)
5098 		return (1);
5099 	return (0);
5100 }
5101 
5102 /*
5103  * Empty all the messages on a syncq.
5104  *
5105  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5106  * sq_putlocks are used.
5107  */
5108 static void
5109 emptysq(syncq_t *sq)
5110 {
5111 	uint16_t flags;
5112 
5113 	mutex_enter(SQLOCK(sq));
5114 	flags = sq->sq_flags;
5115 	if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5116 		/*
5117 		 * To prevent potential recursive invocation of drain_syncq we
5118 		 * do not call drain_syncq if count is non-zero.
5119 		 */
5120 		if (sq->sq_count == 0) {
5121 			/* drain_syncq() drops SQLOCK */
5122 			drain_syncq(sq);
5123 			return;
5124 		} else
5125 			sqenable(sq);
5126 	}
5127 	mutex_exit(SQLOCK(sq));
5128 }
5129 
5130 /*
5131  * Ordered insert while removing duplicates.
5132  */
5133 static void
5134 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
5135 {
5136 	syncql_t *sqlp, **prev_sqlpp, *new_sqlp;
5137 
5138 	prev_sqlpp = &sqlist->sqlist_head;
5139 	while ((sqlp = *prev_sqlpp) != NULL) {
5140 		if (sqlp->sql_sq >= sqp) {
5141 			if (sqlp->sql_sq == sqp)	/* duplicate */
5142 				return;
5143 			break;
5144 		}
5145 		prev_sqlpp = &sqlp->sql_next;
5146 	}
5147 	new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
5148 	ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
5149 	new_sqlp->sql_next = sqlp;
5150 	new_sqlp->sql_sq = sqp;
5151 	*prev_sqlpp = new_sqlp;
5152 }
5153 
5154 /*
5155  * Walk the write side queues until we hit either the driver
5156  * or a twist in the stream (_SAMESTR will return false in both
5157  * these cases) then turn around and walk the read side queues
5158  * back up to the stream head.
5159  */
5160 static void
5161 sqlist_insertall(sqlist_t *sqlist, queue_t *q)
5162 {
5163 	while (q != NULL) {
5164 		sqlist_insert(sqlist, q->q_syncq);
5165 
5166 		if (_SAMESTR(q))
5167 			q = q->q_next;
5168 		else if (!(q->q_flag & QREADR))
5169 			q = _RD(q);
5170 		else
5171 			q = NULL;
5172 	}
5173 }
5174 
5175 /*
5176  * Allocate and build a list of all syncqs in a stream and the syncq(s)
5177  * associated with the "q" parameter. The resulting list is sorted in a
5178  * canonical order and is free of duplicates.
5179  * Assumes the passed queue is a _RD(q).
5180  */
5181 static sqlist_t *
5182 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
5183 {
5184 	sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);
5185 
5186 	/*
5187 	 * start with the current queue/qpair
5188 	 */
5189 	ASSERT(q->q_flag & QREADR);
5190 
5191 	sqlist_insert(sqlist, q->q_syncq);
5192 	sqlist_insert(sqlist, _WR(q)->q_syncq);
5193 
5194 	sqlist_insertall(sqlist, stp->sd_wrq);
5195 	if (do_twist)
5196 		sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);
5197 
5198 	return (sqlist);
5199 }
5200 
5201 static sqlist_t *
5202 sqlist_alloc(struct stdata *stp, int kmflag)
5203 {
5204 	size_t sqlist_size;
5205 	sqlist_t *sqlist;
5206 
5207 	/*
5208 	 * Allocate 2 syncql_t's for each pushed module. Note that
5209 	 * the sqlist_t structure already has 4 syncql_t's built in:
5210 	 * 2 for the stream head, and 2 for the driver/other stream head.
5211 	 */
5212 	sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
5213 	    sizeof (sqlist_t);
5214 	if (STRMATED(stp))
5215 		sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
5216 	sqlist = kmem_alloc(sqlist_size, kmflag);
5217 
5218 	sqlist->sqlist_head = NULL;
5219 	sqlist->sqlist_size = sqlist_size;
5220 	sqlist->sqlist_index = 0;
5221 
5222 	return (sqlist);
5223 }
5224 
5225 /*
5226  * Free the list created by sqlist_alloc()
5227  */
5228 static void
5229 sqlist_free(sqlist_t *sqlist)
5230 {
5231 	kmem_free(sqlist, sqlist->sqlist_size);
5232 }
5233 
5234 /*
5235  * Prevent any new entries into any syncq in this stream.
5236  * Used by freezestr.
5237  */
5238 void
5239 strblock(queue_t *q)
5240 {
5241 	struct stdata	*stp;
5242 	syncql_t	*sql;
5243 	sqlist_t	*sqlist;
5244 
5245 	q = _RD(q);
5246 
5247 	stp = STREAM(q);
5248 	ASSERT(stp != NULL);
5249 
5250 	/*
5251 	 * Get a sorted list with all the duplicates removed containing
5252 	 * all the syncqs referenced by this stream.
5253 	 */
5254 	sqlist = sqlist_build(q, stp, B_FALSE);
5255 	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5256 		blocksq(sql->sql_sq, SQ_FROZEN, -1);
5257 	sqlist_free(sqlist);
5258 }
5259 
5260 /*
5261  * Release the block on new entries into this stream
5262  */
5263 void
5264 strunblock(queue_t *q)
5265 {
5266 	struct stdata	*stp;
5267 	syncql_t	*sql;
5268 	sqlist_t	*sqlist;
5269 	int		drain_needed;
5270 
5271 	q = _RD(q);
5272 
5273 	/*
5274 	 * Get a sorted list with all the duplicates removed containing
5275 	 * all the syncqs referenced by this stream.
5276 	 * Have to drop the SQ_FROZEN flag on all the syncqs before
5277 	 * starting to drain them; otherwise the draining might
5278 	 * cause a freezestr in some module on the stream (which
5279 	 * would deadlock.)
5280 	 */
5281 	stp = STREAM(q);
5282 	ASSERT(stp != NULL);
5283 	sqlist = sqlist_build(q, stp, B_FALSE);
5284 	drain_needed = 0;
5285 	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5286 		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
5287 	if (drain_needed) {
5288 		for (sql = sqlist->sqlist_head; sql != NULL;
5289 		    sql = sql->sql_next)
5290 			emptysq(sql->sql_sq);
5291 	}
5292 	sqlist_free(sqlist);
5293 }
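
/*
 * Illustrative sketch (editorial addition): strblock() and strunblock()
 * are the engine behind freezestr(9F) and unfreezestr(9F).  A module
 * freezes its stream to atomically inspect and manipulate queues with
 * interfaces such as insq(9F) and rmvq(9F):
 *
 *	freezestr(q);
 *	...the stream is frozen; e.g. rmvq(q, mp) or insq(q, emp, nmp)...
 *	unfreezestr(q);
 */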
5294 
5295 #ifdef DEBUG
5296 static int
5297 qprocsareon(queue_t *rq)
5298 {
5299 	if (rq->q_next == NULL)
5300 		return (0);
5301 	return (_WR(rq->q_next)->q_next == _WR(rq));
5302 }
5303 
5304 int
5305 qclaimed(queue_t *q)
5306 {
5307 	uint_t count;
5308 
5309 	count = q->q_syncq->sq_count;
5310 	SUM_SQ_PUTCOUNTS(q->q_syncq, count);
5311 	return (count != 0);
5312 }
5313 
5314 /*
5315  * Check if anyone has frozen this stream with freezestr
5316  */
5317 int
5318 frozenstr(queue_t *q)
5319 {
5320 	return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
5321 }
5322 #endif /* DEBUG */
5323 
5324 /*
5325  * Enter a queue.
5326  * Obsoleted interface. Should not be used.
5327  */
5328 void
5329 enterq(queue_t *q)
5330 {
5331 	entersq(q->q_syncq, SQ_CALLBACK);
5332 }
5333 
5334 void
5335 leaveq(queue_t *q)
5336 {
5337 	leavesq(q->q_syncq, SQ_CALLBACK);
5338 }
5339 
5340 /*
5341  * Enter a perimeter. c_inner and c_outer specify which concurrency bits
5342  * to check.
5343  * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
5344  * calls and the running of open, close and service procedures.
5345  *
5346  * If the c_inner bit is set, no need to grab sq_putlocks since we don't
5347  * care if other threads have entered or are entering the put entry point.
5348  *
5349  * If the c_inner bit is set it might have been possible to use
5350  * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
5351  * open/close path for IP) but since the count may need to be decremented in
5352  * qwait() we wouldn't know which counter to decrement. Currently counter is
5353  * selected by current cpu_seqid and current CPU can change at any moment. XXX
5354  * in the future we might use curthread id bits to select the counter and this
5355  * would stay constant across routine calls.
5356  */
5357 void
5358 entersq(syncq_t *sq, int entrypoint)
5359 {
5360 	uint16_t	count = 0;
5361 	uint16_t	flags;
5362 	uint16_t	waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
5363 	uint16_t	type;
5364 	uint_t		c_inner = entrypoint & SQ_CI;
5365 	uint_t		c_outer = entrypoint & SQ_CO;
5366 
5367 	/*
5368 	 * Increment ref count to keep closes out of this queue.
5369 	 */
5370 	ASSERT(sq);
5371 	ASSERT(c_inner && c_outer);
5372 	mutex_enter(SQLOCK(sq));
5373 	flags = sq->sq_flags;
5374 	type = sq->sq_type;
5375 	if (!(type & c_inner)) {
5376 		/* Make sure all putcounts now use slowlock. */
5377 		count = sq->sq_count;
5378 		SQ_PUTLOCKS_ENTER(sq);
5379 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5380 		SUM_SQ_PUTCOUNTS(sq, count);
5381 		sq->sq_needexcl++;
5382 		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
5383 		waitflags |= SQ_MESSAGES;
5384 	}
5385 	/*
5386 	 * Wait until we can enter the inner perimeter.
5387 	 * If we want exclusive access we wait until sq_count is 0.
5388 	 * We have to do this before entering the outer perimeter in order
5389 	 * to preserve put/close message ordering.
5390 	 */
5391 	while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
5392 		sq->sq_flags = flags | SQ_WANTWAKEUP;
5393 		if (!(type & c_inner)) {
5394 			SQ_PUTLOCKS_EXIT(sq);
5395 		}
5396 		cv_wait(&sq->sq_wait, SQLOCK(sq));
5397 		if (!(type & c_inner)) {
5398 			count = sq->sq_count;
5399 			SQ_PUTLOCKS_ENTER(sq);
5400 			SUM_SQ_PUTCOUNTS(sq, count);
5401 		}
5402 		flags = sq->sq_flags;
5403 	}
5404 
5405 	if (!(type & c_inner)) {
5406 		ASSERT(sq->sq_needexcl > 0);
5407 		sq->sq_needexcl--;
5408 		if (sq->sq_needexcl == 0) {
5409 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5410 		}
5411 	}
5412 
5413 	/* Check if we need to enter the outer perimeter */
5414 	if (!(type & c_outer)) {
5415 		/*
5416 		 * We have to enter the outer perimeter exclusively before
5417 		 * we can increment sq_count to avoid deadlock. This implies
5418 		 * that we have to re-check sq_flags and sq_count.
5419 		 *
5420 		 * is it possible to have c_inner set when c_outer is not set?
5421 		 */
5422 		if (!(type & c_inner)) {
5423 			SQ_PUTLOCKS_EXIT(sq);
5424 		}
5425 		mutex_exit(SQLOCK(sq));
5426 		outer_enter(sq->sq_outer, SQ_GOAWAY);
5427 		mutex_enter(SQLOCK(sq));
5428 		flags = sq->sq_flags;
5429 		/*
5430 		 * there should be no need to recheck sq_putcounts
5431 		 * because outer_enter() has already waited for them to clear
5432 		 * after setting SQ_WRITER.
5433 		 */
5434 		count = sq->sq_count;
5435 #ifdef DEBUG
5436 		/*
5437 		 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
5438 		 * of doing an ASSERT internally. Others should do
5439 		 * something like
5440 		 *	 ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
5441 		 * without the need to #ifdef DEBUG it.
5442 		 */
5443 		SUMCHECK_SQ_PUTCOUNTS(sq, 0);
5444 #endif
5445 		while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
5446 		    (!(type & c_inner) && count != 0)) {
5447 			sq->sq_flags = flags | SQ_WANTWAKEUP;
5448 			cv_wait(&sq->sq_wait, SQLOCK(sq));
5449 			count = sq->sq_count;
5450 			flags = sq->sq_flags;
5451 		}
5452 	}
5453 
5454 	sq->sq_count++;
5455 	ASSERT(sq->sq_count != 0);	/* Wraparound */
5456 	if (!(type & c_inner)) {
5457 		/* Exclusive entry */
5458 		ASSERT(sq->sq_count == 1);
5459 		sq->sq_flags |= SQ_EXCL;
5460 		if (type & c_outer) {
5461 			SQ_PUTLOCKS_EXIT(sq);
5462 		}
5463 	}
5464 	mutex_exit(SQLOCK(sq));
5465 }
5466 
5467 /*
5468  * Leave a syncq. Announce to the framework that closes may proceed.
5469  * c_inner and c_outer specify which concurrency bits
5470  * to check.
5471  *
5472  * Must never be called from a driver or module put entry point.
5473  *
5474  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5475  * sq_putlocks are used.
5476  */
5477 void
5478 leavesq(syncq_t *sq, int entrypoint)
5479 {
5480 	uint16_t	flags;
5481 	uint16_t	type;
5482 	uint_t		c_outer = entrypoint & SQ_CO;
5483 #ifdef DEBUG
5484 	uint_t		c_inner = entrypoint & SQ_CI;
5485 #endif
5486 
5487 	/*
5488 	 * decrement ref count, drain the syncq if possible, and wake up
5489 	 * any waiting close.
5490 	 */
5491 	ASSERT(sq);
5492 	ASSERT(c_inner && c_outer);
5493 	mutex_enter(SQLOCK(sq));
5494 	flags = sq->sq_flags;
5495 	type = sq->sq_type;
5496 	if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {
5497 
5498 		if (flags & SQ_WANTWAKEUP) {
5499 			flags &= ~SQ_WANTWAKEUP;
5500 			cv_broadcast(&sq->sq_wait);
5501 		}
5502 		if (flags & SQ_WANTEXWAKEUP) {
5503 			flags &= ~SQ_WANTEXWAKEUP;
5504 			cv_broadcast(&sq->sq_exitwait);
5505 		}
5506 
5507 		if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
5508 			/*
5509 			 * The syncq needs to be drained. "Exit" the syncq
5510 			 * before calling drain_syncq.
5511 			 */
5512 			ASSERT(sq->sq_count != 0);
5513 			sq->sq_count--;
5514 			ASSERT((flags & SQ_EXCL) || (type & c_inner));
5515 			sq->sq_flags = flags & ~SQ_EXCL;
5516 			drain_syncq(sq);
5517 			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
5518 			/* Check if we need to exit the outer perimeter */
5519 			/* XXX will this ever be true? */
5520 			if (!(type & c_outer))
5521 				outer_exit(sq->sq_outer);
5522 			return;
5523 		}
5524 	}
5525 	ASSERT(sq->sq_count != 0);
5526 	sq->sq_count--;
5527 	ASSERT((flags & SQ_EXCL) || (type & c_inner));
5528 	sq->sq_flags = flags & ~SQ_EXCL;
5529 	mutex_exit(SQLOCK(sq));
5530 
5531 	/* Check if we need to exit the outer perimeter */
5532 	if (!(sq->sq_type & c_outer))
5533 		outer_exit(sq->sq_outer);
5534 }
5535 
5536 /*
5537  * Prevent q_next from changing in this stream by incrementing sq_count.
5538  *
5539  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5540  * sq_putlocks are used.
5541  */
5542 void
5543 claimq(queue_t *qp)
5544 {
5545 	syncq_t	*sq = qp->q_syncq;
5546 
5547 	mutex_enter(SQLOCK(sq));
5548 	sq->sq_count++;
5549 	ASSERT(sq->sq_count != 0);	/* Wraparound */
5550 	mutex_exit(SQLOCK(sq));
5551 }
5552 
5553 /*
5554  * Undo claimq.
5555  *
5556  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
5557  * sq_putlocks are used.
5558  */
5559 void
5560 releaseq(queue_t *qp)
5561 {
5562 	syncq_t	*sq = qp->q_syncq;
5563 	uint16_t flags;
5564 
5565 	mutex_enter(SQLOCK(sq));
5566 	ASSERT(sq->sq_count > 0);
5567 	sq->sq_count--;
5568 
5569 	flags = sq->sq_flags;
5570 	if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
5571 		if (flags & SQ_WANTWAKEUP) {
5572 			flags &= ~SQ_WANTWAKEUP;
5573 			cv_broadcast(&sq->sq_wait);
5574 		}
5575 		sq->sq_flags = flags;
5576 		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5577 			/*
5578 			 * To prevent potential recursive invocation of
5579 			 * drain_syncq we do not call drain_syncq if count is
5580 			 * non-zero.
5581 			 */
5582 			if (sq->sq_count == 0) {
5583 				drain_syncq(sq);
5584 				return;
5585 			} else
5586 				sqenable(sq);
5587 		}
5588 	}
5589 	mutex_exit(SQLOCK(sq));
5590 }
5591 
5592 /*
5593  * Prevent q_next from changing in this stream by incrementing sd_refcnt.
5594  */
5595 void
5596 claimstr(queue_t *qp)
5597 {
5598 	struct stdata *stp = STREAM(qp);
5599 
5600 	mutex_enter(&stp->sd_reflock);
5601 	stp->sd_refcnt++;
5602 	ASSERT(stp->sd_refcnt != 0);	/* Wraparound */
5603 	mutex_exit(&stp->sd_reflock);
5604 }
5605 
5606 /*
5607  * Undo claimstr.
5608  */
5609 void
5610 releasestr(queue_t *qp)
5611 {
5612 	struct stdata *stp = STREAM(qp);
5613 
5614 	mutex_enter(&stp->sd_reflock);
5615 	ASSERT(stp->sd_refcnt != 0);
5616 	if (--stp->sd_refcnt == 0)
5617 		cv_broadcast(&stp->sd_refmonitor);
5618 	mutex_exit(&stp->sd_reflock);
5619 }
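
/*
 * Illustrative pairing (assumed usage): code that walks q_next across a
 * stream brackets the traversal so that the stream geometry cannot
 * change underneath it:
 *
 *	claimstr(qp);
 *	for (q = qp; q != NULL; q = q->q_next)
 *		...;
 *	releasestr(qp);
 */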
5620 
5621 static syncq_t *
5622 new_syncq(void)
5623 {
5624 	return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
5625 }
5626 
5627 static void
5628 free_syncq(syncq_t *sq)
5629 {
5630 	ASSERT(sq->sq_head == NULL);
5631 	ASSERT(sq->sq_outer == NULL);
5632 	ASSERT(sq->sq_callbpend == NULL);
5633 	ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
5634 	    (sq->sq_onext == sq && sq->sq_oprev == sq));
5635 
5636 	if (sq->sq_ciputctrl != NULL) {
5637 		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
5638 		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
5639 		    sq->sq_nciputctrl, 0);
5640 		ASSERT(ciputctrl_cache != NULL);
5641 		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
5642 	}
5643 
5644 	sq->sq_tail = NULL;
5645 	sq->sq_evhead = NULL;
5646 	sq->sq_evtail = NULL;
5647 	sq->sq_ciputctrl = NULL;
5648 	sq->sq_nciputctrl = 0;
5649 	sq->sq_count = 0;
5650 	sq->sq_rmqcount = 0;
5651 	sq->sq_callbflags = 0;
5652 	sq->sq_cancelid = 0;
5653 	sq->sq_next = NULL;
5654 	sq->sq_needexcl = 0;
5655 	sq->sq_svcflags = 0;
5656 	sq->sq_nqueues = 0;
5657 	sq->sq_pri = 0;
5658 	sq->sq_onext = NULL;
5659 	sq->sq_oprev = NULL;
5660 	sq->sq_flags = 0;
5661 	sq->sq_type = 0;
5662 	sq->sq_servcount = 0;
5663 
5664 	kmem_cache_free(syncq_cache, sq);
5665 }
5666 
5667 /* Outer perimeter code */
5668 
5669 /*
5670  * The outer syncq uses the fields and flags in the syncq slightly
5671  * differently from the inner syncqs.
5672  *	sq_count	Incremented when there are pending or running
5673  *			writers at the outer perimeter to prevent the set of
5674  *			inner syncqs that belong to the outer perimeter from
5675  *			changing.
5676  *	sq_head/tail	List of deferred qwriter(OUTER) operations.
5677  *
5678  *	SQ_BLOCKED	Set to prevent traversal of sq_next/sq_prev while
5679  *			inner syncqs are added to or removed from the
5680  *			outer perimeter.
5681  *	SQ_QUEUED	sq_head/tail has messages or events queued.
5682  *
5683  *	SQ_WRITER	A thread is currently traversing all the inner syncqs
5684  *			setting the SQ_WRITER flag.
5685  */
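
/*
 * For clarity: the outer syncq and its inner syncqs form a circular,
 * doubly linked list through sq_onext/sq_oprev, with the outer syncq
 * itself acting as the list head:
 *
 *	outer -> inner1 -> inner2 -> ... -> outer
 *
 * which is why the traversals below take the form
 *
 *	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
 */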
5686 
5687 /*
5688  * Get write access at the outer perimeter.
5689  * Note that read access is done by entersq, putnext, and put by simply
5690  * incrementing sq_count in the inner syncq.
5691  *
5692  * Waits until "flags" is no longer set in the outer to prevent multiple
5693  * threads from having write access at the same time. SQ_WRITER has to be part
5694  * of "flags".
5695  *
5696  * Increases sq_count on the outer syncq to keep away outer_insert/remove
5697  * until the outer_exit is finished.
5698  *
5699  * outer_enter is vulnerable to starvation since it does not prevent new
5700  * threads from entering the inner syncqs while it is waiting for sq_count to
5701  * go to zero.
5702  */
5703 void
5704 outer_enter(syncq_t *outer, uint16_t flags)
5705 {
5706 	syncq_t	*sq;
5707 	int	wait_needed;
5708 	uint16_t	count;
5709 
5710 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5711 	    outer->sq_oprev != NULL);
5712 	ASSERT(flags & SQ_WRITER);
5713 
5714 retry:
5715 	mutex_enter(SQLOCK(outer));
5716 	while (outer->sq_flags & flags) {
5717 		outer->sq_flags |= SQ_WANTWAKEUP;
5718 		cv_wait(&outer->sq_wait, SQLOCK(outer));
5719 	}
5720 
5721 	ASSERT(!(outer->sq_flags & SQ_WRITER));
5722 	outer->sq_flags |= SQ_WRITER;
5723 	outer->sq_count++;
5724 	ASSERT(outer->sq_count != 0);	/* wraparound */
5725 	wait_needed = 0;
5726 	/*
5727 	 * Set SQ_WRITER on all the inner syncqs while holding
5728 	 * the SQLOCK on the outer syncq. This ensures that the changing
5729 	 * of SQ_WRITER is atomic under the outer SQLOCK.
5730 	 */
5731 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5732 		mutex_enter(SQLOCK(sq));
5733 		count = sq->sq_count;
5734 		SQ_PUTLOCKS_ENTER(sq);
5735 		sq->sq_flags |= SQ_WRITER;
5736 		SUM_SQ_PUTCOUNTS(sq, count);
5737 		if (count != 0)
5738 			wait_needed = 1;
5739 		SQ_PUTLOCKS_EXIT(sq);
5740 		mutex_exit(SQLOCK(sq));
5741 	}
5742 	mutex_exit(SQLOCK(outer));
5743 
5744 	/*
5745 	 * Get everybody out of the syncqs sequentially.
5746 	 * Note that we don't actually need to acquire the PUTLOCKS, since
5747 	 * we have already cleared the fastbit and set SQ_WRITER.  By
5748 	 * definition, the count cannot increase since putnext will
5749 	 * take the slowlock path (and the purpose of acquiring the
5750 	 * putlocks was to make sure it didn't increase while we were
5751 	 * waiting).
5752 	 *
5753 	 * Note that we still acquire the PUTLOCKS to be safe.
5754 	 */
5755 	if (wait_needed) {
5756 		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5757 			mutex_enter(SQLOCK(sq));
5758 			count = sq->sq_count;
5759 			SQ_PUTLOCKS_ENTER(sq);
5760 			SUM_SQ_PUTCOUNTS(sq, count);
5761 			while (count != 0) {
5762 				sq->sq_flags |= SQ_WANTWAKEUP;
5763 				SQ_PUTLOCKS_EXIT(sq);
5764 				cv_wait(&sq->sq_wait, SQLOCK(sq));
5765 				count = sq->sq_count;
5766 				SQ_PUTLOCKS_ENTER(sq);
5767 				SUM_SQ_PUTCOUNTS(sq, count);
5768 			}
5769 			SQ_PUTLOCKS_EXIT(sq);
5770 			mutex_exit(SQLOCK(sq));
5771 		}
5772 		/*
5773 		 * Verify that none of the flags got set while we
5774 		 * were waiting for the sq_counts to drop.
5775 		 * If this happens we exit and retry entering the
5776 		 * outer perimeter.
5777 		 */
5778 		mutex_enter(SQLOCK(outer));
5779 		if (outer->sq_flags & (flags & ~SQ_WRITER)) {
5780 			mutex_exit(SQLOCK(outer));
5781 			outer_exit(outer);
5782 			goto retry;
5783 		}
5784 		mutex_exit(SQLOCK(outer));
5785 	}
5786 }
5787 
5788 /*
5789  * Drop the write access at the outer perimeter.
5790  * Read access is dropped implicitly (by putnext, put, and leavesq) by
5791  * decrementing sq_count.
5792  */
5793 void
5794 outer_exit(syncq_t *outer)
5795 {
5796 	syncq_t	*sq;
5797 	int	 drain_needed;
5798 	uint16_t flags;
5799 
5800 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5801 	    outer->sq_oprev != NULL);
5802 	ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));
5803 
5804 	/*
5805 	 * Atomically (from the perspective of threads calling become_writer)
5806 	 * drop the write access at the outer perimeter by holding
5807 	 * SQLOCK(outer) across all the dropsq calls and the resetting of
5808 	 * SQ_WRITER.
5809 	 * This defines a locking order between the outer perimeter
5810 	 * SQLOCK and the inner perimeter SQLOCKs.
5811 	 */
5812 	mutex_enter(SQLOCK(outer));
5813 	flags = outer->sq_flags;
5814 	ASSERT(outer->sq_flags & SQ_WRITER);
5815 	if (flags & SQ_QUEUED) {
5816 		write_now(outer);
5817 		flags = outer->sq_flags;
5818 	}
5819 
5820 	/*
5821 	 * sq_onext is stable since sq_count has not yet been decreased.
5822 	 * Reset the SQ_WRITER flags in all syncqs.
5823 	 * After dropping SQ_WRITER on the outer syncq we empty all the
5824 	 * inner syncqs.
5825 	 */
5826 	drain_needed = 0;
5827 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5828 		drain_needed += dropsq(sq, SQ_WRITER);
5829 	ASSERT(!(outer->sq_flags & SQ_QUEUED));
5830 	flags &= ~SQ_WRITER;
5831 	if (drain_needed) {
5832 		outer->sq_flags = flags;
5833 		mutex_exit(SQLOCK(outer));
5834 		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5835 			emptysq(sq);
5836 		mutex_enter(SQLOCK(outer));
5837 		flags = outer->sq_flags;
5838 	}
5839 	if (flags & SQ_WANTWAKEUP) {
5840 		flags &= ~SQ_WANTWAKEUP;
5841 		cv_broadcast(&outer->sq_wait);
5842 	}
5843 	outer->sq_flags = flags;
5844 	ASSERT(outer->sq_count > 0);
5845 	outer->sq_count--;
5846 	mutex_exit(SQLOCK(outer));
5847 }
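
/*
 * Illustrative pairing (as used by entersq/leavesq above): exclusive
 * write access at the outer perimeter is bracketed as
 *
 *	outer_enter(sq->sq_outer, SQ_GOAWAY);
 *	...
 *	outer_exit(sq->sq_outer);
 */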
5848 
5849 /*
5850  * Add another syncq to an outer perimeter.
5851  * Block out all other access to the outer perimeter while it is being
5852  * changed using blocksq.
5853  * Assumes that the caller has *not* done an outer_enter.
5854  *
5855  * Vulnerable to starvation in blocksq.
5856  */
5857 static void
5858 outer_insert(syncq_t *outer, syncq_t *sq)
5859 {
5860 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5861 	    outer->sq_oprev != NULL);
5862 	ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
5863 	    sq->sq_oprev == NULL);	/* Can't be in an outer perimeter */
5864 
5865 	/* Get exclusive access to the outer perimeter list */
5866 	blocksq(outer, SQ_BLOCKED, 0);
5867 	ASSERT(outer->sq_flags & SQ_BLOCKED);
5868 	ASSERT(!(outer->sq_flags & SQ_WRITER));
5869 
5870 	mutex_enter(SQLOCK(sq));
5871 	sq->sq_outer = outer;
5872 	outer->sq_onext->sq_oprev = sq;
5873 	sq->sq_onext = outer->sq_onext;
5874 	outer->sq_onext = sq;
5875 	sq->sq_oprev = outer;
5876 	mutex_exit(SQLOCK(sq));
5877 	unblocksq(outer, SQ_BLOCKED, 1);
5878 }
5879 
5880 /*
5881  * Remove a syncq from an outer perimeter.
5882  * Block out all other access to the outer perimeter while it is being
5883  * changed using blocksq.
5884  * Assumes that the caller has *not* done an outer_enter.
5885  *
5886  * Vulnerable to starvation in blocksq.
5887  */
5888 static void
5889 outer_remove(syncq_t *outer, syncq_t *sq)
5890 {
5891 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5892 	    outer->sq_oprev != NULL);
5893 	ASSERT(sq->sq_outer == outer);
5894 
5895 	/* Get exclusive access to the outer perimeter list */
5896 	blocksq(outer, SQ_BLOCKED, 0);
5897 	ASSERT(outer->sq_flags & SQ_BLOCKED);
5898 	ASSERT(!(outer->sq_flags & SQ_WRITER));
5899 
5900 	mutex_enter(SQLOCK(sq));
5901 	sq->sq_outer = NULL;
5902 	sq->sq_onext->sq_oprev = sq->sq_oprev;
5903 	sq->sq_oprev->sq_onext = sq->sq_onext;
5904 	sq->sq_oprev = sq->sq_onext = NULL;
5905 	mutex_exit(SQLOCK(sq));
5906 	unblocksq(outer, SQ_BLOCKED, 1);
5907 }
5908 
5909 /*
5910  * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
5911  * If this is the first callback for this outer perimeter then add
5912  * this outer perimeter to the list of outer perimeters that
5913  * the qwriter_outer_thread will process.
5914  *
5915  * Increments sq_count in the outer syncq to prevent the membership
5916  * of the outer perimeter (in terms of inner syncqs) from changing while
5917  * the callback is pending.
5918  */
5919 static void
5920 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5921 {
5922 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
5923 
5924 	mp->b_prev = (mblk_t *)func;
5925 	mp->b_queue = q;
5926 	mp->b_next = NULL;
5927 	outer->sq_count++;	/* Decremented when dequeued */
5928 	ASSERT(outer->sq_count != 0);	/* Wraparound */
5929 	if (outer->sq_evhead == NULL) {
5930 		/* First message. */
5931 		outer->sq_evhead = outer->sq_evtail = mp;
5932 		outer->sq_flags |= SQ_EVENTS;
5933 		mutex_exit(SQLOCK(outer));
5934 		STRSTAT(qwr_outer);
5935 		(void) taskq_dispatch(streams_taskq,
5936 		    (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5937 	} else {
5938 		ASSERT(outer->sq_flags & SQ_EVENTS);
5939 		outer->sq_evtail->b_next = mp;
5940 		outer->sq_evtail = mp;
5941 		mutex_exit(SQLOCK(outer));
5942 	}
5943 }
5944 
5945 /*
5946  * Try to upgrade to write access at the outer perimeter. If this
5947  * cannot be done without blocking then queue the callback to be run
5948  * by the qwriter_outer_thread.
5949  *
5950  * This routine can only be called from put or service procedures plus
5951  * asynchronous callback routines that have properly entered the
5952  * syncq (with entersq). Thus qwriter(OUTER) assumes the caller has one
5953  * claim on the syncq associated with q.
5954  */
5954  */
5955 void
5956 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5957 {
5958 	syncq_t	*osq, *sq, *outer;
5959 	int	failed;
5960 	uint16_t flags;
5961 
5962 	osq = q->q_syncq;
5963 	outer = osq->sq_outer;
5964 	if (outer == NULL)
5965 		panic("qwriter(PERIM_OUTER): no outer perimeter");
5966 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5967 	    outer->sq_oprev != NULL);
5968 
5969 	mutex_enter(SQLOCK(outer));
5970 	flags = outer->sq_flags;
5971 	/*
5972 	 * If some thread is traversing sq_next, or if we are blocked by
5973 	 * outer_insert or outer_remove, or if we already have queued
5974 	 * callbacks, then queue this callback for later processing.
5975 	 *
5976 	 * Also queue the qwriter for an interrupt thread in order
5977 	 * to reduce the time spent running at high IPL; the priority
5978 	 * test below (t_pri >= kpreemptpri) identifies that case.
5979 	 */
5980 	if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
5981 		/*
5982 		 * Queue the become_writer request.
5983 		 * The queueing is atomic under SQLOCK(outer) in order
5984 		 * to synchronize with outer_exit.
5985 		 * queue_writer will drop the outer SQLOCK
5986 		 */
5987 		if (flags & SQ_BLOCKED) {
5988 			/* Must set SQ_WRITER on inner perimeter */
5989 			mutex_enter(SQLOCK(osq));
5990 			osq->sq_flags |= SQ_WRITER;
5991 			mutex_exit(SQLOCK(osq));
5992 		} else {
5993 			if (!(flags & SQ_WRITER)) {
5994 				/*
5995 				 * The outer could have been SQ_BLOCKED thus
5996 				 * SQ_WRITER might not be set on the inner.
5997 				 */
5998 				mutex_enter(SQLOCK(osq));
5999 				osq->sq_flags |= SQ_WRITER;
6000 				mutex_exit(SQLOCK(osq));
6001 			}
6002 			ASSERT(osq->sq_flags & SQ_WRITER);
6003 		}
6004 		queue_writer(outer, func, q, mp);
6005 		return;
6006 	}
6007 	/*
6008 	 * We are half-way to exclusive access to the outer perimeter.
6009 	 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6010 	 * while the inner syncqs are traversed.
6011 	 */
6012 	outer->sq_count++;
6013 	ASSERT(outer->sq_count != 0);	/* wraparound */
6014 	flags |= SQ_WRITER;
6015 	/*
6016 	 * Check if we can run the function immediately. Mark all
6017 	 * syncqs with the writer flag to prevent new entries into
6018 	 * put and service procedures.
6019 	 *
6020 	 * Set SQ_WRITER on all the inner syncqs while holding
6021 	 * the SQLOCK on the outer syncq. This ensures that the changing
6022 	 * of SQ_WRITER is atomic under the outer SQLOCK.
6023 	 */
6024 	failed = 0;
6025 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6026 		uint16_t count;
6027 		uint_t	maxcnt = (sq == osq) ? 1 : 0;
6028 
6029 		mutex_enter(SQLOCK(sq));
6030 		count = sq->sq_count;
6031 		SQ_PUTLOCKS_ENTER(sq);
6032 		SUM_SQ_PUTCOUNTS(sq, count);
6033 		if (sq->sq_count > maxcnt)
6034 			failed = 1;
6035 		sq->sq_flags |= SQ_WRITER;
6036 		SQ_PUTLOCKS_EXIT(sq);
6037 		mutex_exit(SQLOCK(sq));
6038 	}
6039 	if (failed) {
6040 		/*
6041 		 * Some other thread has a read claim on the outer perimeter.
6042 		 * Queue the callback for deferred processing.
6043 		 *
6044 		 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6045 		 * so that other qwriter(OUTER) calls will queue their
6046 		 * callbacks as well. queue_writer increments sq_count so we
6047 		 * decrement to compensate for our increment.
6048 		 *
6049 		 * Dropping SQ_WRITER enables the writer thread to work
6050 		 * on this outer perimeter.
6051 		 */
6052 		outer->sq_flags = flags;
6053 		queue_writer(outer, func, q, mp);
6054 		/* queue_writer dropped the lock */
6055 		mutex_enter(SQLOCK(outer));
6056 		ASSERT(outer->sq_count > 0);
6057 		outer->sq_count--;
6058 		ASSERT(outer->sq_flags & SQ_WRITER);
6059 		flags = outer->sq_flags;
6060 		flags &= ~SQ_WRITER;
6061 		if (flags & SQ_WANTWAKEUP) {
6062 			flags &= ~SQ_WANTWAKEUP;
6063 			cv_broadcast(&outer->sq_wait);
6064 		}
6065 		outer->sq_flags = flags;
6066 		mutex_exit(SQLOCK(outer));
6067 		return;
6068 	} else {
6069 		outer->sq_flags = flags;
6070 		mutex_exit(SQLOCK(outer));
6071 	}
6072 
6073 	/* Can run it immediately */
6074 	(*func)(q, mp);
6075 
6076 	outer_exit(outer);
6077 }
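
/*
 * Illustrative usage (assumed, following the qwriter(9F) interface): a
 * put or service procedure that needs exclusive access to the outer
 * perimeter defers the state change to a callback:
 *
 *	qwriter(q, mp, my_set_state, PERIM_OUTER);
 *
 * which resolves to qwriter_outer(q, mp, my_set_state); my_set_state is
 * a hypothetical callback. It runs immediately if the upgrade succeeds,
 * or later via queue_writer and the streams taskq if other claims exist.
 */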
6078 
6079 /*
6080  * Dequeue all writer callbacks from the outer perimeter and run them.
6081  */
6082 static void
6083 write_now(syncq_t *outer)
6084 {
6085 	mblk_t		*mp;
6086 	queue_t		*q;
6087 	void	(*func)();
6088 
6089 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
6090 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6091 	    outer->sq_oprev != NULL);
6092 	while ((mp = outer->sq_evhead) != NULL) {
6093 		/*
6094 		 * Queues cannot be placed on the queuelist of the outer
6095 		 * perimeter.
6096 		 */
6097 		ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6098 		ASSERT((outer->sq_flags & SQ_EVENTS));
6099 
6100 		outer->sq_evhead = mp->b_next;
6101 		if (outer->sq_evhead == NULL) {
6102 			outer->sq_evtail = NULL;
6103 			outer->sq_flags &= ~SQ_EVENTS;
6104 		}
6105 		ASSERT(outer->sq_count != 0);
6106 		outer->sq_count--;	/* Incremented when enqueued. */
6107 		mutex_exit(SQLOCK(outer));
6108 		/*
6109 		 * Drop the message if the queue is closing.
6110 		 * Make sure that the queue is "claimed" when the callback
6111 		 * is run in order to satisfy various ASSERTs.
6112 		 */
6113 		q = mp->b_queue;
6114 		func = (void (*)())mp->b_prev;
6115 		ASSERT(func != NULL);
6116 		mp->b_next = mp->b_prev = NULL;
6117 		if (q->q_flag & QWCLOSE) {
6118 			freemsg(mp);
6119 		} else {
6120 			claimq(q);
6121 			(*func)(q, mp);
6122 			releaseq(q);
6123 		}
6124 		mutex_enter(SQLOCK(outer));
6125 	}
6126 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
6127 }
6128 
6129 /*
6130  * The list of messages on the inner syncq is effectively hashed
6131  * by destination queue.  These destination queues are doubly
6132  * linked lists (hopefully) in priority order.  Messages are then
6133  * put on the queue referenced by the q_sqhead/q_sqtail elements.
6134  * Additional messages are linked together by the b_next/b_prev
6135  * elements in the mblk, with (similar to putq()) the first message
6136  * having a NULL b_prev and the last message having a NULL b_next.
6137  *
6138  * Events, such as qwriter callbacks, are put onto a list in FIFO
6139  * order referenced by sq_evhead, and sq_evtail.  This is a singly
6140  * linked list, and messages here MUST be processed in the order queued.
6141  */
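
/*
 * Sketch of the layout described above:
 *
 *	sq_head -> queue_A <-> queue_B <-> ...	(q_sqnext/q_sqprev)
 *	               |
 *	           q_sqhead -> mp -> mp -> NULL	(b_next/b_prev)
 *
 *	sq_evhead -> mp -> mp -> NULL		(FIFO event list)
 */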
6142 
6143 /*
6144  * Run the events on the syncq event list (sq_evhead).
6145  * Assumes there is only one claim on the syncq, that it is
6146  * already exclusive (SQ_EXCL set), and that the SQLOCK is held.
6147  * Messages here are processed in order, with the SQ_EXCL bit
6148  * held all the way through till the last message is processed.
6149  */
6150 void
6151 sq_run_events(syncq_t *sq)
6152 {
6153 	mblk_t		*bp;
6154 	queue_t		*qp;
6155 	uint16_t	flags = sq->sq_flags;
6156 	void		(*func)();
6157 
6158 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6159 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6160 	    sq->sq_oprev == NULL) ||
6161 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6162 	    sq->sq_oprev != NULL));
6163 
6164 	ASSERT(flags & SQ_EXCL);
6165 	ASSERT(sq->sq_count == 1);
6166 
6167 	/*
6168 	 * We need to process all of the events on this list.  It
6169 	 * is possible that new events will be added while we are
6170 	 * away processing a callback, so on every loop we re-read
6171 	 * sq_evhead and start back at the beginning of the list.
6172 	 */
6178 	for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
6179 		ASSERT(bp->b_queue->q_syncq == sq);
6180 		ASSERT(sq->sq_flags & SQ_EVENTS);
6181 
6182 		qp = bp->b_queue;
6183 		func = (void (*)())bp->b_prev;
6184 		ASSERT(func != NULL);
6185 
6186 		/*
6187 		 * Messages from the event queue must be taken off in
6188 		 * FIFO order.
6189 		 */
6190 		ASSERT(sq->sq_evhead == bp);
6191 		sq->sq_evhead = bp->b_next;
6192 
6193 		if (bp->b_next == NULL) {
6194 			/* Deleting last */
6195 			ASSERT(sq->sq_evtail == bp);
6196 			sq->sq_evtail = NULL;
6197 			sq->sq_flags &= ~SQ_EVENTS;
6198 		}
6199 		bp->b_prev = bp->b_next = NULL;
6200 		ASSERT(bp->b_datap->db_ref != 0);
6201 
6202 		mutex_exit(SQLOCK(sq));
6203 
6204 		(*func)(qp, bp);
6205 
6206 		mutex_enter(SQLOCK(sq));
6207 		/*
6208 		 * re-read the flags, since they could have changed.
6209 		 */
6210 		flags = sq->sq_flags;
6211 		ASSERT(flags & SQ_EXCL);
6212 	}
6213 	ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6214 	ASSERT(!(sq->sq_flags & SQ_EVENTS));
6215 
6216 	if (flags & SQ_WANTWAKEUP) {
6217 		flags &= ~SQ_WANTWAKEUP;
6218 		cv_broadcast(&sq->sq_wait);
6219 	}
6220 	if (flags & SQ_WANTEXWAKEUP) {
6221 		flags &= ~SQ_WANTEXWAKEUP;
6222 		cv_broadcast(&sq->sq_exitwait);
6223 	}
6224 	sq->sq_flags = flags;
6225 }
6226 
6227 /*
6228  * Put messages on the event list.
6229  * If we can go exclusive now, do so and process the event list, otherwise
6230  * let the last claim service this list (or wake the sqthread).
6231  * This procedure assumes SQLOCK is held.  To run the event list, it
6232  * must be called with no claims.
6233  */
6234 static void
6235 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6236 {
6237 	uint16_t count;
6238 
6239 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6240 	ASSERT(func != NULL);
6241 
6242 	/*
6243 	 * This is a callback.  Add it to the list of callbacks
6244 	 * and see about upgrading.
6245 	 */
6246 	mp->b_prev = (mblk_t *)func;
6247 	mp->b_queue = q;
6248 	mp->b_next = NULL;
6249 	if (sq->sq_evhead == NULL) {
6250 		sq->sq_evhead = sq->sq_evtail = mp;
6251 		sq->sq_flags |= SQ_EVENTS;
6252 	} else {
6253 		ASSERT(sq->sq_evtail != NULL);
6254 		ASSERT(sq->sq_evtail->b_next == NULL);
6255 		ASSERT(sq->sq_flags & SQ_EVENTS);
6256 		sq->sq_evtail->b_next = mp;
6257 		sq->sq_evtail = mp;
6258 	}
6259 	/*
6260 	 * We have set SQ_EVENTS, so threads will have to
6261 	 * unwind out of the perimeter, and new entries will
6262 	 * not grab a putlock.  But we still need to know
6263 	 * how many threads have already made a claim to the
6264 	 * syncq, so grab the putlocks, and sum the counts.
6265 	 * If there are no claims on the syncq, we can upgrade
6266 	 * to exclusive, and run the event list.
6267 	 * NOTE: We hold the SQLOCK, so we can just grab the
6268 	 * putlocks.
6269 	 */
6270 	count = sq->sq_count;
6271 	SQ_PUTLOCKS_ENTER(sq);
6272 	SUM_SQ_PUTCOUNTS(sq, count);
6273 	/*
6274 	 * This thread holds no claim on the syncq (at least not for this
6275 	 * entry), so if the summed count shows no other claims we can
6276 	 * upgrade to exclusive and run the event list ourselves.
6277 	 * Otherwise a thread that does hold a claim will drain the syncq.
6278 	 */
6282 	if (count > 0) {
6283 		/*
6284 		 * Can't upgrade - other threads inside.
6285 		 */
6286 		SQ_PUTLOCKS_EXIT(sq);
6287 		mutex_exit(SQLOCK(sq));
6288 		return;
6289 	}
6290 	/*
6291 	 * Need to set SQ_EXCL and make a claim on the syncq.
6292 	 */
6293 	ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6294 	sq->sq_flags |= SQ_EXCL;
6295 	ASSERT(sq->sq_count == 0);
6296 	sq->sq_count++;
6297 	SQ_PUTLOCKS_EXIT(sq);
6298 
6299 	/* Process the events list */
6300 	sq_run_events(sq);
6301 
6302 	/*
6303 	 * Release our claim...
6304 	 */
6305 	sq->sq_count--;
6306 
6307 	/*
6308 	 * And release SQ_EXCL.
6309 	 * We don't need to acquire the putlocks to release
6310 	 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6311 	 */
6312 	sq->sq_flags &= ~SQ_EXCL;
6313 
6314 	/*
6315 	 * Verify that SQ_EXCL really is clear now.
6316 	 */
6317 	ASSERT(!(sq->sq_flags & SQ_EXCL));
6318 
6319 	/*
6320 	 * If anything happened while we were running the
6321 	 * events (or was there before), we need to process
6322 	 * it now.  We shouldn't be exclusive since we
6323 	 * released the perimeter above (plus, we asserted
6324 	 * for it).
6325 	 */
6326 	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6327 		drain_syncq(sq);
6328 	else
6329 		mutex_exit(SQLOCK(sq));
6330 }
6331 
6332 /*
6333  * Perform delayed processing. The caller has to make sure that it is safe
6334  * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits
6335  * are set).
6336  *
6337  * Assume that the caller has NO claims on the syncq.  However, a claim
6338  * on the syncq does not indicate that a thread is draining the syncq.
6339  * There may be more claims on the syncq than there are threads draining
6340  * (i.e. #_threads_draining <= sq_count).
6341  *
6342  * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6343  * in order to preserve qwriter(OUTER) ordering constraints.
6344  *
6345  * sq_putcounts only need to be checked when dispatching the queued
6346  * writer call for a CIPUT sync queue, but this is handled in sq_run_events.
6347  */
6348 void
6349 drain_syncq(syncq_t *sq)
6350 {
6351 	queue_t		*qp;
6352 	uint16_t	count;
6353 	uint16_t	type = sq->sq_type;
6354 	uint16_t	flags = sq->sq_flags;
6355 	boolean_t	bg_service = sq->sq_svcflags & SQ_SERVICE;
6356 
6357 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6358 	    "drain_syncq start:%p", sq);
6359 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6360 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6361 	    sq->sq_oprev == NULL) ||
6362 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6363 	    sq->sq_oprev != NULL));
6364 
6365 	/*
6366 	 * Drop SQ_SERVICE flag.
6367 	 */
6368 	if (bg_service)
6369 		sq->sq_svcflags &= ~SQ_SERVICE;
6370 
6371 	/*
6372 	 * If SQ_EXCL is set, someone else is processing this syncq - let him
6373 	 * finish the job.
6374 	 */
6375 	if (flags & SQ_EXCL) {
6376 		if (bg_service) {
6377 			ASSERT(sq->sq_servcount != 0);
6378 			sq->sq_servcount--;
6379 		}
6380 		mutex_exit(SQLOCK(sq));
6381 		return;
6382 	}
6383 
6384 	/*
6385 	 * This routine can be called by a background thread if
6386 	 * it was scheduled by a hi-priority thread.  So, if there are
6387 	 * no messages queued, return (remember, we have the SQLOCK,
6388 	 * and it cannot change until we release it). Also wake up any waiters.
6389 	 */
6390 	if (!(flags & SQ_QUEUED)) {
6391 		if (flags & SQ_WANTWAKEUP) {
6392 			flags &= ~SQ_WANTWAKEUP;
6393 			cv_broadcast(&sq->sq_wait);
6394 		}
6395 		if (flags & SQ_WANTEXWAKEUP) {
6396 			flags &= ~SQ_WANTEXWAKEUP;
6397 			cv_broadcast(&sq->sq_exitwait);
6398 		}
6399 		sq->sq_flags = flags;
6400 		if (bg_service) {
6401 			ASSERT(sq->sq_servcount != 0);
6402 			sq->sq_servcount--;
6403 		}
6404 		mutex_exit(SQLOCK(sq));
6405 		return;
6406 	}
6407 
6408 	/*
6409 	 * If this is not a concurrent put perimeter, we need to
6410 	 * become exclusive to drain.  Also, if not CIPUT, we would
6411 	 * not have acquired a putlock, so we don't need to check
6412 	 * the putcounts.  If not entering with a claim, we test
6413 	 * for sq_count == 0.
6414 	 */
6415 	type = sq->sq_type;
6416 	if (!(type & SQ_CIPUT)) {
6417 		if (sq->sq_count > 1) {
6418 			if (bg_service) {
6419 				ASSERT(sq->sq_servcount != 0);
6420 				sq->sq_servcount--;
6421 			}
6422 			mutex_exit(SQLOCK(sq));
6423 			return;
6424 		}
6425 		sq->sq_flags |= SQ_EXCL;
6426 	}
6427 
6428 	/*
6429 	 * This is where we make a claim to the syncq.
6430 	 * This can either be done by incrementing a putlock, or
6431 	 * the sq_count.  But since we already have the SQLOCK
6432 	 * here, we just bump the sq_count.
6433 	 *
6434 	 * Note that after we make a claim, we need to let the code
6435 	 * fall through to the end of this routine to clean itself
6436 	 * up.  A return in the while loop will put the syncq in a
6437 	 * very bad state.
6438 	 */
6439 	sq->sq_count++;
6440 	ASSERT(sq->sq_count != 0);	/* wraparound */
6441 
6442 	while ((flags = sq->sq_flags) & SQ_QUEUED) {
6443 		/*
6444 		 * If we are told to stayaway or went exclusive,
6445 		 * we are done.
6446 		 */
6447 		if (flags & (SQ_STAYAWAY)) {
6448 			break;
6449 		}
6450 
6451 		/*
6452 		 * If there are events to run, do so.
6453 		 * We have one claim to the syncq, so if there are
6454 		 * more than one, other threads are running.
6455 		 */
6456 		if (sq->sq_evhead != NULL) {
6457 			ASSERT(sq->sq_flags & SQ_EVENTS);
6458 
6459 			count = sq->sq_count;
6460 			SQ_PUTLOCKS_ENTER(sq);
6461 			SUM_SQ_PUTCOUNTS(sq, count);
6462 			if (count > 1) {
6463 				SQ_PUTLOCKS_EXIT(sq);
6464 				/* Can't upgrade - other threads inside */
6465 				break;
6466 			}
6467 			ASSERT((flags & SQ_EXCL) == 0);
6468 			sq->sq_flags = flags | SQ_EXCL;
6469 			SQ_PUTLOCKS_EXIT(sq);
6470 			/*
6471 			 * We have the only claim; run the events.
6472 			 * SQ_EXCL is dropped again below (immediately
6473 			 * for CIPUT perimeters).
6473 			 */
6474 			sq_run_events(sq);
6475 
6476 			/*
6477 			 * If this is a CIPUT perimeter, we need
6478 			 * to drop the SQ_EXCL flag so we can properly
6479 			 * continue draining the syncq.
6480 			 */
6481 			if (type & SQ_CIPUT) {
6482 				ASSERT(sq->sq_flags & SQ_EXCL);
6483 				sq->sq_flags &= ~SQ_EXCL;
6484 			}
6485 
6486 			/*
6487 			 * And go back to the beginning just in case
6488 			 * anything changed while we were away.
6489 			 */
6490 			ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6491 			continue;
6492 		}
6493 
6494 		ASSERT(sq->sq_evhead == NULL);
6495 		ASSERT(!(sq->sq_flags & SQ_EVENTS));
6496 
6497 		/*
6498 		 * Find the queue that is not draining.
6499 		 *
6500 		 * q_draining is protected by QLOCK which we do not hold.
6501 		 * But if it was set, then a thread was draining, and if it gets
6502 		 * cleared, then it was because the thread has successfully
6503 		 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6504 		 * state to happen, a thread needs the SQLOCK which we hold, and
6505 		 * if there was such a flag, we would have already seen it.
6506 		 */
6507 
6508 		for (qp = sq->sq_head;
6509 		    qp != NULL && (qp->q_draining ||
6510 		    (qp->q_sqflags & Q_SQDRAINING));
6511 		    qp = qp->q_sqnext)
6512 			;
6513 
6514 		if (qp == NULL)
6515 			break;
6516 
6517 		/*
6518 		 * We have a queue to work on, and we hold the
6519 		 * SQLOCK and one claim, call qdrain_syncq.
6520 		 * This means we need to release the SQLOCK and
6521 		 * acquire the QLOCK (OK since we have a claim).
6522 		 * Note that qdrain_syncq will actually dequeue
6523 		 * this queue from the sq_head list when it is
6524 		 * convinced all the work is done and release
6525 		 * the QLOCK before returning.
6526 		 */
6527 		qp->q_sqflags |= Q_SQDRAINING;
6528 		mutex_exit(SQLOCK(sq));
6529 		mutex_enter(QLOCK(qp));
6530 		qdrain_syncq(sq, qp);
6531 		mutex_enter(SQLOCK(sq));
6532 
6533 		/* The queue is drained */
6534 		ASSERT(qp->q_sqflags & Q_SQDRAINING);
6535 		qp->q_sqflags &= ~Q_SQDRAINING;
6536 		/*
6537 		 * NOTE: After this point qp should not be used since it may be
6538 		 * closed.
6539 		 */
6540 	}
6541 
6542 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6543 	flags = sq->sq_flags;
6544 
6545 	/*
6546 	 * sq->sq_head cannot change because we hold the
6547 	 * SQLOCK. However, a thread CAN decide that it is no longer
6548 	 * going to drain that queue, but that should only be due to
6549 	 * a GOAWAY state, and we would see that here.
6550 	 *
6551 	 * This loop is not very efficient. One solution may be adding a second
6552 	 * pointer to the "draining" queue, but it is difficult to do when
6553 	 * queues are inserted in the middle due to priority ordering. Another
6554 	 * possibility is to yank the queue out of the sq list and put it onto
6555 	 * the "draining list" and then put it back if it can't be drained.
6556 	 */
6557 
6558 	ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6559 	    (type & SQ_CI) || sq->sq_head->q_draining);
6560 
6561 	/* Drop SQ_EXCL for non-CIPUT perimeters */
6562 	if (!(type & SQ_CIPUT))
6563 		flags &= ~SQ_EXCL;
6564 	ASSERT((flags & SQ_EXCL) == 0);
6565 
6566 	/* Wake up any waiters. */
6567 	if (flags & SQ_WANTWAKEUP) {
6568 		flags &= ~SQ_WANTWAKEUP;
6569 		cv_broadcast(&sq->sq_wait);
6570 	}
6571 	if (flags & SQ_WANTEXWAKEUP) {
6572 		flags &= ~SQ_WANTEXWAKEUP;
6573 		cv_broadcast(&sq->sq_exitwait);
6574 	}
6575 	sq->sq_flags = flags;
6576 
6577 	ASSERT(sq->sq_count != 0);
6578 	/* Release our claim. */
6579 	sq->sq_count--;
6580 
6581 	if (bg_service) {
6582 		ASSERT(sq->sq_servcount != 0);
6583 		sq->sq_servcount--;
6584 	}
6585 
6586 	mutex_exit(SQLOCK(sq));
6587 
6588 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6589 	    "drain_syncq end:%p", sq);
6590 }
6591 
6592 
6593 /*
6594  *
6595  * qdrain_syncq can be called (currently) from only one of two places:
6596  *	drain_syncq
6597  * 	putnext  (or some variation of it).
6598  * and eventually
6599  * 	qwait(_sig)
6600  *
6601  * If called from drain_syncq, we found it in the list
6602  * of queues needing service, so there is work to be done (or it
6603  * wouldn't be on the list).
6604  *
6605  * If called from some putnext variation, it was because the
6606  * perimeter is open, but messages are blocking a putnext and
6607  * there is no thread working on it.  Now a thread could start
6608  * working on it while we are getting ready to do so ourselves, but
6609  * the thread would set the q_draining flag, and we can spin out.
6610  *
6611  * As for qwait(_sig), I think I shall let it continue to call
6612  * drain_syncq directly (after all, it will get here eventually).
6613  *
6614  * qdrain_syncq has to terminate when:
6615  * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6616  * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6617  *
6618  * ASSUMES:
6619  *	One claim
6620  * 	QLOCK held
6621  * 	SQLOCK not held
6622  *	Will release QLOCK before returning
6623  */
6624 void
6625 qdrain_syncq(syncq_t *sq, queue_t *q)
6626 {
6627 	mblk_t		*bp;
6628 	boolean_t	do_clr;
6629 #ifdef DEBUG
6630 	uint16_t	count;
6631 #endif
6632 
6633 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6634 	    "drain_syncq start:%p", sq);
6635 	ASSERT(q->q_syncq == sq);
6636 	ASSERT(MUTEX_HELD(QLOCK(q)));
6637 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6638 	/*
6639 	 * For non-CIPUT perimeters, we should be called with the
6640 	 * exclusive bit set already.  For CIPUT perimeters we
6641 	 * will be doing a concurrent drain, so it better not be set.
6642 	 */
6643 	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6644 	ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6645 	ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6646 	/*
6647 	 * All outer pointers are set, or none of them are
6648 	 */
6649 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6650 	    sq->sq_oprev == NULL) ||
6651 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6652 	    sq->sq_oprev != NULL));
6653 #ifdef DEBUG
6654 	count = sq->sq_count;
6655 	/*
6656 	 * This is OK without the putlocks, because we have one
6657 	 * claim either from the sq_count, or a putcount.  We could
6658 	 * get an erroneous value from other counts, but ours won't
6659 	 * change, so one way or another, we will have at least a
6660 	 * value of one.
6661 	 */
6662 	SUM_SQ_PUTCOUNTS(sq, count);
6663 	ASSERT(count >= 1);
6664 #endif /* DEBUG */
6665 
6666 	/*
6667 	 * The first thing to do here is find out if a thread is already
6668 	 * draining this queue or the queue is closing. If so, we are done,
6669 	 * just return. Also, if there are no messages, we are done as well.
6670 	 * Note that we check the q_sqhead since there is a window of
6671 	 * opportunity for us to enter here because Q_SQQUEUED was set, but is
6672 	 * not anymore.
6673 	 */
6674 	if (q->q_draining || (q->q_sqhead == NULL)) {
6675 		mutex_exit(QLOCK(q));
6676 		return;
6677 	}
6678 
6679 	/*
6680 	 * If the perimeter is exclusive, there is nothing we can
6681 	 * do right now, go away.
6682 	 * Note that there is nothing to prevent this case from changing
6683 	 * right after this check, but the spin-out will catch it.
6684 	 */
6685 
6686 	/* Tell other threads that we are draining this queue */
6687 	q->q_draining = 1;	/* Protected by QLOCK */
6688 
6689 	for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6690 
6691 		/*
6692 		 * Because we can enter this routine just because
6693 		 * a putnext is blocked, we need to spin out if
6694 		 * the perimeter wants to go exclusive as well
6695 		 * as just blocked. We need to spin out also if
6696 		 * events are queued on the syncq.
6697 		 * Don't check for SQ_EXCL, because non-CIPUT
6698 		 * perimeters would set it, and it can't become
6699 		 * exclusive while we hold a claim.
6700 		 */
6701 		if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6702 			break;
6703 		}
6704 
6705 #ifdef DEBUG
6706 		/*
6707 		 * Since we are in qdrain_syncq, we already know the queue,
6708 		 * but for sanity, we want to check this against the qp that
6709 		 * was passed in by bp->b_queue.
6710 		 */
6711 
6712 		ASSERT(bp->b_queue == q);
6713 		ASSERT(bp->b_queue->q_syncq == sq);
6714 		bp->b_queue = NULL;
6715 
6716 		/*
6717 		 * We would have the following check in the DEBUG code:
6718 		 *
6719 		 * if (bp->b_prev != NULL)  {
6720 		 *	ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6721 		 * }
6722 		 *
6723 		 * This can't be done, however, since IP modifies qinfo
6724 		 * structure at run-time (switching between IPv4 qinfo and IPv6
6725 		 * qinfo), invalidating the check.
6726 		 * So the assignment to func is left here, but the ASSERT itself
6727 		 * is removed until the whole issue is resolved.
6728 		 */
6729 #endif
6730 		ASSERT(q->q_sqhead == bp);
6731 		q->q_sqhead = bp->b_next;
6732 		bp->b_prev = bp->b_next = NULL;
6733 		ASSERT(q->q_syncqmsgs > 0);
6734 		mutex_exit(QLOCK(q));
6735 
6736 		ASSERT(bp->b_datap->db_ref != 0);
6737 
6738 		(void) (*q->q_qinfo->qi_putp)(q, bp);
6739 
6740 		mutex_enter(QLOCK(q));
6741 		/*
6742 		 * We should decrement q_syncqmsgs only after executing the
6743 		 * put procedure to avoid a possible race with putnext().
6744 		 * Even though putnext() sees that Q_SQQUEUED is set, there is
6745 		 * an optimization which allows putnext to call the put
6746 		 * procedure directly if (q_syncqmsgs == 0), and thus
6747 		 * a message reordering could otherwise occur.
6748 		 */
6749 		q->q_syncqmsgs--;
6750 
6751 		/*
6752 		 * Clear QFULL in the next service procedure queue if
6753 		 * this is the last message destined for that queue.
6754 		 *
6755 		 * It would make better sense to have some sort of
6756 		 * tunable for the low water mark, but these semantics
6757 		 * are not yet defined.  So, alas, we use a constant.
6758 		 */
6759 		do_clr = (q->q_syncqmsgs == 0);
6760 		mutex_exit(QLOCK(q));
6761 
6762 		if (do_clr)
6763 			clr_qfull(q);
6764 
6765 		mutex_enter(QLOCK(q));
6766 		/*
6767 		 * Always clear SQ_EXCL when CIPUT in order to handle
6768 		 * qwriter(INNER). The putp() can call qwriter and get
6769 		 * exclusive access IFF this is the only claim.  So, we
6770 		 * need to test for this possibility so we can acquire
6771 		 * the mutex and clear the bit.
6772 		 */
6776 		if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6777 			mutex_enter(SQLOCK(sq));
6778 			sq->sq_flags &= ~SQ_EXCL;
6779 			mutex_exit(SQLOCK(sq));
6780 		}
6781 	}
6782 
6783 	/*
6784 	 * We should either have no messages left on this queue, or we were
6785 	 * told to goaway by a waiter (which we will wake up at the
6786 	 * end of this function).
6787 	 */
6788 	ASSERT((q->q_sqhead == NULL) ||
6789 	    (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6790 
6791 	ASSERT(MUTEX_HELD(QLOCK(q)));
6792 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6793 
6794 	/*
6795 	 * Remove the q from the syncq list if all the messages are
6796 	 * drained.
6797 	 */
6798 	if (q->q_sqhead == NULL) {
6799 		mutex_enter(SQLOCK(sq));
6800 		if (q->q_sqflags & Q_SQQUEUED)
6801 			SQRM_Q(sq, q);
6802 		mutex_exit(SQLOCK(sq));
6803 		/*
6804 		 * Since the queue is removed from the list, reset its priority.
6805 		 */
6806 		q->q_spri = 0;
6807 	}
6808 
6809 	/*
6810 	 * Remember, the q_draining flag is used to let another
6811 	 * thread know that there is a thread currently draining
6812 	 * the messages for a queue.  Since we are now done with
6813 	 * this queue (even if there may be messages still there),
6814 	 * we need to clear this flag so some thread will work
6815 	 * on it if needed.
6816 	 */
6817 	ASSERT(q->q_draining);
6818 	q->q_draining = 0;
6819 
6820 	/* called with a claim, so OK to drop all locks. */
6821 	mutex_exit(QLOCK(q));
6822 
6823 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6824 	    "drain_syncq end:%p", sq);
6825 }
6826 /* END OF QDRAIN_SYNCQ  */
6827 
6828 
6829 /*
6830  * This is the mate to qdrain_syncq, except that it is putting the
6831  * message onto the queue instead of draining it.  Since the
6832  * message is destined for the queue that is selected, there is
6833  * no need to identify the function because the message is
6834  * intended for the put routine for the queue.  But this
6835  * routine will do it anyway just in case (but only for debug kernels).
6836  *
6837  * After the message is enqueued on the syncq, it calls putnext_tail()
6838  * which will schedule a background thread to actually process the message.
6839  *
6840  * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6841  * SQLOCK(sq) and QLOCK(q) are not held.
6842  */
6843 void
6844 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6845 {
6846 	queue_t		*fq = NULL;
6847 
6848 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6849 	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6850 	ASSERT(sq->sq_count > 0);
6851 	ASSERT(q->q_syncq == sq);
6852 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6853 	    sq->sq_oprev == NULL) ||
6854 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6855 	    sq->sq_oprev != NULL));
6856 
6857 	mutex_enter(QLOCK(q));
6858 
6859 	/*
6860 	 * Set QFULL in next service procedure queue (that cares) if not
6861 	 * already set and if there are already more messages on the syncq
6862 	 * than sq_max_size.  If sq_max_size is 0, no flow control will be
6863 	 * asserted on any syncq.
6864 	 *
6865 	 * The fq here is the next queue with a service procedure.
6866 	 * This is where we would fail canputnext, so this is where we
6867 	 * need to set QFULL.
6868 	 *
6869 	 * LOCKING HIERARCHY: In the case when fq != q we need to
6870 	 *  a) Take QLOCK(fq) to set QFULL flag and
6871 	 *  b) Take sd_reflock in the case of the hot stream to update
6872 	 *  	sd_refcnt.
6873 	 * We already have QLOCK at this point. To avoid cross-locks with
6874 	 * freezestr() which grabs all QLOCKs and with strlock() which grabs
6875 	 * both SQLOCK and sd_reflock, we need to drop respective locks first.
6876 	 */
6877 	if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
6878 	    (q->q_syncqmsgs > sq_max_size)) {
6879 		if ((fq = q->q_nfsrv) == q) {
6880 			fq->q_flag |= QFULL;
6881 		} else {
6882 			mutex_exit(QLOCK(q));
6883 			mutex_enter(QLOCK(fq));
6884 			fq->q_flag |= QFULL;
6885 			mutex_exit(QLOCK(fq));
6886 			mutex_enter(QLOCK(q));
6887 		}
6888 	}
6889 
6890 #ifdef DEBUG
6891 	/*
6892 	 * This is used for debug in the qfill_syncq/qdrain_syncq case
6893 	 * to trace the queue that the message is intended for.  Note
6894 	 * that the original use was to identify the queue and function
6895 	 * to call on the drain.  In the new syncq, we have the context
6896 	 * of the queue that we are draining, so call its putproc and
6897 	 * don't rely on the saved values.  But for debug this is still
6898 	 * useful information.
6899 	 */
6900 	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6901 	mp->b_queue = q;
6902 	mp->b_next = NULL;
6903 #endif
6904 	ASSERT(q->q_syncq == sq);
6905 	/*
6906 	 * Enqueue the message on the list.
6907 	 * SQPUT_MP() accesses q_syncqmsgs.  We are already holding QLOCK to
6908 	 * protect it.  So it's OK to acquire SQLOCK after SQPUT_MP().
6909 	 */
6910 	SQPUT_MP(q, mp);
6911 	mutex_enter(SQLOCK(sq));
6912 
6913 	/*
6914 	 * And queue on syncq for scheduling, if not already queued.
6915 	 * Note that we need the SQLOCK for this, and for testing flags
6916 	 * at the end to see if we will drain.  So grab it now, and
6917 	 * release it before we call qdrain_syncq or return.
6918 	 */
6919 	if (!(q->q_sqflags & Q_SQQUEUED)) {
6920 		q->q_spri = curthread->t_pri;
6921 		SQPUT_Q(sq, q);
6922 	}
6923 #ifdef DEBUG
6924 	else {
6925 		/*
6926 		 * All of these conditions MUST be true!
6927 		 */
6928 		ASSERT(sq->sq_tail != NULL);
6929 		if (sq->sq_tail == sq->sq_head) {
6930 			ASSERT((q->q_sqprev == NULL) &&
6931 			    (q->q_sqnext == NULL));
6932 		} else {
6933 			ASSERT((q->q_sqprev != NULL) ||
6934 			    (q->q_sqnext != NULL));
6935 		}
6936 		ASSERT(sq->sq_flags & SQ_QUEUED);
6937 		ASSERT(q->q_syncqmsgs != 0);
6938 		ASSERT(q->q_sqflags & Q_SQQUEUED);
6939 	}
6940 #endif
6941 	mutex_exit(QLOCK(q));
6942 	/*
6943 	 * SQLOCK is still held, so sq_count can be safely decremented.
6944 	 */
6945 	sq->sq_count--;
6946 
6947 	putnext_tail(sq, q, 0);
6948 	/* Should not reference sq or q after this point. */
6949 }
6950 
6951 /*  End of qfill_syncq  */
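
/*
 * For clarity, the producer/consumer flow between qfill_syncq and
 * qdrain_syncq (a summary of the code above, not a new interface):
 *
 *	putnext (perimeter busy) -> qfill_syncq: SQPUT_MP + SQPUT_Q
 *	putnext_tail/background  -> drain_syncq -> qdrain_syncq -> qi_putp
 */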
6952 
6953 /*
6954  * Remove all messages from a syncq (if qp is NULL) or remove all messages
6955  * that would be put into qp by drain_syncq.
6956  * Used when deleting the syncq (qp == NULL) or when detaching
6957  * a queue (qp != NULL).
6958  * Return non-zero if one or more messages were freed.
6959  *
6960  * no need to grab sq_putlocks here. See comment in strsubr.h that explains when
6961  * sq_putlocks are used.
6962  *
6963  * NOTE: This function assumes that it is called from the close() context and
6964  * that all the queues in the syncq are going away. For this reason it doesn't
6965  * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6966  * currently valid, but it would be useful to rethink this function so that it
6967  * behaves properly in other cases.
6968  */
6969 int
6970 flush_syncq(syncq_t *sq, queue_t *qp)
6971 {
6972 	mblk_t		*bp, *mp_head, *mp_next, *mp_prev;
6973 	queue_t		*q;
6974 	int		ret = 0;
6975 
6976 	mutex_enter(SQLOCK(sq));
6977 
6978 	/*
6979 	 * First, make sure there are no
6980 	 * events listed for this queue.  All events for this queue
6981 	 * will just be freed.
6982 	 */
6983 	if (qp != NULL && sq->sq_evhead != NULL) {
6984 		ASSERT(sq->sq_flags & SQ_EVENTS);
6985 
6986 		mp_prev = NULL;
6987 		for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6988 			mp_next = bp->b_next;
6989 			if (bp->b_queue == qp) {
6990 				/* Delete this message */
6991 				if (mp_prev != NULL) {
6992 					mp_prev->b_next = mp_next;
6993 					/*
6994 					 * Update sq_evtail if the last element
6995 					 * is removed.
6996 					 */
6997 					if (bp == sq->sq_evtail) {
6998 						ASSERT(mp_next == NULL);
6999 						sq->sq_evtail = mp_prev;
7000 					}
7001 				} else
7002 					sq->sq_evhead = mp_next;
7003 				if (sq->sq_evhead == NULL)
7004 					sq->sq_flags &= ~SQ_EVENTS;
7005 				bp->b_prev = bp->b_next = NULL;
7006 				freemsg(bp);
7007 				ret++;
7008 			} else {
7009 				mp_prev = bp;
7010 			}
7011 		}
7012 	}
7013 
7014 	/*
7015 	 * Walk sq_head and:
7016 	 *	- if qp is set, remove its messages
7017 	 *	- if qp is not set, remove all messages
7018 	 */
7019 	q = sq->sq_head;
7020 	while (q != NULL) {
7021 		ASSERT(q->q_syncq == sq);
7022 		if ((qp == NULL) || (qp == q)) {
7023 			/*
7024 			 * Yank the messages as a list off the queue
7025 			 */
7026 			mp_head = q->q_sqhead;
7027 			/*
7028 			 * We do not have QLOCK(q) here (which is safe due to
7029 			 * assumptions mentioned above). To obtain the lock we
7030 			 * need to release SQLOCK which may allow lots of things
7031 			 * to change upon us. This place requires more analysis.
7032 			 */
7033 			q->q_sqhead = q->q_sqtail = NULL;
7034 			ASSERT(mp_head->b_queue &&
7035 			    mp_head->b_queue->q_syncq == sq);
7036 
7037 			/*
7038 			 * Free each of the messages.
7039 			 */
7040 			for (bp = mp_head; bp != NULL; bp = mp_next) {
7041 				mp_next = bp->b_next;
7042 				bp->b_prev = bp->b_next = NULL;
7043 				freemsg(bp);
7044 				ret++;
7045 			}
7046 			/*
7047 			 * Now remove the queue from the syncq.
7048 			 */
7049 			ASSERT(q->q_sqflags & Q_SQQUEUED);
7050 			SQRM_Q(sq, q);
7051 			q->q_spri = 0;
7052 			q->q_syncqmsgs = 0;
7053 
7054 			/*
7055 			 * If qp was specified, we are done with it and are
7056 			 * going to drop SQLOCK(sq) and return. We wakeup syncq
7057 			 * waiters while we still have the SQLOCK.
7058 			 */
7059 			if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7060 				sq->sq_flags &= ~SQ_WANTWAKEUP;
7061 				cv_broadcast(&sq->sq_wait);
7062 			}
7063 			/* Drop SQLOCK across clr_qfull */
7064 			mutex_exit(SQLOCK(sq));
7065 
7066 			/*
7067 			 * We avoid doing the test that drain_syncq does and
7068 			 * unconditionally clear qfull for every flushed
7069 			 * message. Since flush_syncq is only called during
7070 			 * close this should not be a problem.
7071 			 */
7072 			clr_qfull(q);
7073 			if (qp != NULL) {
7074 				return (ret);
7075 			} else {
7076 				mutex_enter(SQLOCK(sq));
7077 				/*
7078 				 * The head was removed by SQRM_Q above.
7079 				 * Reread the new head and flush it.
7080 				 */
7081 				q = sq->sq_head;
7082 			}
7083 		} else {
7084 			q = q->q_sqnext;
7085 		}
7086 		ASSERT(MUTEX_HELD(SQLOCK(sq)));
7087 	}
7088 
7089 	if (sq->sq_flags & SQ_WANTWAKEUP) {
7090 		sq->sq_flags &= ~SQ_WANTWAKEUP;
7091 		cv_broadcast(&sq->sq_wait);
7092 	}
7093 
7094 	mutex_exit(SQLOCK(sq));
7095 	return (ret);
7096 }
7097 
7098 /*
7099  * Propagate all messages from a syncq to the next syncq that are associated
7100  * with the specified queue. If the queue is attached to a driver or if the
7101  * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7102  *
7103  * Assumes that the stream is strlock()'ed. We don't come here if there
7104  * are no messages to propagate.
7105  *
7106  * NOTE : If the queue is attached to a driver, all the messages are freed
7107  * as there is no point in propagating the messages from the driver syncq
7108  * to the closing stream head which will in turn get freed later.
7109  */
7110 static int
7111 propagate_syncq(queue_t *qp)
7112 {
7113 	mblk_t		*bp, *head, *tail, *prev, *next;
7114 	syncq_t 	*sq;
7115 	queue_t		*nqp;
7116 	syncq_t		*nsq;
7117 	boolean_t	isdriver;
7118 	int 		moved = 0;
7119 	uint16_t	flags;
7120 	pri_t		priority = curthread->t_pri;
7121 #ifdef DEBUG
7122 	void		(*func)();
7123 #endif
7124 
7125 	sq = qp->q_syncq;
7126 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7127 	/* debug macro */
7128 	SQ_PUTLOCKS_HELD(sq);
7129 	/*
7130 	 * As entersq() does not increment the sq_count for
7131 	 * the write side, check sq_count for non-QPERQ
7132 	 * perimeters alone.
7133 	 */
7134 	ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7135 
7136 	/*
7137 	 * propagate_syncq() can be called because of either messages or
7138 	 * events on the queue syncq. Do the actual message propagation
7139 	 * if there are any messages.
7140 	 */
7141 	if (qp->q_syncqmsgs) {
7142 		isdriver = (qp->q_flag & QISDRV);
7143 
7144 		if (!isdriver) {
7145 			nqp = qp->q_next;
7146 			nsq = nqp->q_syncq;
7147 			ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7148 			/* debug macro */
7149 			SQ_PUTLOCKS_HELD(nsq);
7150 #ifdef DEBUG
7151 			func = (void (*)())nqp->q_qinfo->qi_putp;
7152 #endif
7153 		}
7154 
7155 		SQRM_Q(sq, qp);
7156 		priority = MAX(qp->q_spri, priority);
7157 		qp->q_spri = 0;
7158 		head = qp->q_sqhead;
7159 		tail = qp->q_sqtail;
7160 		qp->q_sqhead = qp->q_sqtail = NULL;
7161 		qp->q_syncqmsgs = 0;
7162 
7163 		/*
7164 		 * Walk the list of messages, and free them if this is a driver;
7165 		 * otherwise reset the b_prev and b_queue values for the new queue.
7166 		 * Afterward, we will just add the head to the end of the next
7167 		 * syncq, and point the tail to the end of this one.
7168 		 */
7169 
7170 		for (bp = head; bp != NULL; bp = next) {
7171 			next = bp->b_next;
7172 			if (isdriver) {
7173 				bp->b_prev = bp->b_next = NULL;
7174 				freemsg(bp);
7175 				continue;
7176 			}
7177 			/* Change the q values for this message */
7178 			bp->b_queue = nqp;
7179 #ifdef DEBUG
7180 			bp->b_prev = (mblk_t *)func;
7181 #endif
7182 			moved++;
7183 		}
7184 		/*
7185 		 * Attach list of messages to the end of the new queue (if there
7186 		 * is a list of messages).
7187 		 */
7188 
7189 		if (!isdriver && head != NULL) {
7190 			ASSERT(tail != NULL);
7191 			if (nqp->q_sqhead == NULL) {
7192 				nqp->q_sqhead = head;
7193 			} else {
7194 				ASSERT(nqp->q_sqtail != NULL);
7195 				nqp->q_sqtail->b_next = head;
7196 			}
7197 			nqp->q_sqtail = tail;
7198 			/*
7199 			 * When messages are moved from a high priority queue to
7200 			 * another queue, the destination queue priority is
7201 			 * upgraded.
7202 			 */
7203 
7204 			if (priority > nqp->q_spri)
7205 				nqp->q_spri = priority;
7206 
7207 			SQPUT_Q(nsq, nqp);
7208 
7209 			nqp->q_syncqmsgs += moved;
7210 			ASSERT(nqp->q_syncqmsgs != 0);
7211 		}
7212 	}
7213 
7214 	/*
7215 	 * Before we leave, we need to make sure there are no
7216 	 * events listed for this queue.  All events for this queue
7217 	 * will just be freed.
7218 	 */
7219 	if (sq->sq_evhead != NULL) {
7220 		ASSERT(sq->sq_flags & SQ_EVENTS);
7221 		prev = NULL;
7222 		for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7223 			next = bp->b_next;
7224 			if (bp->b_queue == qp) {
7225 				/* Delete this message */
7226 				if (prev != NULL) {
7227 					prev->b_next = next;
7228 					/*
7229 					 * Update sq_evtail if the last element
7230 					 * is removed.
7231 					 */
7232 					if (bp == sq->sq_evtail) {
7233 						ASSERT(next == NULL);
7234 						sq->sq_evtail = prev;
7235 					}
7236 				} else
7237 					sq->sq_evhead = next;
7238 				if (sq->sq_evhead == NULL)
7239 					sq->sq_flags &= ~SQ_EVENTS;
7240 				bp->b_prev = bp->b_next = NULL;
7241 				freemsg(bp);
7242 			} else {
7243 				prev = bp;
7244 			}
7245 		}
7246 	}
7247 
7248 	flags = sq->sq_flags;
7249 
7250 	/* Wake up any waiter before leaving. */
7251 	if (flags & SQ_WANTWAKEUP) {
7252 		flags &= ~SQ_WANTWAKEUP;
7253 		cv_broadcast(&sq->sq_wait);
7254 	}
7255 	sq->sq_flags = flags;
7256 
7257 	return (moved);
7258 }
7259 
7260 /*
7261  * Try to upgrade to exclusive access at the inner perimeter. If this cannot
7262  * be done without blocking, then the request will be queued on the syncq
7263  * and drain_syncq will run it later.
7264  *
7265  * This routine can only be called from put or service procedures, plus
7266  * asynchronous callback routines that have properly entered the
7267  * queue (with entersq). Thus qwriter_inner assumes the caller has one claim
7268  * on the syncq associated with q.
7269  */
7270 void
7271 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
7272 {
7273 	syncq_t	*sq = q->q_syncq;
7274 	uint16_t count;
7275 
7276 	mutex_enter(SQLOCK(sq));
7277 	count = sq->sq_count;
7278 	SQ_PUTLOCKS_ENTER(sq);
7279 	SUM_SQ_PUTCOUNTS(sq, count);
7280 	ASSERT(count >= 1);
7281 	ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));
7282 
7283 	if (count == 1) {
7284 		/*
7285 		 * Can upgrade. This case also handles nested qwriter calls
7286 		 * (when the qwriter callback function calls qwriter). In that
7287 		 * case SQ_EXCL is already set.
7288 		 */
7289 		sq->sq_flags |= SQ_EXCL;
7290 		SQ_PUTLOCKS_EXIT(sq);
7291 		mutex_exit(SQLOCK(sq));
7292 		(*func)(q, mp);
7293 		/*
7294 		 * Assumes that leavesq, putnext, and drain_syncq will reset
7295 		 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
7296 		 * until putnext, leavesq, or drain_syncq drops it.
7297 		 * That way we handle nested qwriter(INNER) without dropping
7298 		 * SQ_EXCL until the outermost qwriter callback routine is
7299 		 * done.
7300 		 */
7301 		return;
7302 	}
7303 	SQ_PUTLOCKS_EXIT(sq);
7304 	sqfill_events(sq, q, mp, func);
7305 }
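
/*
 * Illustrative sketch (compiled out; the xx_* names are hypothetical):
 * a module whose put procedure needs exclusive (writer) access at the
 * inner perimeter, e.g. to change module state, would defer to
 * qwriter(9F), which lands in qwriter_inner() above.
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static void
xx_set_state(queue_t *q, mblk_t *mp)
{
	/* Runs exclusive at the inner perimeter; safe to touch q_ptr. */
	*(uchar_t *)q->q_ptr = *mp->b_rptr;
	freemsg(mp);
}

static int
xx_wput(queue_t *q, mblk_t *mp)
{
	if (DB_TYPE(mp) == M_CTL) {
		/* Upgrade now if possible, otherwise queue on the syncq. */
		qwriter(q, mp, xx_set_state, PERIM_INNER);
		return (0);
	}
	putnext(q, mp);
	return (0);
}
#endif	/* STREAMS_SKETCHES */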
7306 
7307 /*
7308  * Synchronous callback support functions
7309  */
7310 
7311 /*
7312  * Allocate a callback parameter structure.
7313  * Assumes that caller initializes the flags and the id.
7314  * Acquires SQLOCK(sq) if non-NULL is returned.
7315  */
7316 callbparams_t *
7317 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
7318 {
7319 	callbparams_t *cbp;
7320 	size_t size = sizeof (callbparams_t);
7321 
7322 	cbp = kmem_alloc(size, kmflags & ~KM_PANIC);
7323 
7324 	/*
7325 	 * Only attempt the tryhard allocation if the caller is ready to panic.
7326 	 * Otherwise just fail.
7327 	 */
7328 	if (cbp == NULL) {
7329 		if (kmflags & KM_PANIC)
7330 			cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
7331 			    &size, kmflags);
7332 		else
7333 			return (NULL);
7334 	}
7335 
7336 	ASSERT(size >= sizeof (callbparams_t));
7337 	cbp->cbp_size = size;
7338 	cbp->cbp_sq = sq;
7339 	cbp->cbp_func = func;
7340 	cbp->cbp_arg = arg;
7341 	mutex_enter(SQLOCK(sq));
7342 	cbp->cbp_next = sq->sq_callbpend;
7343 	sq->sq_callbpend = cbp;
7344 	return (cbp);
7345 }
7346 
7347 void
7348 callbparams_free(syncq_t *sq, callbparams_t *cbp)
7349 {
7350 	callbparams_t **pp, *p;
7351 
7352 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7353 
7354 	for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7355 		if (p == cbp) {
7356 			*pp = p->cbp_next;
7357 			kmem_free(p, p->cbp_size);
7358 			return;
7359 		}
7360 	}
7361 	(void) (STRLOG(0, 0, 0, SL_CONSOLE,
7362 	    "callbparams_free: not found\n"));
7363 }
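
/*
 * Aside: callbparams_free() above uses the pointer-to-pointer idiom
 * for singly linked list removal, which needs no special case for the
 * list head.  A minimal standalone sketch of the same idiom (the
 * xx_node_t type is hypothetical):
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
typedef struct xx_node {
	struct xx_node *xn_next;
} xx_node_t;

static void
xx_list_remove(xx_node_t **headp, xx_node_t *target)
{
	xx_node_t **pp, *p;

	/* pp always points at the link that refers to p. */
	for (pp = headp; (p = *pp) != NULL; pp = &p->xn_next) {
		if (p == target) {
			*pp = p->xn_next;	/* head or interior alike */
			return;
		}
	}
}
#endif	/* STREAMS_SKETCHES */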
7364 
7365 void
7366 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
7367 {
7368 	callbparams_t **pp, *p;
7369 
7370 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7371 
7372 	for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7373 		if (p->cbp_id == id && p->cbp_flags == flag) {
7374 			*pp = p->cbp_next;
7375 			kmem_free(p, p->cbp_size);
7376 			return;
7377 		}
7378 	}
7379 	(void) (STRLOG(0, 0, 0, SL_CONSOLE,
7380 	    "callbparams_free_id: not found\n"));
7381 }
7382 
7383 /*
7384  * Callback wrapper function used by once-only callbacks that can be
7385  * cancelled (qtimeout and qbufcall).
7386  * Contains an inline version of entersq(sq, SQ_CALLBACK) that can be
7387  * cancelled by the qun* functions.
7388  */
7389 void
7390 qcallbwrapper(void *arg)
7391 {
7392 	callbparams_t *cbp = arg;
7393 	syncq_t	*sq;
7394 	uint16_t count = 0;
7395 	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7396 	uint16_t type;
7397 
7398 	sq = cbp->cbp_sq;
7399 	mutex_enter(SQLOCK(sq));
7400 	type = sq->sq_type;
7401 	if (!(type & SQ_CICB)) {
7402 		count = sq->sq_count;
7403 		SQ_PUTLOCKS_ENTER(sq);
7404 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7405 		SUM_SQ_PUTCOUNTS(sq, count);
7406 		sq->sq_needexcl++;
7407 		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
7408 		waitflags |= SQ_MESSAGES;
7409 	}
7410 	/* Cannot handle exclusive entry at the outer perimeter */
7411 	ASSERT(type & SQ_COCB);
7412 
7413 	while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7414 		if ((sq->sq_callbflags & cbp->cbp_flags) &&
7415 		    (sq->sq_cancelid == cbp->cbp_id)) {
7416 			/* timeout has been cancelled */
7417 			sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7418 			callbparams_free(sq, cbp);
7419 			if (!(type & SQ_CICB)) {
7420 				ASSERT(sq->sq_needexcl > 0);
7421 				sq->sq_needexcl--;
7422 				if (sq->sq_needexcl == 0) {
7423 					SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7424 				}
7425 				SQ_PUTLOCKS_EXIT(sq);
7426 			}
7427 			mutex_exit(SQLOCK(sq));
7428 			return;
7429 		}
7430 		sq->sq_flags |= SQ_WANTWAKEUP;
7431 		if (!(type & SQ_CICB)) {
7432 			SQ_PUTLOCKS_EXIT(sq);
7433 		}
7434 		cv_wait(&sq->sq_wait, SQLOCK(sq));
7435 		if (!(type & SQ_CICB)) {
7436 			count = sq->sq_count;
7437 			SQ_PUTLOCKS_ENTER(sq);
7438 			SUM_SQ_PUTCOUNTS(sq, count);
7439 		}
7440 	}
7441 
7442 	sq->sq_count++;
7443 	ASSERT(sq->sq_count != 0);	/* Wraparound */
7444 	if (!(type & SQ_CICB)) {
7445 		ASSERT(count == 0);
7446 		sq->sq_flags |= SQ_EXCL;
7447 		ASSERT(sq->sq_needexcl > 0);
7448 		sq->sq_needexcl--;
7449 		if (sq->sq_needexcl == 0) {
7450 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7451 		}
7452 		SQ_PUTLOCKS_EXIT(sq);
7453 	}
7454 
7455 	mutex_exit(SQLOCK(sq));
7456 
7457 	cbp->cbp_func(cbp->cbp_arg);
7458 
7459 	/*
7460 	 * We drop the lock only for leavesq to re-acquire it.
7461 	 * A possible optimization is to inline leavesq.
7462 	 */
7463 	mutex_enter(SQLOCK(sq));
7464 	callbparams_free(sq, cbp);
7465 	mutex_exit(SQLOCK(sq));
7466 	leavesq(sq, SQ_CALLBACK);
7467 }
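
/*
 * Illustrative sketch (compiled out; hypothetical xx_* names):
 * qcallbwrapper() above is what makes qtimeout(9F)/quntimeout(9F)
 * perimeter-safe.  A module would typically use the pair like this:
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
struct xx_state {
	timeout_id_t xx_tid;
};

static void
xx_timer(void *arg)
{
	struct xx_state *xsp = arg;

	/* Entered at the perimeter as if from a put procedure. */
	xsp->xx_tid = 0;
}

static void
xx_arm(queue_t *q, struct xx_state *xsp)
{
	/* Fire once, roughly 100ms from now. */
	xsp->xx_tid = qtimeout(q, xx_timer, xsp, drv_usectohz(100000));
}

static void
xx_disarm(queue_t *q, struct xx_state *xsp)
{
	if (xsp->xx_tid != 0) {
		(void) quntimeout(q, xsp->xx_tid);
		xsp->xx_tid = 0;
	}
}
#endif	/* STREAMS_SKETCHES */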
7468 
7469 /*
7470  * No need to grab sq_putlocks here. See the comment in strsubr.h that
7471  * explains when sq_putlocks are used.
7472  *
7473  * sq_count (or one of the sq_putcounts) has already been
7474  * decremented by the caller, and if SQ_QUEUED, we need to call
7475  * drain_syncq (the global syncq drain).
7476  * If putnext_tail is called with the SQ_EXCL bit set, we are in
7477  * one of two states: either this is a non-CIPUT perimeter and we
7478  * need to clear the bit, or we went exclusive in the put procedure.
7479  * In either case we want to clear the bit now, and it is easier to do
7480  * this at the beginning of this function (remember, we hold
7481  * the SQLOCK).  Lastly, if there are other messages queued
7482  * on the syncq (and not for our destination), enable the syncq
7483  * for background work.
7484  */
7485 
7486 /* ARGSUSED */
7487 void
7488 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
7489 {
7490 	uint16_t	flags = sq->sq_flags;
7491 
7492 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7493 	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
7494 
7495 	/* Clear SQ_EXCL if set in passflags */
7496 	if (passflags & SQ_EXCL) {
7497 		flags &= ~SQ_EXCL;
7498 	}
7499 	if (flags & SQ_WANTWAKEUP) {
7500 		flags &= ~SQ_WANTWAKEUP;
7501 		cv_broadcast(&sq->sq_wait);
7502 	}
7503 	if (flags & SQ_WANTEXWAKEUP) {
7504 		flags &= ~SQ_WANTEXWAKEUP;
7505 		cv_broadcast(&sq->sq_exitwait);
7506 	}
7507 	sq->sq_flags = flags;
7508 
7509 	/*
7510 	 * We have cleared SQ_EXCL if we were asked to, and started
7511 	 * the wakeup process for waiters.  If there are no writers
7512 	 * then we need to drain the syncq if we were told to, or
7513 	 * enable the background thread to do it.
7514 	 */
7515 	if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
7516 		if ((passflags & SQ_QUEUED) ||
7517 		    (sq->sq_svcflags & SQ_DISABLED)) {
7518 			/* drain_syncq will take care of events in the list */
7519 			drain_syncq(sq);
7520 			return;
7521 		} else if (flags & SQ_QUEUED) {
7522 			sqenable(sq);
7523 		}
7524 	}
7525 	/* Drop the SQLOCK on exit */
7526 	mutex_exit(SQLOCK(sq));
7527 	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
7528 	    "putnext_end:(%p, %p, %p) done", NULL, qp, sq);
7529 }
7530 
7531 void
7532 set_qend(queue_t *q)
7533 {
7534 	mutex_enter(QLOCK(q));
7535 	if (!O_SAMESTR(q))
7536 		q->q_flag |= QEND;
7537 	else
7538 		q->q_flag &= ~QEND;
7539 	mutex_exit(QLOCK(q));
7540 	q = _OTHERQ(q);
7541 	mutex_enter(QLOCK(q));
7542 	if (!O_SAMESTR(q))
7543 		q->q_flag |= QEND;
7544 	else
7545 		q->q_flag &= ~QEND;
7546 	mutex_exit(QLOCK(q));
7547 }
7548 
7549 
7550 void
7551 clr_qfull(queue_t *q)
7552 {
7553 	queue_t	*oq = q;
7554 
7555 	q = q->q_nfsrv;
7556 	/* Fast check if there is any work to do before getting the lock. */
7557 	if ((q->q_flag & (QFULL|QWANTW)) == 0) {
7558 		return;
7559 	}
7560 
7561 	/*
7562 	 * Do not reset QFULL (and backenable) if the q_count is the reason
7563 	 * for QFULL being set.
7564 	 */
7565 	mutex_enter(QLOCK(q));
7566 	/*
7567 	 * If the queue is empty, i.e., q_mblkcnt is zero, the queue cannot
7568 	 * be full, so clear QFULL.
7569 	 * Likewise, if both q_count and q_mblkcnt are below the hiwat mark,
7570 	 * clear QFULL.
7571 	 */
7572 	if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7573 	    (q->q_mblkcnt < q->q_hiwat))) {
7574 		q->q_flag &= ~QFULL;
7575 		 * The backenable condition, spelled out:
7576 		 * if someone wants to write (QWANTW is set),
7577 		 * AND
7578 		 *    both counts are less than the lowat mark,
7579 		 *    OR
7580 		 *    the lowat mark is zero,
7581 		 * THEN
7582 		 * clear QWANTW and backenable.
7583 		 * (backenable is called after dropping QLOCK below.)
7584 		 */
7585 		if ((q->q_flag & QWANTW) &&
7586 		    (((q->q_count < q->q_lowat) &&
7587 		    (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7588 			q->q_flag &= ~QWANTW;
7589 			mutex_exit(QLOCK(q));
7590 			backenable(oq, 0);
7591 		} else
7592 			mutex_exit(QLOCK(q));
7593 	} else
7594 		mutex_exit(QLOCK(q));
7595 }
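
/*
 * The two tests in clr_qfull() restated as standalone predicates
 * (illustration only; the real code evaluates them under QLOCK):
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static int
xx_q_still_full(queue_t *q)
{
	/* Full only if non-empty and either count is at/above hiwat. */
	return (q->q_mblkcnt != 0 &&
	    (q->q_count >= q->q_hiwat || q->q_mblkcnt >= q->q_hiwat));
}

static int
xx_q_can_backenable(queue_t *q)
{
	/* A writer is waiting and we drained below lowat (or lowat is 0). */
	return ((q->q_flag & QWANTW) &&
	    ((q->q_count < q->q_lowat && q->q_mblkcnt < q->q_lowat) ||
	    q->q_lowat == 0));
}
#endif	/* STREAMS_SKETCHES */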
7596 
7597 /*
7598  * Set the forward service procedure pointer.
7599  *
7600  * Called at insert-time to cache a queue's next forward service procedure in
7601  * q_nfsrv; used by canput() and canputnext().  If the queue to be inserted
7602  * has a service procedure then q_nfsrv points to itself.  If the queue to be
7603  * inserted does not have a service procedure, then q_nfsrv points to the next
7604  * queue forward that has a service procedure.  If the queue is at the logical
7605  * end of the stream (driver for write side, stream head for the read side)
7606  * and does not have a service procedure, then q_nfsrv also points to itself.
7607  */
7608 void
7609 set_nfsrv_ptr(
7610 	queue_t  *rnew,		/* read queue pointer to new module */
7611 	queue_t  *wnew,		/* write queue pointer to new module */
7612 	queue_t  *prev_rq,	/* read queue pointer to the module above */
7613 	queue_t  *prev_wq)	/* write queue pointer to the module above */
7614 {
7615 	queue_t *qp;
7616 
7617 	if (prev_wq->q_next == NULL) {
7618 		/*
7619 		 * Insert the driver, initialize the driver and stream head.
7620 		 * In this case, prev_rq/prev_wq should be the stream head.
7621 		 * _I_INSERT does not allow inserting a driver.  Make sure
7622 		 * that it is not an insertion.
7623 		 */
7624 		ASSERT(!(rnew->q_flag & _QINSERTING));
7625 		wnew->q_nfsrv = wnew;
7626 		if (rnew->q_qinfo->qi_srvp)
7627 			rnew->q_nfsrv = rnew;
7628 		else
7629 			rnew->q_nfsrv = prev_rq;
7630 		prev_rq->q_nfsrv = prev_rq;
7631 		prev_wq->q_nfsrv = prev_wq;
7632 	} else {
7633 		/*
7634 		 * set up read side q_nfsrv pointer.  This MUST be done
7635 		 * before setting the write side, because the setting of
7636 		 * the write side for a fifo may depend on it.
7637 		 *
7638 		 * Suppose we have a fifo that only has pipemod pushed.
7639 		 * pipemod has no read or write service procedures, so
7640 		 * nfsrv for both pipemod queues points to prev_rq (the
7641 		 * stream read head).  Now push bufmod (which has only a
7642 		 * read service procedure).  Doing the write side first,
7643 		 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7644 		 * is WRONG; the next queue forward from wnew with a
7645 		 * service procedure will be rnew, not the stream read head.
7646 		 * Since the downstream queue (which in the case of a fifo
7647 		 * is the read queue rnew) can affect upstream queues, it
7648 		 * needs to be done first.  Setting up the read side first
7649 		 * sets nfsrv for both pipemod queues to rnew and then
7650 		 * when the write side is set up, wnew->q_nfsrv will also
7651 		 * point to rnew.
7652 		 */
7653 		if (rnew->q_qinfo->qi_srvp) {
7654 			/*
7655 			 * Use _OTHERQ() because, if this is a pipe, the next
7656 			 * module may have been pushed from the other end and
7657 			 * q_next could be a read queue.
7658 			 */
7659 			qp = _OTHERQ(prev_wq->q_next);
7660 			while (qp && qp->q_nfsrv != qp) {
7661 				qp->q_nfsrv = rnew;
7662 				qp = backq(qp);
7663 			}
7664 			rnew->q_nfsrv = rnew;
7665 		} else
7666 			rnew->q_nfsrv = prev_rq->q_nfsrv;
7667 
7668 		/* set up write side q_nfsrv pointer */
7669 		if (wnew->q_qinfo->qi_srvp) {
7670 			wnew->q_nfsrv = wnew;
7671 
7672 			/*
7673 			 * For insertion, need to update nfsrv of the modules
7674 			 * above which do not have a service routine.
7675 			 */
7676 			if (rnew->q_flag & _QINSERTING) {
7677 				for (qp = prev_wq;
7678 				    qp != NULL && qp->q_nfsrv != qp;
7679 				    qp = backq(qp)) {
7680 					qp->q_nfsrv = wnew->q_nfsrv;
7681 				}
7682 			}
7683 		} else {
7684 			if (prev_wq->q_next == prev_rq)
7685 				/*
7686 				 * Since prev_wq/prev_rq are the middle of a
7687 				 * fifo, wnew/rnew will also be the middle of
7688 				 * a fifo and wnew's nfsrv is the same as rnew's.
7689 				 */
7690 				wnew->q_nfsrv = rnew->q_nfsrv;
7691 			else
7692 				wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7693 		}
7694 	}
7695 }
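
/*
 * Illustrative sketch: q_nfsrv is what lets canput(9F)-style checks
 * jump directly to the nearest forward queue that will actually
 * service (and so eventually drain) messages.  Roughly, and ignoring
 * details the real implementation must handle:
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static int
xx_canput_sketch(queue_t *q)
{
	q = q->q_nfsrv;		/* nearest forward queue with a srvp */
	if (q->q_flag & QFULL) {
		mutex_enter(QLOCK(q));
		q->q_flag |= QWANTW;	/* ask to be backenabled later */
		mutex_exit(QLOCK(q));
		return (0);
	}
	return (1);
}
#endif	/* STREAMS_SKETCHES */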
7696 
7697 /*
7698  * Reset the forward service procedure pointer; called at remove-time.
7699  */
7700 void
7701 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7702 {
7703 	queue_t *tmp_qp;
7704 
7705 	/* Reset the write side q_nfsrv pointer for _I_REMOVE */
7706 	if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7707 		for (tmp_qp = backq(wqp);
7708 		    tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7709 		    tmp_qp = backq(tmp_qp)) {
7710 			tmp_qp->q_nfsrv = wqp->q_nfsrv;
7711 		}
7712 	}
7713 
7714 	/* reset the read side q_nfsrv pointer */
7715 	if (rqp->q_qinfo->qi_srvp) {
7716 		if (wqp->q_next) {	/* non-driver case */
7717 			tmp_qp = _OTHERQ(wqp->q_next);
7718 			while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7719 				/* Note that rqp->q_next cannot be NULL */
7720 				ASSERT(rqp->q_next != NULL);
7721 				tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7722 				tmp_qp = backq(tmp_qp);
7723 			}
7724 		}
7725 	}
7726 }
7727 
7728 /*
7729  * This routine should be called after all stream geometry changes to update
7730  * the stream head's cached struio() rd/wr queue pointers. Note that it
7731  * must be called with the stream streamlock()ed.
7732  *
7733  * Note: only enables Synchronous STREAMS for a side of a Stream which has
7734  *	 an explicit synchronous barrier module queue. That is, a queue that
7735  *	 has specified a struio() type.
7736  */
7737 static void
7738 strsetuio(stdata_t *stp)
7739 {
7740 	queue_t *wrq;
7741 
7742 	if (stp->sd_flag & STPLEX) {
7743 		/*
7744 		 * Not a stream head, but a mux, so no Synchronous STREAMS.
7745 		 */
7746 		stp->sd_struiowrq = NULL;
7747 		stp->sd_struiordq = NULL;
7748 		return;
7749 	}
7750 	/*
7751 	 * Scan the write queue(s), while they remain synchronous,
7752 	 * until we find a queue whose qinfo specifies a uio type.
7753 	 */
7754 	wrq = stp->sd_wrq->q_next;
7755 	while (wrq) {
7756 		if (wrq->q_struiot == STRUIOT_NONE) {
7757 			wrq = NULL;
7758 			break;
7759 		}
7760 		if (wrq->q_struiot != STRUIOT_DONTCARE)
7761 			break;
7762 		if (!_SAMESTR(wrq)) {
7763 			wrq = NULL;
7764 			break;
7765 		}
7766 		wrq = wrq->q_next;
7767 	}
7768 	stp->sd_struiowrq = wrq;
7769 	/*
7770 	 * Scan the read queue(s), while they remain synchronous,
7771 	 * until we find a queue whose qinfo specifies a uio type.
7772 	 */
7773 	wrq = stp->sd_wrq->q_next;
7774 	while (wrq) {
7775 		if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7776 			wrq = NULL;
7777 			break;
7778 		}
7779 		if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7780 			break;
7781 		if (!_SAMESTR(wrq)) {
7782 			wrq = NULL;
7783 			break;
7784 		}
7785 		wrq = wrq->q_next;
7786 	}
7787 	stp->sd_struiordq = wrq ? _RD(wrq) : NULL;
7788 }
7789 
7790 /*
7791  * pass_wput unblocks the passthru queues so that
7792  * messages can arrive at the mux's lower read queue before
7793  * the I_LINK/I_UNLINK is acked/nacked.
7794  */
7795 static void
7796 pass_wput(queue_t *q, mblk_t *mp)
7797 {
7798 	syncq_t *sq;
7799 
7800 	sq = _RD(q)->q_syncq;
7801 	if (sq->sq_flags & SQ_BLOCKED)
7802 		unblocksq(sq, SQ_BLOCKED, 0);
7803 	putnext(q, mp);
7804 }
7805 
7806 /*
7807  * Set up queues for the link/unlink.
7808  * Create a new queue and block it and then insert it
7809  * below the stream head on the lower stream.
7810  * This prevents any messages from arriving during the setq
7811  * as well as while the mux is processing the I_LINK/I_UNLINK.
7812  * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7813  * been acked or nacked, or if a message is generated and sent
7814  * down the mux's write put procedure.
7815  * See pass_wput().
7816  *
7817  * After the new queue is inserted, all messages coming from below are
7818  * blocked. The call to strlock will ensure that all activity in the
7819  * stream head read queue syncq is stopped (sq_count drops to zero).
7820  */
7821 static queue_t *
7822 link_addpassthru(stdata_t *stpdown)
7823 {
7824 	queue_t *passq;
7825 	sqlist_t sqlist;
7826 
7827 	passq = allocq();
7828 	STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7829 	/* setq might sleep in allocator - avoid holding locks. */
7830 	setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7831 	    SQ_CI|SQ_CO, B_FALSE);
7832 	claimq(passq);
7833 	blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7834 	insertq(STREAM(passq), passq);
7835 
7836 	/*
7837 	 * Use strlock() to wait for the stream head sq_count to drop to zero
7838 	 * since we are going to change q_ptr in the stream head.  Note that
7839 	 * insertq() doesn't wait for any syncq counts to drop to zero.
7840 	 */
7841 	sqlist.sqlist_head = NULL;
7842 	sqlist.sqlist_index = 0;
7843 	sqlist.sqlist_size = sizeof (sqlist_t);
7844 	sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7845 	strlock(stpdown, &sqlist);
7846 	strunlock(stpdown, &sqlist);
7847 
7848 	releaseq(passq);
7849 	return (passq);
7850 }
7851 
7852 /*
7853  * Let messages flow up into the mux by removing
7854  * the passq.
7855  */
7856 static void
7857 link_rempassthru(queue_t *passq)
7858 {
7859 	claimq(passq);
7860 	removeq(passq);
7861 	releaseq(passq);
7862 	freeq(passq);
7863 }
7864 
7865 /*
7866  * Wait for the condition variable pointed to by `cvp' to be signaled,
7867  * or for `tim' milliseconds to elapse, whichever comes first.  If `tim'
7868  * is negative, then there is no time limit.  If `nosigs' is non-zero,
7869  * then the wait will be non-interruptible.
7870  *
7871  * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
7872  */
7873 clock_t
7874 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7875 {
7876 	clock_t ret, now, tick;
7877 
7878 	if (tim < 0) {
7879 		if (nosigs) {
7880 			cv_wait(cvp, mp);
7881 			ret = 1;
7882 		} else {
7883 			ret = cv_wait_sig(cvp, mp);
7884 		}
7885 	} else if (tim > 0) {
7886 		/*
7887 		 * convert milliseconds to clock ticks
7888 		 */
7889 		tick = MSEC_TO_TICK_ROUNDUP(tim);
7890 		time_to_wait(&now, tick);
7891 		if (nosigs) {
7892 			ret = cv_timedwait(cvp, mp, now);
7893 		} else {
7894 			ret = cv_timedwait_sig(cvp, mp, now);
7895 		}
7896 	} else {
7897 		ret = -1;
7898 	}
7899 	return (ret);
7900 }
7901 
7902 /*
7903  * Wait until the stream head can determine if it is at the mark but
7904  * don't wait forever to prevent a race condition between the "mark" state
7905  * in the stream head and any mark state in the caller/user of this routine.
7906  *
7907  * This is used by sockets; for a socket it would be incorrect
7908  * to return a failure for SIOCATMARK when there is no data in the receive
7909  * queue and the marked urgent data is traveling up the stream.
7910  *
7911  * This routine waits until the mark is known by waiting for one of these
7912  * three events:
7913  *	The stream head read queue becoming non-empty (including an EOF)
7914  *	The STRATMARK flag being set. (Due to a MSGMARKNEXT message.)
7915  *	The STRNOTATMARK flag being set (which indicates that the transport
7916  *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
7917  *	the mark).
7918  *
7919  * The routine returns 1 if the stream is at the mark; 0 if it can
7920  * be determined that the stream is not at the mark.
7921  * If the wait times out and it can't determine
7922  * whether or not the stream might be at the mark the routine will return -1.
7923  *
7924  * Note: This routine should only be used when a mark is pending i.e.,
7925  * in the socket case the SIGURG has been posted.
7926  * Note2: This cannot wake up just because synchronous streams indicate
7927  * that data is available since it is not possible to use the synchronous
7928  * streams interfaces to determine the b_flag value for the data queued below
7929  * the stream head.
7930  */
7931 int
7932 strwaitmark(vnode_t *vp)
7933 {
7934 	struct stdata *stp = vp->v_stream;
7935 	queue_t *rq = _RD(stp->sd_wrq);
7936 	int mark;
7937 
7938 	mutex_enter(&stp->sd_lock);
7939 	while (rq->q_first == NULL &&
7940 	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7941 		stp->sd_flag |= RSLEEP;
7942 
7943 		/* Wait for 100 milliseconds for any state change. */
7944 		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7945 			mutex_exit(&stp->sd_lock);
7946 			return (-1);
7947 		}
7948 	}
7949 	if (stp->sd_flag & STRATMARK)
7950 		mark = 1;
7951 	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7952 		mark = 1;
7953 	else
7954 		mark = 0;
7955 
7956 	mutex_exit(&stp->sd_lock);
7957 	return (mark);
7958 }
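
/*
 * Illustrative sketch (hypothetical function; the real sockfs path is
 * more involved): a SIOCATMARK-style ioctl handler would use
 * strwaitmark() along these lines.
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static int
xx_siocatmark(vnode_t *vp, int *rvalp)
{
	int mark = strwaitmark(vp);

	if (mark == -1)
		return (EWOULDBLOCK);	/* could not determine in time */
	*rvalp = mark;			/* 1 at the mark, 0 not at it */
	return (0);
}
#endif	/* STREAMS_SKETCHES */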
7959 
7960 /*
7961  * Set a read side error. If persist is set, change the socket error
7962  * to persistent. If errfunc is set, install the function as the exported
7963  * error handler.
7964  */
7965 void
7966 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7967 {
7968 	struct stdata *stp = vp->v_stream;
7969 
7970 	mutex_enter(&stp->sd_lock);
7971 	stp->sd_rerror = error;
7972 	if (error == 0 && errfunc == NULL)
7973 		stp->sd_flag &= ~STRDERR;
7974 	else
7975 		stp->sd_flag |= STRDERR;
7976 	if (persist) {
7977 		stp->sd_flag &= ~STRDERRNONPERSIST;
7978 	} else {
7979 		stp->sd_flag |= STRDERRNONPERSIST;
7980 	}
7981 	stp->sd_rderrfunc = errfunc;
7982 	if (error != 0 || errfunc != NULL) {
7983 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
7984 		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
7985 		cv_broadcast(&stp->sd_monitor);			/* ioctllers */
7986 
7987 		mutex_exit(&stp->sd_lock);
7988 		pollwakeup(&stp->sd_pollist, POLLERR);
7989 		mutex_enter(&stp->sd_lock);
7990 
7991 		if (stp->sd_sigflags & S_ERROR)
7992 			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
7993 	}
7994 	mutex_exit(&stp->sd_lock);
7995 }
7996 
7997 /*
7998  * Set a write side error. If persist is set, change the socket error
7999  * to persistent. If errfunc is set, install it as the error handler.
8000  */
8001 void
8002 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
8003 {
8004 	struct stdata *stp = vp->v_stream;
8005 
8006 	mutex_enter(&stp->sd_lock);
8007 	stp->sd_werror = error;
8008 	if (error == 0 && errfunc == NULL)
8009 		stp->sd_flag &= ~STWRERR;
8010 	else
8011 		stp->sd_flag |= STWRERR;
8012 	if (persist) {
8013 		stp->sd_flag &= ~STWRERRNONPERSIST;
8014 	} else {
8015 		stp->sd_flag |= STWRERRNONPERSIST;
8016 	}
8017 	stp->sd_wrerrfunc = errfunc;
8018 	if (error != 0 || errfunc != NULL) {
8019 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
8020 		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
8021 		cv_broadcast(&stp->sd_monitor);			/* ioctllers */
8022 
8023 		mutex_exit(&stp->sd_lock);
8024 		pollwakeup(&stp->sd_pollist, POLLERR);
8025 		mutex_enter(&stp->sd_lock);
8026 
8027 		if (stp->sd_sigflags & S_ERROR)
8028 			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8029 	}
8030 	mutex_exit(&stp->sd_lock);
8031 }
8032 
8033 /*
8034  * Make the stream return 0 (EOF) when all data has been read.
8035  * No effect on write side.
8036  */
8037 void
8038 strseteof(vnode_t *vp, int eof)
8039 {
8040 	struct stdata *stp = vp->v_stream;
8041 
8042 	mutex_enter(&stp->sd_lock);
8043 	if (!eof) {
8044 		stp->sd_flag &= ~STREOF;
8045 		mutex_exit(&stp->sd_lock);
8046 		return;
8047 	}
8048 	stp->sd_flag |= STREOF;
8049 	if (stp->sd_flag & RSLEEP) {
8050 		stp->sd_flag &= ~RSLEEP;
8051 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
8052 	}
8053 
8054 	mutex_exit(&stp->sd_lock);
8055 	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
8056 	mutex_enter(&stp->sd_lock);
8057 
8058 	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
8059 		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
8060 	mutex_exit(&stp->sd_lock);
8061 }
8062 
8063 void
8064 strflushrq(vnode_t *vp, int flag)
8065 {
8066 	struct stdata *stp = vp->v_stream;
8067 
8068 	mutex_enter(&stp->sd_lock);
8069 	flushq(_RD(stp->sd_wrq), flag);
8070 	mutex_exit(&stp->sd_lock);
8071 }
8072 
8073 void
8074 strsetrputhooks(vnode_t *vp, uint_t flags,
8075 		msgfunc_t protofunc, msgfunc_t miscfunc)
8076 {
8077 	struct stdata *stp = vp->v_stream;
8078 
8079 	mutex_enter(&stp->sd_lock);
8080 
8081 	if (protofunc == NULL)
8082 		stp->sd_rprotofunc = strrput_proto;
8083 	else
8084 		stp->sd_rprotofunc = protofunc;
8085 
8086 	if (miscfunc == NULL)
8087 		stp->sd_rmiscfunc = strrput_misc;
8088 	else
8089 		stp->sd_rmiscfunc = miscfunc;
8090 
8091 	if (flags & SH_CONSOL_DATA)
8092 		stp->sd_rput_opt |= SR_CONSOL_DATA;
8093 	else
8094 		stp->sd_rput_opt &= ~SR_CONSOL_DATA;
8095 
8096 	if (flags & SH_SIGALLDATA)
8097 		stp->sd_rput_opt |= SR_SIGALLDATA;
8098 	else
8099 		stp->sd_rput_opt &= ~SR_SIGALLDATA;
8100 
8101 	if (flags & SH_IGN_ZEROLEN)
8102 		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
8103 	else
8104 		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
8105 
8106 	mutex_exit(&stp->sd_lock);
8107 }
8108 
8109 void
8110 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
8111 {
8112 	struct stdata *stp = vp->v_stream;
8113 
8114 	mutex_enter(&stp->sd_lock);
8115 	stp->sd_closetime = closetime;
8116 
8117 	if (flags & SH_SIGPIPE)
8118 		stp->sd_wput_opt |= SW_SIGPIPE;
8119 	else
8120 		stp->sd_wput_opt &= ~SW_SIGPIPE;
8121 	if (flags & SH_RECHECK_ERR)
8122 		stp->sd_wput_opt |= SW_RECHECK_ERR;
8123 	else
8124 		stp->sd_wput_opt &= ~SW_RECHECK_ERR;
8125 
8126 	mutex_exit(&stp->sd_lock);
8127 }
8128 
8129 void
8130 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
8131 {
8132 	struct stdata *stp = vp->v_stream;
8133 
8134 	mutex_enter(&stp->sd_lock);
8135 
8136 	stp->sd_rputdatafunc = rdatafunc;
8137 	stp->sd_wputdatafunc = wdatafunc;
8138 
8139 	mutex_exit(&stp->sd_lock);
8140 }
8141 
8142 /* Used within the framework when the queue is already locked */
8143 void
8144 qenable_locked(queue_t *q)
8145 {
8146 	stdata_t *stp = STREAM(q);
8147 
8148 	ASSERT(MUTEX_HELD(QLOCK(q)));
8149 
8150 	if (!q->q_qinfo->qi_srvp)
8151 		return;
8152 
8153 	/*
8154 	 * Do not place on run queue if already enabled or closing.
8155 	 */
8156 	if (q->q_flag & (QWCLOSE|QENAB))
8157 		return;
8158 
8159 	/*
8160 	 * Mark the queue enabled and place it on the run list if it is not
8161 	 * already being serviced. If it is being serviced, runservice() will
8162 	 * detect that QENAB is set and call the service procedure before
8163 	 * clearing the QINSERVICE flag.
8164 	 */
8165 	q->q_flag |= QENAB;
8166 	if (q->q_flag & QINSERVICE)
8167 		return;
8168 
8169 	/* Record the time of qenable */
8170 	q->q_qtstamp = lbolt;
8171 
8172 	/*
8173 	 * Put the queue in the stp list and schedule it for background
8174 	 * processing if it is not already scheduled and if the stream head
8175 	 * does not intend to process it in the foreground later (which it
8176 	 * signals by setting the STRS_WILLSERVICE flag).
8177 	 */
8178 	mutex_enter(&stp->sd_qlock);
8179 	/*
8180 	 * If there is already something on the list, the stp flags should
8181 	 * show the intention to drain it.
8182 	 */
8183 	IMPLY(STREAM_NEEDSERVICE(stp),
8184 	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));
8185 
8186 	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
8187 	stp->sd_nqueues++;
8188 
8189 	/*
8190 	 * If no one will drain this stream, we are the first producer and
8191 	 * need to schedule it for the background thread.
8192 	 */
8193 	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
8194 		/*
8195 		 * No one will service this stream later, so we have to
8196 		 * schedule it now.
8197 		 */
8198 		STRSTAT(stenables);
8199 		stp->sd_svcflags |= STRS_SCHEDULED;
8200 		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
8201 		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);
8202 
8203 		if (stp->sd_servid == NULL) {
8204 			/*
8205 			 * Task queue failed so fail over to the backup
8206 			 * servicing thread.
8207 			 */
8208 			STRSTAT(taskqfails);
8209 			/*
8210 			 * It is safe to clear STRS_SCHEDULED flag because it
8211 			 * was set by this thread above.
8212 			 */
8213 			stp->sd_svcflags &= ~STRS_SCHEDULED;
8214 
8215 			/*
8216 			 * Failover scheduling is protected by service_queue
8217 			 * lock.
8218 			 */
8219 			mutex_enter(&service_queue);
8220 			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
8221 			ASSERT(q->q_link == NULL);
8222 			/*
8223 			 * Append the queue to qhead/qtail list.
8224 			 */
8225 			if (qhead == NULL)
8226 				qhead = q;
8227 			else
8228 				qtail->q_link = q;
8229 			qtail = q;
8230 			/*
8231 			 * Clear stp queue list.
8232 			 */
8233 			stp->sd_qhead = stp->sd_qtail = NULL;
8234 			stp->sd_nqueues = 0;
8235 			/*
8236 			 * Wakeup background queue processing thread.
8237 			 */
8238 			cv_signal(&services_to_run);
8239 			mutex_exit(&service_queue);
8240 		}
8241 	}
8242 	mutex_exit(&stp->sd_qlock);
8243 }
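
/*
 * Illustrative sketch (hypothetical module): qenable_locked() is the
 * machinery underneath qenable(9F) and backenabling.  The canonical
 * service procedure that depends on it looks like this:
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static int
xx_rsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (!canputnext(q)) {
			/*
			 * Put the message back; when the queue ahead
			 * drains, backenable() will qenable() us and
			 * this routine runs again.
			 */
			(void) putbq(q, mp);
			break;
		}
		putnext(q, mp);
	}
	return (0);
}
#endif	/* STREAMS_SKETCHES */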
8244 
8245 static void
8246 queue_service(queue_t *q)
8247 {
8248 	/*
8249 	 * The queue in the list should have
8250 	 * QENAB flag set and should not have
8251 	 * QINSERVICE flag set. QINSERVICE is
8252 	 * set when the queue is dequeued and
8253 	 * qenable_locked doesn't enqueue a
8254 	 * queue with QINSERVICE set.
8255 	 */
8256 
8257 	ASSERT(!(q->q_flag & QINSERVICE));
8258 	ASSERT((q->q_flag & QENAB));
8259 	mutex_enter(QLOCK(q));
8260 	q->q_flag &= ~QENAB;
8261 	q->q_flag |= QINSERVICE;
8262 	mutex_exit(QLOCK(q));
8263 	runservice(q);
8264 }
8265 
8266 static void
8267 syncq_service(syncq_t *sq)
8268 {
8269 	STRSTAT(syncqservice);
8270 	mutex_enter(SQLOCK(sq));
8271 	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
8272 	ASSERT(sq->sq_servcount != 0);
8273 	ASSERT(sq->sq_next == NULL);
8274 
8275 	/* if we came here from the background thread, clear the flag */
8276 	if (sq->sq_svcflags & SQ_BGTHREAD)
8277 		sq->sq_svcflags &= ~SQ_BGTHREAD;
8278 
8279 	/* let drain_syncq know that it's being called in the background */
8280 	sq->sq_svcflags |= SQ_SERVICE;
8281 	drain_syncq(sq);
8282 }
8283 
8284 static void
8285 qwriter_outer_service(syncq_t *outer)
8286 {
8287 	/*
8288 	 * Note that SQ_WRITER is used on the outer perimeter
8289 	 * to signal that a qwriter(OUTER) is either investigating
8290 	 * whether it can run or is actually running a function.
8291 	 */
8292 	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);
8293 
8294 	/*
8295 	 * All inner syncqs are empty and have SQ_WRITER set
8296 	 * to block entering the outer perimeter.
8297 	 *
8298 	 * We do not need to explicitly call write_now since
8299 	 * outer_exit does it for us.
8300 	 */
8301 	outer_exit(outer);
8302 }
8303 
8304 static void
8305 mblk_free(mblk_t *mp)
8306 {
8307 	dblk_t *dbp = mp->b_datap;
8308 	frtn_t *frp = dbp->db_frtnp;
8309 
8310 	mp->b_next = NULL;
8311 	if (dbp->db_fthdr != NULL)
8312 		str_ftfree(dbp);
8313 
8314 	ASSERT(dbp->db_fthdr == NULL);
8315 	frp->free_func(frp->free_arg);
8316 	ASSERT(dbp->db_mblk == mp);
8317 
8318 	if (dbp->db_credp != NULL) {
8319 		crfree(dbp->db_credp);
8320 		dbp->db_credp = NULL;
8321 	}
8322 	dbp->db_cpid = -1;
8323 	dbp->db_struioflag = 0;
8324 	dbp->db_struioun.cksum.flags = 0;
8325 
8326 	kmem_cache_free(dbp->db_cache, dbp);
8327 }
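
/*
 * Illustrative sketch (hypothetical xx_* names): the frp->free_func()
 * call above is the esballoc(9F) callback path.  A driver loaning its
 * own buffer sets it up roughly like this:
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
struct xx_buf {
	frtn_t	xb_frtn;
	uchar_t	xb_data[2048];
};

static void
xx_buf_free(void *arg)
{
	/* Called from mblk_free() when the last reference goes away. */
	kmem_free(arg, sizeof (struct xx_buf));
}

static mblk_t *
xx_loan_buf(void)
{
	struct xx_buf *xbp;
	mblk_t *mp;

	xbp = kmem_alloc(sizeof (struct xx_buf), KM_NOSLEEP);
	if (xbp == NULL)
		return (NULL);
	xbp->xb_frtn.free_func = xx_buf_free;
	xbp->xb_frtn.free_arg = (caddr_t)xbp;
	mp = esballoc(xbp->xb_data, sizeof (xbp->xb_data), BPRI_MED,
	    &xbp->xb_frtn);
	if (mp == NULL)
		kmem_free(xbp, sizeof (struct xx_buf));
	return (mp);
}
#endif	/* STREAMS_SKETCHES */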
8328 
8329 /*
8330  * Background processing of the stream queue list.
8331  */
8332 static void
8333 stream_service(stdata_t *stp)
8334 {
8335 	queue_t *q;
8336 
8337 	mutex_enter(&stp->sd_qlock);
8338 
8339 	STR_SERVICE(stp, q);
8340 
8341 	stp->sd_svcflags &= ~STRS_SCHEDULED;
8342 	stp->sd_servid = NULL;
8343 	cv_signal(&stp->sd_qcv);
8344 	mutex_exit(&stp->sd_qlock);
8345 }
8346 
8347 /*
8348  * Foreground processing of the stream queue list.
8349  */
8350 void
8351 stream_runservice(stdata_t *stp)
8352 {
8353 	queue_t *q;
8354 
8355 	mutex_enter(&stp->sd_qlock);
8356 	STRSTAT(rservice);
8357 	/*
8358 	 * We are going to drain this stream queue list, so qenable_locked will
8359 	 * not schedule it until we finish.
8360 	 */
8361 	stp->sd_svcflags |= STRS_WILLSERVICE;
8362 
8363 	STR_SERVICE(stp, q);
8364 
8365 	stp->sd_svcflags &= ~STRS_WILLSERVICE;
8366 	mutex_exit(&stp->sd_qlock);
8367 	/*
8368 	 * Help the backup background thread to drain the qhead/qtail list.
8369 	 */
8370 	while (qhead != NULL) {
8371 		STRSTAT(qhelps);
8372 		mutex_enter(&service_queue);
8373 		DQ(q, qhead, qtail, q_link);
8374 		mutex_exit(&service_queue);
8375 		if (q != NULL)
8376 			queue_service(q);
8377 	}
8378 }
8379 
8380 void
8381 stream_willservice(stdata_t *stp)
8382 {
8383 	mutex_enter(&stp->sd_qlock);
8384 	stp->sd_svcflags |= STRS_WILLSERVICE;
8385 	mutex_exit(&stp->sd_qlock);
8386 }
8387 
8388 /*
8389  * Replace the cred currently in the mblk with a different one.
8390  */
8391 void
8392 mblk_setcred(mblk_t *mp, cred_t *cr)
8393 {
8394 	cred_t *ocr = DB_CRED(mp);
8395 
8396 	ASSERT(cr != NULL);
8397 
8398 	if (cr != ocr) {
8399 		crhold(mp->b_datap->db_credp = cr);
8400 		if (ocr != NULL)
8401 			crfree(ocr);
8402 	}
8403 }
8404 
8405 int
8406 hcksum_assoc(mblk_t *mp,  multidata_t *mmd, pdesc_t *pd,
8407     uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
8408     uint32_t flags, int km_flags)
8409 {
8410 	int rc = 0;
8411 
8412 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8413 	if (mp->b_datap->db_type == M_DATA) {
8414 		/* Associate values for M_DATA type */
8415 		DB_CKSUMSTART(mp) = (intptr_t)start;
8416 		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
8417 		DB_CKSUMEND(mp) = (intptr_t)end;
8418 		DB_CKSUMFLAGS(mp) = flags;
8419 		DB_CKSUM16(mp) = (uint16_t)value;
8420 
8421 	} else {
8422 		pattrinfo_t pa_info;
8423 
8424 		ASSERT(mmd != NULL);
8425 
8426 		pa_info.type = PATTR_HCKSUM;
8427 		pa_info.len = sizeof (pattr_hcksum_t);
8428 
8429 		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
8430 			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;
8431 
8432 			hck->hcksum_start_offset = start;
8433 			hck->hcksum_stuff_offset = stuff;
8434 			hck->hcksum_end_offset = end;
8435 			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
8436 			hck->hcksum_flags = flags;
8437 		} else {
8438 			rc = -1;
8439 		}
8440 	}
8441 	return (rc);
8442 }
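
/*
 * Illustrative sketch: a NIC driver that verified a checksum in
 * hardware could mark an inbound M_DATA message like this (the flag
 * choice depends on what the device actually computed):
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static void
xx_mark_cksum_ok(mblk_t *mp)
{
	(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
	    HCK_FULLCKSUM_OK, KM_NOSLEEP);
}
#endif	/* STREAMS_SKETCHES */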
8443 
8444 void
8445 hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8446     uint32_t *start, uint32_t *stuff, uint32_t *end,
8447     uint32_t *value, uint32_t *flags)
8448 {
8449 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8450 	if (mp->b_datap->db_type == M_DATA) {
8451 		if (flags != NULL) {
8452 			*flags = DB_CKSUMFLAGS(mp);
8453 			if (*flags & HCK_PARTIALCKSUM) {
8454 				if (start != NULL)
8455 					*start = (uint32_t)DB_CKSUMSTART(mp);
8456 				if (stuff != NULL)
8457 					*stuff = (uint32_t)DB_CKSUMSTUFF(mp);
8458 				if (end != NULL)
8459 					*end = (uint32_t)DB_CKSUMEND(mp);
8460 				if (value != NULL)
8461 					*value = (uint32_t)DB_CKSUM16(mp);
8462 			} else if ((*flags & HW_LSO) && (value != NULL))
8463 				*value = (uint32_t)DB_LSOMSS(mp);
8464 		}
8465 	} else {
8466 		pattrinfo_t hck_attr = {PATTR_HCKSUM};
8467 
8468 		ASSERT(mmd != NULL);
8469 
8470 		/* get hardware checksum attribute */
8471 		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
8472 			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;
8473 
8474 			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
8475 			if (flags != NULL)
8476 				*flags = hck->hcksum_flags;
8477 			if (start != NULL)
8478 				*start = hck->hcksum_start_offset;
8479 			if (stuff != NULL)
8480 				*stuff = hck->hcksum_stuff_offset;
8481 			if (end != NULL)
8482 				*end = hck->hcksum_end_offset;
8483 			if (value != NULL)
8484 				*value = (uint32_t)
8485 				    hck->hcksum_cksum_val.inet_cksum;
8486 		}
8487 	}
8488 }
8489 
8490 /*
8491  * Checksum the buffer *bp for len bytes, starting from the partial
8492  * checksum psum (or 0 if none), and return the 16-bit partial checksum.
8493  */
8494 unsigned
8495 bcksum(uchar_t *bp, int len, unsigned int psum)
8496 {
8497 	int odd = len & 1;
8498 	extern unsigned int ip_ocsum();
8499 
8500 	if (((intptr_t)bp & 1) == 0 && !odd) {
8501 		/*
8502 		 * Bp is 16-bit aligned and len is a whole number of 16-bit words.
8503 		 */
8504 		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
8505 	}
8506 	if (((intptr_t)bp & 1) != 0) {
8507 		/*
8508 		 * Bp isn't 16-bit aligned.
8509 		 */
8510 		unsigned int tsum;
8511 
8512 #ifdef _LITTLE_ENDIAN
8513 		psum += *bp;
8514 #else
8515 		psum += *bp << 8;
8516 #endif
8517 		len--;
8518 		bp++;
8519 		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
8520 		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
8521 		if (len & 1) {
8522 			bp += len - 1;
8523 #ifdef _LITTLE_ENDIAN
8524 			psum += *bp << 8;
8525 #else
8526 			psum += *bp;
8527 #endif
8528 		}
8529 	} else {
8530 		/*
8531 		 * Bp is 16-bit aligned.
8532 		 */
8533 		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
8534 		if (odd) {
8535 			bp += len - 1;
8536 #ifdef _LITTLE_ENDIAN
8537 			psum += *bp;
8538 #else
8539 			psum += *bp << 8;
8540 #endif
8541 		}
8542 	}
8543 	/*
8544 	 * Normalize psum to 16 bits before returning the new partial
8545 	 * checksum. The max psum value before normalization is 0x3FDFE.
8546 	 */
8547 	return ((psum >> 16) + (psum & 0xFFFF));
8548 }
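
/*
 * Aside: bcksum() returns an unfolded partial sum; a caller producing
 * a final Internet checksum would typically finish it like this
 * (illustrative sketch):
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static uint16_t
xx_cksum_finish(unsigned int psum)
{
	/* Fold any remaining carry, then take the ones' complement. */
	psum = (psum >> 16) + (psum & 0xFFFF);
	psum += (psum >> 16);
	return ((uint16_t)~psum);
}
#endif	/* STREAMS_SKETCHES */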
8549 
8550 boolean_t
8551 is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
8552 {
8553 	boolean_t rc;
8554 
8555 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8556 	if (DB_TYPE(mp) == M_DATA) {
8557 		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
8558 	} else {
8559 		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};
8560 
8561 		ASSERT(mmd != NULL);
8562 		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
8563 	}
8564 	return (rc);
8565 }
8566 
8567 void
8568 freemsgchain(mblk_t *mp)
8569 {
8570 	mblk_t	*next;
8571 
8572 	while (mp != NULL) {
8573 		next = mp->b_next;
8574 		mp->b_next = NULL;
8575 
8576 		freemsg(mp);
8577 		mp = next;
8578 	}
8579 }
8580 
8581 mblk_t *
8582 copymsgchain(mblk_t *mp)
8583 {
8584 	mblk_t	*nmp = NULL;
8585 	mblk_t	**nmpp = &nmp;
8586 
8587 	for (; mp != NULL; mp = mp->b_next) {
8588 		if ((*nmpp = copymsg(mp)) == NULL) {
8589 			freemsgchain(nmp);
8590 			return (NULL);
8591 		}
8592 
8593 		nmpp = &((*nmpp)->b_next);
8594 	}
8595 
8596 	return (nmp);
8597 }
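
/*
 * Illustrative sketch (hypothetical fanout): these helpers operate on
 * b_next-linked chains, e.g. duplicating a chain so two consumers
 * each get a copy:
 */
#ifdef	STREAMS_SKETCHES	/* never defined; illustration only */
static void
xx_send_chain(queue_t *q, mblk_t *chain)
{
	mblk_t *mp, *next;

	for (mp = chain; mp != NULL; mp = next) {
		next = mp->b_next;
		mp->b_next = NULL;
		putnext(q, mp);
	}
}

static void
xx_fanout(queue_t *q1, queue_t *q2, mblk_t *chain)
{
	mblk_t *copy = copymsgchain(chain);

	if (copy != NULL)
		xx_send_chain(q2, copy);
	xx_send_chain(q1, chain);
}
#endif	/* STREAMS_SKETCHES */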
8598 
8599 /* NOTE: Do not add code after this point. */
8600 #undef QLOCK
8601 
8602 /*
8603  * Replacement for the QLOCK macro for those that cannot use it.
8604  */
8605 kmutex_t *
8606 QLOCK(queue_t *q)
8607 {
8608 	return (&(q)->q_lock);
8609 }
8610 
8611 /*
8612  * Dummy runqueues/queuerun functions for backwards compatibility.
8613  */
8614 #undef runqueues
8615 void
8616 runqueues(void)
8617 {
8618 }
8619 
8620 #undef queuerun
8621 void
8622 queuerun(void)
8623 {
8624 }
8625 
8626 /*
8627  * Initialize the STR stack instance, which tracks autopush and persistent
8628  * links.
8629  */
8630 /* ARGSUSED */
8631 static void *
8632 str_stack_init(netstackid_t stackid, netstack_t *ns)
8633 {
8634 	str_stack_t	*ss;
8635 	int i;
8636 
8637 	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
8638 	ss->ss_netstack = ns;
8639 
8640 	/*
8641 	 * set up autopush
8642 	 */
8643 	sad_initspace(ss);
8644 
8645 	/*
8646 	 * set up mux_node structures.
8647 	 */
8648 	ss->ss_devcnt = devcnt;	/* In case it should change before free */
8649 	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
8650 	    ss->ss_devcnt), KM_SLEEP);
8651 	for (i = 0; i < ss->ss_devcnt; i++)
8652 		ss->ss_mux_nodes[i].mn_imaj = i;
8653 	return (ss);
8654 }
8655 
8656 /*
8657  * Note: run at zone shutdown rather than destroy, so that the PLINKs are
8658  * gone by the time other cleanup happens from the destroy callbacks.
8659  */
8660 static void
8661 str_stack_shutdown(netstackid_t stackid, void *arg)
8662 {
8663 	str_stack_t *ss = (str_stack_t *)arg;
8664 	int i;
8665 	cred_t *cr;
8666 
8667 	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
8668 	ASSERT(cr != NULL);
8669 
8670 	/* Undo all the I_PLINKs for this zone */
8671 	for (i = 0; i < ss->ss_devcnt; i++) {
8672 		struct mux_edge		*ep;
8673 		ldi_handle_t		lh;
8674 		ldi_ident_t		li;
8675 		int			ret;
8676 		int			rval;
8677 		dev_t			rdev;
8678 
8679 		ep = ss->ss_mux_nodes[i].mn_outp;
8680 		if (ep == NULL)
8681 			continue;
8682 		ret = ldi_ident_from_major((major_t)i, &li);
8683 		if (ret != 0) {
8684 			continue;
8685 		}
8686 		rdev = ep->me_dev;
8687 		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
8688 		    cr, &lh, li);
8689 		if (ret != 0) {
8690 			ldi_ident_release(li);
8691 			continue;
8692 		}
8693 
8694 		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
8695 		    cr, &rval);
8696 		if (ret) {
8697 			(void) ldi_close(lh, FREAD|FWRITE, cr);
8698 			ldi_ident_release(li);
8699 			continue;
8700 		}
8701 		(void) ldi_close(lh, FREAD|FWRITE, cr);
8702 
8703 		/* Close layered handles */
8704 		ldi_ident_release(li);
8705 	}
8706 	crfree(cr);
8707 
8708 	sad_freespace(ss);
8709 
8710 	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
8711 	ss->ss_mux_nodes = NULL;
8712 }
8713 
8714 /*
8715  * Free the structure; str_stack_shutdown did the other cleanup work.
8716  */
8717 /* ARGSUSED */
8718 static void
8719 str_stack_fini(netstackid_t stackid, void *arg)
8720 {
8721 	str_stack_t	*ss = (str_stack_t *)arg;
8722 
8723 	kmem_free(ss, sizeof (*ss));
8724 }
8725