/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2022 Garrett D'Amore
 * Copyright 2025 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * freebs dispatch requests. Queues are put in the list specified by the
 * `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail' pointers,
 * and freebs requests are put into `freebs_list', which has no tail pointer.
 * All three lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under heavy
 * loads when task queues start to fail, and at that time it may be a good idea
 * to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may itself be
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */
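/*
 * As an illustrative sketch (hedged; this is not the literal code of the
 * service routines below), the dispatch-with-fallback pattern described
 * above looks roughly like:
 *
 *	if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
 *	    q, TQ_NOSLEEP) == TASKQID_INVALID) {
 *		mutex_enter(&service_queue);
 *		STRSTAT(taskqfails);
 *		ENQUEUE(q, qhead, qtail, q_link);
 *		cv_signal(&services_to_run);
 *		mutex_exit(&service_queue);
 *	}
 *
 * i.e. a failed non-sleeping dispatch parks the queue on the backup list
 * and wakes the background queue-service thread.
 */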

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;

/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait 'till executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

static void	*str_stack_init(netstackid_t stackid, netstack_t *ns);
static void	str_stack_shutdown(netstackid_t stackid, void *arg);
static void	str_stack_fini(netstackid_t stackid, void *arg);

/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules and drivers may still
 * depend on this syncq flow control, so we prefer a large default value. For
 * potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;
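/*
 * For example, the following /etc/system line (the value is hypothetical,
 * shown only to illustrate the tuning syntax) would triple the default depth:
 *
 *	set sq_max_size = 30000
 */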

/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 *	perdm_list --> dm_str == streamtab_1
 *		       dm_sq == syncq_1
 *		       dm_ref
 *		       dm_next --> dm_str == streamtab_2
 *				   dm_sq == syncq_2
 *				   dm_ref
 *				   dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 *	perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *		       ^		^   ^		 ^
 *		       |  ______________/   |		 |
 *		       | /		    |		 |
 *	dev_impl:  ...|x|y|...		 module A	 module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
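/*
 * As a hedged sketch (the authoritative logic lives in hold_dm()), looking
 * up an existing perdm entry for a given streamtab amounts to a linear scan
 * of perdm_list under perdm_rwlock:
 *
 *	perdm_t *p;
 *
 *	rw_enter(&perdm_rwlock, RW_WRITER);
 *	for (p = perdm_list; p != NULL; p = p->dm_next) {
 *		if (p->dm_str == str) {
 *			p->dm_ref++;
 *			break;
 *		}
 *	}
 *	rw_exit(&perdm_rwlock);
 *
 * with a new entry (and a new syncq) allocated when no match is found.
 */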
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/* Array of esballoc throttling queues, of length esbq_nelem */
static esb_queue_t *volatile system_esbq_array;
static int esbq_nelem;
static kmutex_t esbq_lock;
static int esbq_log2_cpus_per_q = 0;

/* Scale the system_esbq length by setting number of CPUs per queue. */
uint_t esbq_cpus_per_q = 1;

/*
 * esballoc tunable parameters.
 */
int	esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t	esbq_timeout = 0x8;	/* timeout to process esb queue */

/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static int pass_rput(queue_t *, mblk_t *);
static int pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	pass_rput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {				\
	EQUIV(head, tail);					\
	IMPLY(tail != NULL, tail->link == NULL);		\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {				\
	ASSERT(el->link == NULL);				\
	LISTCHECK(head, tail, link);				\
	if (head == NULL)					\
		head = el;					\
	else							\
		tail->link = el;				\
	tail = el;						\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {				\
	LISTCHECK(head, tail, link);				\
	el = head;						\
	if (head != NULL) {					\
		head = head->link;				\
		if (head == NULL)				\
			tail = NULL;				\
		el->link = NULL;				\
	}							\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return the
 * result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
	LISTCHECK(head, tail, link);				\
	chase = NULL;						\
	succeed = 0;						\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;					\
	if (curr != NULL) {					\
		succeed = 1;					\
		ASSERT(curr == el);				\
		if (chase != NULL)				\
			chase->link = curr->link;		\
		else						\
			head = curr->link;			\
		curr->link = NULL;				\
		if (curr == tail)				\
			tail = chase;				\
	}							\
	LISTCHECK(head, tail, link);				\
}
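/*
 * Usage sketch for the three list macros above (hedged; the real callers
 * are the background service threads and remove_runlist() further below):
 *
 *	queue_t *q, *q_chase, *q_curr;
 *	int removed;
 *
 *	mutex_enter(&service_queue);
 *	ENQUEUE(qp, qhead, qtail, q_link);	- O(1) append via tail
 *	DQ(q, qhead, qtail, q_link);		- pop the head, or NULL
 *	RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
 *	mutex_exit(&service_queue);
 *
 * RMQ() is an O(n) removal of an arbitrary element; `removed' reports
 * whether the element was actually found on the list.
 */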

/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)						\
{								\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {			\
		/* The queue should not be linked anywhere */	\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */ \
		EQUIV(sq->sq_head, sq->sq_tail);		\
		/* Queue may be only enqueued on its syncq */	\
		ASSERT(sq == qp->q_syncq);			\
		/* Check the correctness of SQ_MESSAGES flag */	\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
		/* Sanity check first/last elements of the list */ \
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
		/*						\
		 * Sanity check of priority field: empty queue	\
		 * should have zero priority and nqueues equal	\
		 * to zero.					\
		 */						\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);	\
		/* Sanity check of sq_nqueues field */		\
		EQUIV(sq->sq_head, sq->sq_nqueues);		\
		if (sq->sq_head == NULL) {			\
			sq->sq_head = sq->sq_tail = qp;		\
			sq->sq_flags |= SQ_MESSAGES;		\
		} else if (qp->q_spri == 0) {			\
			qp->q_sqprev = sq->sq_tail;		\
			sq->sq_tail->q_sqnext = qp;		\
			sq->sq_tail = qp;			\
		} else {					\
			/*					\
			 * Put this queue in priority order: higher \
			 * priority gets closer to the head.	\
			 */					\
			queue_t **qpp = &sq->sq_tail;		\
			queue_t *qnext = NULL;			\
								\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;			\
				qpp = &(*qpp)->q_sqprev;	\
			}					\
			qp->q_sqnext = qnext;			\
			qp->q_sqprev = *qpp;			\
			if (*qpp != NULL) {			\
				(*qpp)->q_sqnext = qp;		\
			} else {				\
				sq->sq_head = qp;		\
				sq->sq_pri = sq->sq_head->q_spri; \
			}					\
			*qpp = qp;				\
		}						\
		qp->q_sqflags |= Q_SQQUEUED;			\
		qp->q_sqtstamp = ddi_get_lbolt();		\
		sq->sq_nqueues++;				\
	}							\
}
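/*
 * Worked example of the priority insertion above: if the syncq list
 * currently holds queues with q_spri values 9 -> 4 -> 0 (head to tail) and
 * a queue with q_spri == 6 is enqueued, the tail-to-head scan stops at the
 * q_spri == 9 element, producing 9 -> 6 -> 4 -> 0. sq_pri stays at the
 * head's priority (9); it only changes when the new queue becomes the head.
 */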

/*
 * Remove a queue from the syncq list
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)						\
{								\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);			\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);	\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);		\
	/* Check that the queue is actually in the list */	\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);	\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);	\
	ASSERT(sq->sq_nqueues != 0);				\
	if (qp->q_sqprev == NULL) {				\
		/* First queue on list, make head q_sqnext */	\
		sq->sq_head = qp->q_sqnext;			\
	} else {						\
		/* Make prev->next == next */			\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;		\
	}							\
	if (qp->q_sqnext == NULL) {				\
		/* Last queue on list, make tail sqprev */	\
		sq->sq_tail = qp->q_sqprev;			\
	} else {						\
		/* Make next->prev == prev */			\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;		\
	}							\
	/* clear out references on this queue */		\
	qp->q_sqprev = qp->q_sqnext = NULL;			\
	qp->q_sqflags &= ~Q_SQQUEUED;				\
	/* If there is nothing queued, clear SQ_MESSAGES */	\
	if (sq->sq_head != NULL) {				\
		sq->sq_pri = sq->sq_head->q_spri;		\
	} else {						\
		sq->sq_flags &= ~SQ_MESSAGES;			\
		sq->sq_pri = 0;					\
	}							\
	sq->sq_nqueues--;					\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||	\
	    (sq->sq_flags & SQ_QUEUED) == 0);			\
}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)					\
	{							\
		ASSERT(MUTEX_HELD(QLOCK(qp)));			\
		ASSERT(qp->q_sqhead == NULL ||			\
		    (qp->q_sqtail != NULL &&			\
		    qp->q_sqtail->b_next == NULL));		\
		qp->q_syncqmsgs++;				\
		ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */ \
		if (qp->q_sqhead == NULL) {			\
			qp->q_sqhead = qp->q_sqtail = mp;	\
		} else {					\
			qp->q_sqtail->b_next = mp;		\
			qp->q_sqtail = mp;			\
		}						\
		ASSERT(qp->q_syncqmsgs > 0);			\
		set_qfull(qp);					\
	}

#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {			\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));			\
		if ((sq)->sq_ciputctrl != NULL) {		\
			int i;					\
			int nlocks = (sq)->sq_nciputctrl;	\
			ciputctrl_t *cip = (sq)->sq_ciputctrl;	\
			ASSERT((sq)->sq_type & SQ_CIPUT);	\
			for (i = 0; i <= nlocks; i++) {		\
				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
				cip[i].ciputctrl_count |= SQ_FASTPUT;	\
			}					\
		}						\
}


#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {			\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));			\
		if ((sq)->sq_ciputctrl != NULL) {		\
			int i;					\
			int nlocks = (sq)->sq_nciputctrl;	\
			ciputctrl_t *cip = (sq)->sq_ciputctrl;	\
			ASSERT((sq)->sq_type & SQ_CIPUT);	\
			for (i = 0; i <= nlocks; i++) {		\
				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
				cip[i].ciputctrl_count &= ~SQ_FASTPUT;	\
			}					\
		}						\
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {					\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));			\
	while (stp->sd_qhead != NULL) {				\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);	\
		ASSERT(stp->sd_nqueues > 0);			\
		stp->sd_nqueues--;				\
		ASSERT(!(q->q_flag & QINSERVICE));		\
		mutex_exit(&stp->sd_qlock);			\
		queue_service(q);				\
		mutex_enter(&stp->sd_qlock);			\
	}							\
	ASSERT(stp->sd_nqueues == 0);				\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \
}

/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t	*sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t	*sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}
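	/*
	 * Worked example of the sizing above: on a 6-CPU machine,
	 * highbit(6 - 1) is 3, so n_ciputctrl is rounded up to
	 * 1 << 3 == 8 and then clamped to max_n_ciputctrl (16). Since
	 * 8 >= min_n_ciputctrl (2), the ciputctrl cache is created, each
	 * object being an array of 8 ciputctrl_t structures.
	 */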

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Handle to have autopush and persistent link information per
	 * zone.
	 * Note: uses shutdown hook instead of destroy hook so that the
	 * persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}

/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}

/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t  *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}


		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}

/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t			major;
	cdevsw_impl_t		*dp;
	struct streamtab	*str;
	queue_t			*rq;
	queue_t			*wrq;
	uint32_t		qflag;
	uint32_t		sqtype;
	perdm_t			*dmp;
	int			error;
	int			sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int	error;
	dev_t	dummydev;
	queue_t	*wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * successful open should have done qprocson()
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * the QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done)
 * Thus we can not reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}


/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting of the INSERVICE flag;
 * consequently, it is possible for remove_runlist in strclose
 * to not find the queue on the runlist even though it is QENABLED
 * and not yet INSERVICE, hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait till the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be freed
	 * when the last module/driver is unloaded.
	 * If for PERMOD perimeters queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of these
	 * functions remove the queue from its syncq list, so sqthread will not
	 * try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t  count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
	    curproc->p_pid)) == NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t  n;
	int	error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}

/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * if the lower stream is a pipe/FIFO, return, since link
	 * cycles can not happen on pipes/FIFOs
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}

/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */

static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}

/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}


int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata *stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata *stpdown;
	struct streamtab *str;
	queue_t *passq;
	syncq_t *passyncq;
	queue_t *rq;
	cdevsw_impl_t *dp;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error = 0;
	netstack_t *ns;
	str_stack_t *ss;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	ns = netstack_find_by_cred(crp);
	ASSERT(ns != NULL);
	ss = ns->netstack_str;
	ASSERT(ss != NULL);

	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding mux_node entry past the end of mux_nodes[].
	 * For FIFO's we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
	    linkcycle(stp, stpdown, ss)) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared in the end of mlink when passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while passthru
	 * queue is in-place (it is not a proper module and doesn't have open
	 * entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and these
	 * routines assert that STPLEX is not set. After link_addpassthru()
	 * nothing may come from below since the pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change stream head. So, the
	 * correct sequence of actions is:
	 *
	 * 1) Set STRPLUMB
	 * 2) Call link_addpassthru()
	 * 3) Set STPLEX
	 * 4) Call setq and update the stream state
	 * 5) Clear STPLEX
	 * 6) Call link_rempassthru()
	 * 7) Clear STRPLUMB
	 *
	 * The same sequence applies to munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below the lower mux. This will block
	 * the syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
	 */
1855 passq = link_addpassthru(stpdown);
1856
1857 mutex_enter(&stpdown->sd_lock);
1858 stpdown->sd_flag |= STPLEX;
1859 mutex_exit(&stpdown->sd_lock);
1860
1861 rq = _RD(stpdown->sd_wrq);
1862 /*
1863	 * There may be messages in the stream head's syncq due to messages
1864	 * that arrived before link_addpassthru() was done. To avoid
1865	 * background processing of the syncq happening simultaneously with
1866	 * setq processing, we disable the stream head syncq and wait until
1867	 * the existing background thread finishes working on it.
1868 */
1869 wait_sq_svc(rq->q_syncq);
1870 passyncq = passq->q_syncq;
1871 if (!(passyncq->sq_flags & SQ_BLOCKED))
1872 blocksq(passyncq, SQ_BLOCKED, 0);
1873
1874 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
1875 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
1876 rq->q_ptr = _WR(rq)->q_ptr = NULL;
1877
1878 /* setq might sleep in allocator - avoid holding locks. */
1879 /* Note: we are holding muxifier here. */
1880
1881 str = stp->sd_strtab;
1882 dp = &devimpl[getmajor(vp->v_rdev)];
1883 ASSERT(dp->d_str == str);
1884
1885 qflag = dp->d_qflag;
1886 sqtype = dp->d_sqtype;
1887
1888 /* create perdm_t if needed */
1889 if (NEED_DM(dp->d_dmp, qflag))
1890 dp->d_dmp = hold_dm(str, qflag, sqtype);
1891
1892 dmp = dp->d_dmp;
1893
1894 setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
1895 B_TRUE);
1896
1897 /*
1898 * XXX Remove any "odd" messages from the queue.
1899 * Keep only M_DATA, M_PROTO, M_PCPROTO.
1900 */
1901 error = strdoioctl(stp, &strioc, FNATIVE,
1902 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
1903 if (error != 0)
1904 goto cleanup;
1905
1906 mutex_enter(&fpdown->f_tlock);
1907 fpdown->f_count++;
1908 mutex_exit(&fpdown->f_tlock);
1909
1910 /*
1911	 * If we've made it here, the linkage is all set up, so we should also
1912	 * set up the layered driver linkages.
1913 */
1914
1915 ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1916 if (cmd == I_LINK) {
1917 error = ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1918 } else {
1919 error = ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1920 }
1921
1922 if (error != 0) {
1923 mutex_enter(&fpdown->f_tlock);
1924 fpdown->f_count--;
1925 mutex_exit(&fpdown->f_tlock);
1926 goto cleanup;
1927 }
1928
1929 link_rempassthru(passq);
1930
1931 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);
1932
1933 /*
1934 * Mark the upper stream as having dependent links
1935 * so that strclose can clean it up.
1936 */
1937 if (cmd == I_LINK) {
1938 mutex_enter(&stp->sd_lock);
1939 stp->sd_flag |= STRHASLINKS;
1940 mutex_exit(&stp->sd_lock);
1941 }
1942 /*
1943 * Wake up any other processes that may have been
1944 * waiting on the lower stream. These will all
1945 * error out.
1946 */
1947 mutex_enter(&stpdown->sd_lock);
1948	/* The passthru queue is removed, so we may clear STRPLUMB */
1949 stpdown->sd_flag &= ~STRPLUMB;
1950 cv_broadcast(&rq->q_wait);
1951 cv_broadcast(&_WR(rq)->q_wait);
1952 cv_broadcast(&stpdown->sd_monitor);
1953 mutex_exit(&stpdown->sd_lock);
1954 mutex_exit(&muxifier);
1955 *rvalp = linkp->li_lblk.l_index;
1956 netstack_rele(ss->ss_netstack);
1957 return (0);
1958
1959 cleanup:
1960 lbfree(linkp);
1961
1962 if (!(passyncq->sq_flags & SQ_BLOCKED))
1963 blocksq(passyncq, SQ_BLOCKED, 0);
1964 /*
1965 * Restore the stream head queue and then remove
1966 * the passq. Turn off STPLEX before we turn on
1967 * the stream by removing the passq.
1968 */
1969 rq->q_ptr = _WR(rq)->q_ptr = stpdown;
1970 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
1971 B_TRUE);
1972
1973 mutex_enter(&stpdown->sd_lock);
1974 stpdown->sd_flag &= ~STPLEX;
1975 mutex_exit(&stpdown->sd_lock);
1976
1977 link_rempassthru(passq);
1978
1979 mutex_enter(&stpdown->sd_lock);
1980 stpdown->sd_flag &= ~STRPLUMB;
1981 /* Wakeup anyone waiting for STRPLUMB to clear. */
1982 cv_broadcast(&stpdown->sd_monitor);
1983 mutex_exit(&stpdown->sd_lock);
1984
1985 mutex_exit(&muxifier);
1986 netstack_rele(ss->ss_netstack);
1987 return (error);
1988 }
1989
1990 int
1991	mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1992 {
1993 int ret;
1994 struct file *fpdown;
1995
1996 fpdown = getf(arg);
1997 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1998 if (fpdown != NULL)
1999 releasef(arg);
2000 return (ret);
2001 }
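/*
 * Illustrative userland sketch (not part of this file) of how the
 * I_LINK/I_UNLINK path above is typically driven; "/dev/mux" and
 * "/dev/lower" are hypothetical device names:
 *
 *	#include <stropts.h>
 *	#include <fcntl.h>
 *
 *	int upper = open("/dev/mux", O_RDWR);
 *	int lower = open("/dev/lower", O_RDWR);
 *	int muxid = ioctl(upper, I_LINK, lower);	(enters mlink())
 *	...
 *	(void) ioctl(upper, I_UNLINK, muxid);		(enters munlink())
 *
 * I_PLINK/I_PUNLINK follow the same path but create a persistent link
 * that survives the close of the controlling stream.
 */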
2002
2003 /*
2004 * Unlink a multiplexor link. Stp is the controlling stream for the
2005 * link, and linkp points to the link's entry in the linkinfo list.
2006 * The muxifier lock must be held on entry and is dropped on exit.
2007 *
2008	 * NOTE: Currently it is assumed that the mux processes all the
2009	 * messages sitting on its queue before ACKing the UNLINK. It is the
2010	 * responsibility of the mux to handle all the messages that arrive
2011	 * before UNLINK. If the mux has to send down messages on its lower
2012	 * stream before ACKing I_UNLINK, then it *should* know how to handle
2013	 * messages even after the UNLINK is acked (in fact, it should be able
2014	 * to handle them until we re-block the read side of the pass queue
2015	 * here). If the mux does not open up the lower stream, any messages
2016	 * that arrive during UNLINK will be put in the stream head. If the
2017	 * lower stream is opened up, some messages might land in the stream
2018	 * head, depending on when a message arrived and when the read side
2019	 * of the pass queue was re-blocked.
2020 */
2021 int
2022	munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2023 str_stack_t *ss)
2024 {
2025 struct strioctl strioc;
2026 struct stdata *stpdown;
2027 queue_t *rq, *wrq;
2028 queue_t *passq;
2029 syncq_t *passyncq;
2030 int error = 0;
2031 file_t *fpdown;
2032
2033 ASSERT(MUTEX_HELD(&muxifier));
2034
2035 stpdown = linkp->li_fpdown->f_vnode->v_stream;
2036
2037 /*
2038 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2039 */
2040 mutex_enter(&stpdown->sd_lock);
2041 stpdown->sd_flag |= STRPLUMB;
2042 mutex_exit(&stpdown->sd_lock);
2043
2044 /*
2045	 * Add the passthru queue below the lower mux. This will block the
2046	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2047 */
2048 passq = link_addpassthru(stpdown);
2049
2050 if ((flag & LINKTYPEMASK) == LINKNORMAL)
2051 strioc.ic_cmd = I_UNLINK;
2052 else
2053 strioc.ic_cmd = I_PUNLINK;
2054 strioc.ic_timout = INFTIM;
2055 strioc.ic_len = sizeof (struct linkblk);
2056 strioc.ic_dp = (char *)&linkp->li_lblk;
2057
2058 error = strdoioctl(stp, &strioc, FNATIVE,
2059 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2060
2061 /*
2062 * If there was an error and this is not called via strclose,
2063 * return to the user. Otherwise, pretend there was no error
2064 * and close the link.
2065 */
2066 if (error) {
2067 if (flag & LINKCLOSE) {
2068 cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2069 "unlink ioctl, closing anyway (%d)\n", error);
2070 } else {
2071 link_rempassthru(passq);
2072 mutex_enter(&stpdown->sd_lock);
2073 stpdown->sd_flag &= ~STRPLUMB;
2074 cv_broadcast(&stpdown->sd_monitor);
2075 mutex_exit(&stpdown->sd_lock);
2076 mutex_exit(&muxifier);
2077 return (error);
2078 }
2079 }
2080
2081 mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
2082 fpdown = linkp->li_fpdown;
2083 lbfree(linkp);
2084
2085 /*
2086 * We go ahead and drop muxifier here--it's a nasty global lock that
2087	 * can slow others down. It's okay to do so since attempts to mlink() this
2088 * stream will be stopped because STPLEX is still set in the stdata
2089 * structure, and munlink() is stopped because mux_rmvedge() and
2090 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2091 * respectively. Note that we defer the closef() of fpdown until
2092 * after we drop muxifier since strclose() can call munlinkall().
2093 */
2094 mutex_exit(&muxifier);
2095
2096 wrq = stpdown->sd_wrq;
2097 rq = _RD(wrq);
2098
2099 /*
2100 * Get rid of outstanding service procedure runs, before we make
2101 * it a stream head, since a stream head doesn't have any service
2102 * procedure.
2103 */
2104 disable_svc(rq);
2105 wait_svc(rq);
2106
2107 /*
2108 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2109	 * is queued up to be finished. The mux should take care that nothing
2110	 * is sent down to this queue. We should do it now, as we're going to
2111	 * block passyncq if it was unblocked.
2112 */
2113 if (wrq->q_flag & QPERMOD) {
2114 syncq_t *sq = wrq->q_syncq;
2115
2116 mutex_enter(SQLOCK(sq));
2117 while (wrq->q_sqflags & Q_SQQUEUED) {
2118 sq->sq_flags |= SQ_WANTWAKEUP;
2119 cv_wait(&sq->sq_wait, SQLOCK(sq));
2120 }
2121 mutex_exit(SQLOCK(sq));
2122 }
2123 passyncq = passq->q_syncq;
2124 if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2125
2126 syncq_t *sq, *outer;
2127
2128 /*
2129 * Messages could be flowing from underneath. We will
2130 * block the read side of the passq. This would be
2131 * sufficient for QPAIR and QPERQ muxes to ensure
2132 * that no data is flowing up into this queue
2133	 * and hence no thread is active in this instance of
2134	 * the lower mux. But for QPERMOD and QMTOUTPERIM there
2135 * could be messages on the inner and outer/inner
2136 * syncqs respectively. We will wait for them to drain.
2137	 * Because passq is blocked, messages end up in the syncq,
2138	 * and qfill_syncq() could possibly end up setting QFULL,
2139	 * which will access rq->q_flag. Hence, we have to
2140	 * acquire the QLOCK in setq().
2141	 *
2142	 * XXX Messages can also flow from the top into this
2143	 * queue even though the unlink is over (e.g. some
2144	 * instance of putnext() called from the top that has
2145	 * still not accessed this queue, and also putq(lowerq)?).
2146	 * Solution: how about blocking the l_qtop queue?
2147	 * Do we really care about such pure D_MP muxes?
2148 */
2149
2150 blocksq(passyncq, SQ_BLOCKED, 0);
2151
2152 sq = rq->q_syncq;
2153 if ((outer = sq->sq_outer) != NULL) {
2154
2155 /*
2156	 * We just have to wait for the outer sq_count to
2157	 * drop to zero. As this does not prevent new
2158	 * messages from entering the outer perimeter, this
2159	 * is subject to starvation.
2160	 *
2161	 * NOTE: Because of the blocksq above, messages could
2162 * be in the inner syncq only because of some
2163 * thread holding the outer perimeter exclusively.
2164 * Hence it would be sufficient to wait for the
2165 * exclusive holder of the outer perimeter to drain
2166 * the inner and outer syncqs. But we will not depend
2167 * on this feature and hence check the inner syncqs
2168 * separately.
2169 */
2170 wait_syncq(outer);
2171 }
2172
2173
2174 /*
2175 * There could be messages destined for
2176 * this queue. Let the exclusive holder
2177 * drain it.
2178 */
2179
2180 wait_syncq(sq);
2181 ASSERT((rq->q_flag & QPERMOD) ||
2182 ((rq->q_syncq->sq_head == NULL) &&
2183 (_WR(rq)->q_syncq->sq_head == NULL)));
2184 }
2185
2186 /*
2187	 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special
2188	 * case as we don't disable its syncq or remove it from the syncq
2189	 * service list.
2190 */
2191 if (rq->q_flag & QPERMOD) {
2192 syncq_t *sq = rq->q_syncq;
2193
2194 mutex_enter(SQLOCK(sq));
2195 while (rq->q_sqflags & Q_SQQUEUED) {
2196 sq->sq_flags |= SQ_WANTWAKEUP;
2197 cv_wait(&sq->sq_wait, SQLOCK(sq));
2198 }
2199 mutex_exit(SQLOCK(sq));
2200 }
2201
2202 /*
2203 * flush_syncq changes states only when there are some messages to
2204	 * free, i.e. when it returns a non-zero value.
2205 */
2206 ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2207 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2208
2209 /*
2210 * Nobody else should know about this queue now.
2211 * If the mux did not process the messages before
2212 * acking the I_UNLINK, free them now.
2213 */
2214
2215 flushq(rq, FLUSHALL);
2216 flushq(_WR(rq), FLUSHALL);
2217
2218 /*
2219 * Convert the mux lower queue into a stream head queue.
2220 * Turn off STPLEX before we turn on the stream by removing the passq.
2221 */
2222 rq->q_ptr = wrq->q_ptr = stpdown;
2223 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2224
2225 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2226 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2227
2228 enable_svc(rq);
2229
2230 /*
2231 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2232	 * needs to be set to prevent a reopen() of the stream - such a reopen
2233	 * may try to call the non-existent pass queue open routine and panic.
2234 */
2235 mutex_enter(&stpdown->sd_lock);
2236 stpdown->sd_flag &= ~STPLEX;
2237 mutex_exit(&stpdown->sd_lock);
2238
2239 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2240 ((flag & LINKTYPEMASK) == LINKPERSIST));
2241
2242 /* clean up the layered driver linkages */
2243 if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2244 VERIFY0(ldi_munlink_fp(stp, fpdown, LINKNORMAL));
2245 } else {
2246 VERIFY0(ldi_munlink_fp(stp, fpdown, LINKPERSIST));
2247 }
2248
2249 link_rempassthru(passq);
2250
2251 /*
2252 * Now all plumbing changes are finished and STRPLUMB is no
2253 * longer needed.
2254 */
2255 mutex_enter(&stpdown->sd_lock);
2256 stpdown->sd_flag &= ~STRPLUMB;
2257 cv_broadcast(&stpdown->sd_monitor);
2258 mutex_exit(&stpdown->sd_lock);
2259
2260 (void) closef(fpdown);
2261 return (0);
2262 }
2263
2264 /*
2265 * Unlink all multiplexor links for which stp is the controlling stream.
2266 * Return 0, or a non-zero errno on failure.
2267 */
2268 int
2269	munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss)
2270 {
2271 linkinfo_t *linkp;
2272 int error = 0;
2273
2274 mutex_enter(&muxifier);
2275	while ((linkp = findlinks(stp, 0, flag, ss)) != NULL) {
2276 /*
2277 * munlink() releases the muxifier lock.
2278 */
2279		if ((error = munlink(stp, linkp, flag, crp, rvalp, ss)) != 0)
2280 return (error);
2281 mutex_enter(&muxifier);
2282 }
2283 mutex_exit(&muxifier);
2284 return (0);
2285 }
2286
2287 /*
2288 * A multiplexor link has been made. Add an
2289 * edge to the directed graph.
2290 */
2291 void
2292	mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss)
2293 {
2294 struct mux_node *np;
2295 struct mux_edge *ep;
2296 major_t upmaj;
2297 major_t lomaj;
2298
2299 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2300 lomaj = getmajor(lostp->sd_vnode->v_rdev);
2301 np = &ss->ss_mux_nodes[upmaj];
2302 if (np->mn_outp) {
2303 ep = np->mn_outp;
2304 while (ep->me_nextp)
2305 ep = ep->me_nextp;
2306 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2307 ep = ep->me_nextp;
2308 } else {
2309 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2310 ep = np->mn_outp;
2311 }
2312 ep->me_nextp = NULL;
2313 ep->me_muxid = muxid;
2314 /*
2315 * Save the dev_t for the purposes of str_stack_shutdown.
2316 * str_stack_shutdown assumes that the device allows reopen, since
2317 * this dev_t is the one after any cloning by xx_open().
2318 * Would prefer finding the dev_t from before any cloning,
2319 * but specfs doesn't retain that.
2320 */
2321 ep->me_dev = upstp->sd_vnode->v_rdev;
2322 if (lostp->sd_vnode->v_type == VFIFO)
2323 ep->me_nodep = NULL;
2324 else
2325 ep->me_nodep = &ss->ss_mux_nodes[lomaj];
2326 }
2327
2328 /*
2329 * A multiplexor link has been removed. Remove the
2330 * edge in the directed graph.
2331 */
2332 void
2333	mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
2334 {
2335 struct mux_node *np;
2336 struct mux_edge *ep;
2337 struct mux_edge *pep = NULL;
2338 major_t upmaj;
2339
2340 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2341 np = &ss->ss_mux_nodes[upmaj];
2342 ASSERT(np->mn_outp != NULL);
2343 ep = np->mn_outp;
2344 while (ep) {
2345 if (ep->me_muxid == muxid) {
2346 if (pep)
2347 pep->me_nextp = ep->me_nextp;
2348 else
2349 np->mn_outp = ep->me_nextp;
2350 kmem_free(ep, sizeof (struct mux_edge));
2351 return;
2352 }
2353 pep = ep;
2354 ep = ep->me_nextp;
2355 }
2356 ASSERT(0); /* should not reach here */
2357 }
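/*
 * A minimal sketch, assuming it mirrors what linkcycle() (used by
 * mlink_file() above but not defined in this file) checks against these
 * edges: the link is rejected iff the lower stream's node can already
 * reach the upper stream's node. The function name is hypothetical.
 *
 *	static int
 *	reaches(struct mux_node *from, struct mux_node *target)
 *	{
 *		struct mux_edge *ep;
 *
 *		if (from == target)
 *			return (1);
 *		for (ep = from->mn_outp; ep != NULL; ep = ep->me_nextp) {
 *			if (ep->me_nodep != NULL &&
 *			    reaches(ep->me_nodep, target))
 *				return (1);
 *		}
 *		return (0);
 *	}
 */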
2358
2359 /*
2360 * Translate the device flags (from conf.h) to the corresponding
2361 * qflag and sq_flag (type) values.
2362 */
2363 int
2364	devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
2365 uint32_t *sqtypep)
2366 {
2367 uint32_t qflag = 0;
2368 uint32_t sqtype = 0;
2369
2370 if (devflag & _D_OLD)
2371 goto bad;
2372
2373 /* Inner perimeter presence and scope */
2374 switch (devflag & D_MTINNER_MASK) {
2375 case D_MP:
2376 qflag |= QMTSAFE;
2377 sqtype |= SQ_CI;
2378 break;
2379 case D_MTPERQ|D_MP:
2380 qflag |= QPERQ;
2381 break;
2382 case D_MTQPAIR|D_MP:
2383 qflag |= QPAIR;
2384 break;
2385 case D_MTPERMOD|D_MP:
2386 qflag |= QPERMOD;
2387 break;
2388 default:
2389 goto bad;
2390 }
2391
2392 /* Outer perimeter */
2393 if (devflag & D_MTOUTPERIM) {
2394 switch (devflag & D_MTINNER_MASK) {
2395 case D_MP:
2396 case D_MTPERQ|D_MP:
2397 case D_MTQPAIR|D_MP:
2398 break;
2399 default:
2400 goto bad;
2401 }
2402 qflag |= QMTOUTPERIM;
2403 }
2404
2405 /* Inner perimeter modifiers */
2406 if (devflag & D_MTINNER_MOD) {
2407 switch (devflag & D_MTINNER_MASK) {
2408 case D_MP:
2409 goto bad;
2410 default:
2411 break;
2412 }
2413 if (devflag & D_MTPUTSHARED)
2414 sqtype |= SQ_CIPUT;
2415 if (devflag & _D_MTOCSHARED) {
2416 /*
2417 * The code in putnext assumes that it has the
2418 * highest concurrency by not checking sq_count.
2419 * Thus _D_MTOCSHARED can only be supported when
2420 * D_MTPUTSHARED is set.
2421 */
2422 if (!(devflag & D_MTPUTSHARED))
2423 goto bad;
2424 sqtype |= SQ_CIOC;
2425 }
2426 if (devflag & _D_MTCBSHARED) {
2427 /*
2428 * The code in putnext assumes that it has the
2429 * highest concurrency by not checking sq_count.
2430 * Thus _D_MTCBSHARED can only be supported when
2431 * D_MTPUTSHARED is set.
2432 */
2433 if (!(devflag & D_MTPUTSHARED))
2434 goto bad;
2435 sqtype |= SQ_CICB;
2436 }
2437 if (devflag & _D_MTSVCSHARED) {
2438 /*
2439 * The code in putnext assumes that it has the
2440 * highest concurrency by not checking sq_count.
2441 * Thus _D_MTSVCSHARED can only be supported when
2442 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
2443 * supported only for QPERMOD.
2444 */
2445 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
2446 goto bad;
2447 sqtype |= SQ_CISVC;
2448 }
2449 }
2450
2451 /* Default outer perimeter concurrency */
2452 sqtype |= SQ_CO;
2453
2454 /* Outer perimeter modifiers */
2455 if (devflag & D_MTOCEXCL) {
2456 if (!(devflag & D_MTOUTPERIM)) {
2457 /* No outer perimeter */
2458 goto bad;
2459 }
2460 sqtype &= ~SQ_COOC;
2461 }
2462
2463 /* Synchronous Streams extended qinit structure */
2464 if (devflag & D_SYNCSTR)
2465 qflag |= QSYNCSTR;
2466
2467 /*
2468 * Private flag used by a transport module to indicate
2469 * to sockfs that it supports direct-access mode without
2470 * having to go through STREAMS.
2471 */
2472 if (devflag & _D_DIRECT) {
2473 /* Reject unless the module is fully-MT (no perimeter) */
2474 if ((qflag & QMT_TYPEMASK) != QMTSAFE)
2475 goto bad;
2476 qflag |= _QDIRECT;
2477 }
2478
2479 /*
2480 * Private flag used to indicate that a streams module should only
2481 * be pushed once. The TTY streams modules have this flag since if
2482	 * libc believes itself to be an xpg4 process, then it will
2483	 * automatically and unconditionally push them when a PTS device is
2484	 * opened. If an application is not aware of this, then without this
2485	 * flag we would end up with duplicate modules.
2486 */
2487 if (devflag & _D_SINGLE_INSTANCE)
2488 qflag |= _QSINGLE_INSTANCE;
2489
2490 *qflagp = qflag;
2491 *sqtypep = sqtype;
2492 return (0);
2493
2494 bad:
2495 cmn_err(CE_WARN,
2496 "stropen: bad MT flags (0x%x) in driver '%s'",
2497 (int)(qflag & D_MTSAFETY_MASK),
2498 stp->st_rdinit->qi_minfo->mi_idname);
2499
2500 return (EINVAL);
2501 }
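/*
 * Worked example of the translation above; the values follow directly
 * from the code, and "strtab" is a hypothetical streamtab pointer:
 *
 *	uint32_t qflag, sqtype;
 *
 *	(void) devflg_to_qflag(strtab, D_MP | D_MTOUTPERIM, &qflag, &sqtype);
 *	now qflag == (QMTSAFE | QMTOUTPERIM), sqtype == (SQ_CI | SQ_CO);
 *	with devflag == (D_MP | D_MTPERMOD) instead, qflag == QPERMOD and
 *	sqtype == SQ_CO.
 */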
2502
2503 /*
2504 * Set the interface values for a pair of queues (qinit structure,
2505 * packet sizes, water marks).
2506 * setq assumes that the caller does not have a claim (entersq or claimq)
2507 * on the queue.
2508 */
2509 void
2510	setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
2511 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
2512 {
2513 queue_t *wq;
2514 syncq_t *sq, *outer;
2515
2516 ASSERT(rq->q_flag & QREADR);
2517 ASSERT((qflag & QMT_TYPEMASK) != 0);
2518 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
2519
2520 wq = _WR(rq);
2521 rq->q_qinfo = rinit;
2522 rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
2523 rq->q_lowat = rinit->qi_minfo->mi_lowat;
2524 rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
2525 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
2526 wq->q_qinfo = winit;
2527 wq->q_hiwat = winit->qi_minfo->mi_hiwat;
2528 wq->q_lowat = winit->qi_minfo->mi_lowat;
2529 wq->q_minpsz = winit->qi_minfo->mi_minpsz;
2530 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;
2531
2532 /* Remove old syncqs */
2533 sq = rq->q_syncq;
2534 outer = sq->sq_outer;
2535 if (outer != NULL) {
2536 ASSERT(wq->q_syncq->sq_outer == outer);
2537 outer_remove(outer, rq->q_syncq);
2538 if (wq->q_syncq != rq->q_syncq)
2539 outer_remove(outer, wq->q_syncq);
2540 }
2541 ASSERT(sq->sq_outer == NULL);
2542 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2543
2544 if (sq != SQ(rq)) {
2545 if (!(rq->q_flag & QPERMOD))
2546 free_syncq(sq);
2547 if (wq->q_syncq == rq->q_syncq)
2548 wq->q_syncq = NULL;
2549 rq->q_syncq = NULL;
2550 }
2551 if (wq->q_syncq != NULL && wq->q_syncq != sq &&
2552 wq->q_syncq != SQ(rq)) {
2553 free_syncq(wq->q_syncq);
2554 wq->q_syncq = NULL;
2555 }
2556 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
2557 rq->q_syncq->sq_tail == NULL));
2558 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
2559 wq->q_syncq->sq_tail == NULL));
2560
2561 if (!(rq->q_flag & QPERMOD) &&
2562 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
2563 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2564 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
2565 rq->q_syncq->sq_nciputctrl, 0);
2566 ASSERT(ciputctrl_cache != NULL);
2567 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
2568 rq->q_syncq->sq_ciputctrl = NULL;
2569 rq->q_syncq->sq_nciputctrl = 0;
2570 }
2571
2572 if (!(wq->q_flag & QPERMOD) &&
2573 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
2574 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2575 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
2576 wq->q_syncq->sq_nciputctrl, 0);
2577 ASSERT(ciputctrl_cache != NULL);
2578 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
2579 wq->q_syncq->sq_ciputctrl = NULL;
2580 wq->q_syncq->sq_nciputctrl = 0;
2581 }
2582
2583 sq = SQ(rq);
2584 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
2585 ASSERT(sq->sq_outer == NULL);
2586 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2587
2588 /*
2589 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
2590 * bits in sq_flag based on the sqtype.
2591 */
2592 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);
2593
2594 rq->q_syncq = wq->q_syncq = sq;
2595 sq->sq_type = sqtype;
2596 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);
2597
2598 /*
2599 * We are making sq_svcflags zero,
2600 * resetting SQ_DISABLED in case it was set by
2601 * wait_svc() in the munlink path.
2602 *
2603 */
2604 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
2605 sq->sq_svcflags = 0;
2606
2607 /*
2608 * We need to acquire the lock here for the mlink and munlink case,
2609 * where canputnext, backenable, etc can access the q_flag.
2610 */
2611 if (lock_needed) {
2612 mutex_enter(QLOCK(rq));
2613 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2614 mutex_exit(QLOCK(rq));
2615 mutex_enter(QLOCK(wq));
2616 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2617 mutex_exit(QLOCK(wq));
2618 } else {
2619 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2620 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2621 }
2622
2623 if (qflag & QPERQ) {
2624 /* Allocate a separate syncq for the write side */
2625 sq = new_syncq();
2626 sq->sq_type = rq->q_syncq->sq_type;
2627 sq->sq_flags = rq->q_syncq->sq_flags;
2628 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2629 sq->sq_oprev == NULL);
2630 wq->q_syncq = sq;
2631 }
2632 if (qflag & QPERMOD) {
2633 sq = dmp->dm_sq;
2634
2635 /*
2636 * Assert that we do have an inner perimeter syncq and that it
2637 * does not have an outer perimeter associated with it.
2638 */
2639 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2640 sq->sq_oprev == NULL);
2641 rq->q_syncq = wq->q_syncq = sq;
2642 }
2643 if (qflag & QMTOUTPERIM) {
2644 outer = dmp->dm_sq;
2645
2646 ASSERT(outer->sq_outer == NULL);
2647 outer_insert(outer, rq->q_syncq);
2648 if (wq->q_syncq != rq->q_syncq)
2649 outer_insert(outer, wq->q_syncq);
2650 }
2651 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2652 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2653 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2654 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2655 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));
2656
2657 /*
2658 * Initialize struio() types.
2659 */
2660 rq->q_struiot =
2661 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
2662 wq->q_struiot =
2663 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
2664 }
2665
2666 perdm_t *
2667	hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
2668 {
2669 syncq_t *sq;
2670 perdm_t **pp;
2671 perdm_t *p;
2672 perdm_t *dmp;
2673
2674 ASSERT(str != NULL);
2675 ASSERT(qflag & (QPERMOD | QMTOUTPERIM));
2676
2677 rw_enter(&perdm_rwlock, RW_READER);
2678 for (p = perdm_list; p != NULL; p = p->dm_next) {
2679 if (p->dm_str == str) { /* found one */
2680 atomic_inc_32(&(p->dm_ref));
2681 rw_exit(&perdm_rwlock);
2682 return (p);
2683 }
2684 }
2685 rw_exit(&perdm_rwlock);
2686
2687 sq = new_syncq();
2688 if (qflag & QPERMOD) {
2689 sq->sq_type = sqtype | SQ_PERMOD;
2690 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
2691 } else {
2692 ASSERT(qflag & QMTOUTPERIM);
2693 sq->sq_onext = sq->sq_oprev = sq;
2694 }
2695
2696 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
2697 dmp->dm_sq = sq;
2698 dmp->dm_str = str;
2699 dmp->dm_ref = 1;
2700 dmp->dm_next = NULL;
2701
2702 rw_enter(&perdm_rwlock, RW_WRITER);
2703 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
2704 if (p->dm_str == str) { /* already present */
2705 p->dm_ref++;
2706 rw_exit(&perdm_rwlock);
2707 free_syncq(sq);
2708 kmem_free(dmp, sizeof (perdm_t));
2709 return (p);
2710 }
2711 }
2712
2713 *pp = dmp;
2714 rw_exit(&perdm_rwlock);
2715 return (dmp);
2716 }
2717
2718 void
2719	rele_dm(perdm_t *dmp)
2720 {
2721 perdm_t **pp;
2722 perdm_t *p;
2723
2724 rw_enter(&perdm_rwlock, RW_WRITER);
2725 ASSERT(dmp->dm_ref > 0);
2726
2727 if (--dmp->dm_ref > 0) {
2728 rw_exit(&perdm_rwlock);
2729 return;
2730 }
2731
2732 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
2733 if (p == dmp)
2734 break;
2735 ASSERT(p == dmp);
2736 *pp = p->dm_next;
2737 rw_exit(&perdm_rwlock);
2738
2739 /*
2740 * Wait for any background processing that relies on the
2741 * syncq to complete before it is freed.
2742 */
2743 wait_sq_svc(p->dm_sq);
2744 free_syncq(p->dm_sq);
2745 kmem_free(p, sizeof (perdm_t));
2746 }
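/*
 * Sketch of the expected pairing (see also the NEED_DM() check earlier
 * in this file): every hold_dm() reference must eventually be dropped
 * with rele_dm(), which frees the perdm_t and its syncq once the last
 * reference goes away.
 *
 *	perdm_t *dmp = hold_dm(str, qflag, sqtype);	(dm_ref++ or create)
 *	...
 *	rele_dm(dmp);					(dm_ref--, freed at 0)
 */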
2747
2748 /*
2749 * Make a protocol message given control and data buffers.
2750 * n.b., this can block; be careful of what locks you hold when calling it.
2751 *
2752 * If sd_maxblk is less than *iosize this routine can fail part way through
2753 * (due to an allocation failure). In this case on return *iosize will contain
2754	 * the amount that was consumed. Otherwise *iosize will not be modified,
2755 * i.e. it will contain the amount that was consumed.
2756 */
2757 int
2758	strmakemsg(
2759 struct strbuf *mctl,
2760 ssize_t *iosize,
2761 struct uio *uiop,
2762 stdata_t *stp,
2763 int32_t flag,
2764 mblk_t **mpp)
2765 {
2766 mblk_t *mpctl = NULL;
2767 mblk_t *mpdata = NULL;
2768 int error;
2769
2770 ASSERT(uiop != NULL);
2771
2772 *mpp = NULL;
2773 /* Create control part, if any */
2774 if ((mctl != NULL) && (mctl->len >= 0)) {
2775 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2776 if (error)
2777 return (error);
2778 }
2779 /* Create data part, if any */
2780 if (*iosize >= 0) {
2781 error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2782 if (error) {
2783 freemsg(mpctl);
2784 return (error);
2785 }
2786 }
2787 if (mpctl != NULL) {
2788 if (mpdata != NULL)
2789 linkb(mpctl, mpdata);
2790 *mpp = mpctl;
2791 } else {
2792 *mpp = mpdata;
2793 }
2794 return (0);
2795 }
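/*
 * A minimal caller-side sketch (hypothetical locals): build a message
 * from a control buffer plus the caller's uio and send it downstream.
 *
 *	struct strbuf ctl;
 *	ssize_t iosize = uiop->uio_resid;
 *	mblk_t *mp;
 *
 *	ctl.len = ctllen;
 *	ctl.buf = ctlbuf;
 *	error = strmakemsg(&ctl, &iosize, uiop, stp, flag, &mp);
 *	if (error == 0 && mp != NULL)
 *		putnext(stp->sd_wrq, mp);
 */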
2796
2797 /*
2798 * Make the control part of a protocol message given a control buffer.
2799 * n.b., this can block; be careful of what locks you hold when calling it.
2800 */
2801 int
2802	strmakectl(
2803 struct strbuf *mctl,
2804 int32_t flag,
2805 int32_t fflag,
2806 mblk_t **mpp)
2807 {
2808 mblk_t *bp = NULL;
2809 unsigned char msgtype;
2810 int error = 0;
2811 cred_t *cr = CRED();
2812
2813 /* We do not support interrupt threads using the stream head to send */
2814 ASSERT(cr != NULL);
2815
2816 *mpp = NULL;
2817 /*
2818 * Create control part of message, if any.
2819 */
2820 if ((mctl != NULL) && (mctl->len >= 0)) {
2821 caddr_t base;
2822 int ctlcount;
2823 int allocsz;
2824
2825 if (flag & RS_HIPRI)
2826 msgtype = M_PCPROTO;
2827 else
2828 msgtype = M_PROTO;
2829
2830 ctlcount = mctl->len;
2831 base = mctl->buf;
2832
2833 /*
2834 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2835 * blocks by increasing the size to something more usable.
2836 */
2837 allocsz = MAX(ctlcount, 64);
2838
2839 /*
2840 * Range checking has already been done; simply try
2841 * to allocate a message block for the ctl part.
2842 */
2843 while ((bp = allocb_cred(allocsz, cr,
2844 curproc->p_pid)) == NULL) {
2845 if (fflag & (FNDELAY|FNONBLOCK))
2846 return (EAGAIN);
2847			if ((error = strwaitbuf(allocsz, BPRI_MED)) != 0)
2848 return (error);
2849 }
2850
2851 bp->b_datap->db_type = msgtype;
2852 if (copyin(base, bp->b_wptr, ctlcount)) {
2853 freeb(bp);
2854 return (EFAULT);
2855 }
2856 bp->b_wptr += ctlcount;
2857 }
2858 *mpp = bp;
2859 return (0);
2860 }
2861
2862 /*
2863 * Make a protocol message given data buffers.
2864 * n.b., this can block; be careful of what locks you hold when calling it.
2865 *
2866 * If sd_maxblk is less than *iosize this routine can fail part way through
2867 * (due to an allocation failure). In this case on return *iosize will contain
2868	 * the amount that was consumed. Otherwise *iosize will not be modified,
2869 * i.e. it will contain the amount that was consumed.
2870 */
2871 int
2872	strmakedata(
2873 ssize_t *iosize,
2874 struct uio *uiop,
2875 stdata_t *stp,
2876 int32_t flag,
2877 mblk_t **mpp)
2878 {
2879 mblk_t *mp = NULL;
2880 mblk_t *bp;
2881 int wroff = (int)stp->sd_wroff;
2882 int tail_len = (int)stp->sd_tail;
2883 int extra = wroff + tail_len;
2884 int error = 0;
2885 ssize_t maxblk;
2886 ssize_t count = *iosize;
2887 cred_t *cr;
2888
2889 *mpp = NULL;
2890 if (count < 0)
2891 return (0);
2892
2893 /* We do not support interrupt threads using the stream head to send */
2894 cr = CRED();
2895 ASSERT(cr != NULL);
2896
2897 maxblk = stp->sd_maxblk;
2898 if (maxblk == INFPSZ)
2899 maxblk = count;
2900
2901 /*
2902 * Create data part of message, if any.
2903 */
2904 do {
2905 ssize_t size;
2906 dblk_t *dp;
2907
2908 ASSERT(uiop);
2909
2910 size = MIN(count, maxblk);
2911
2912 while ((bp = allocb_cred(size + extra, cr,
2913 curproc->p_pid)) == NULL) {
2914 error = EAGAIN;
2915 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
2916 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) {
2917 if (count == *iosize) {
2918 freemsg(mp);
2919 return (error);
2920 } else {
2921 *iosize -= count;
2922 *mpp = mp;
2923 return (0);
2924 }
2925 }
2926 }
2927 dp = bp->b_datap;
2928 dp->db_cpid = curproc->p_pid;
2929 ASSERT(wroff <= dp->db_lim - bp->b_wptr);
2930 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;
2931
2932 if (flag & STRUIO_POSTPONE) {
2933 /*
2934 * Setup the stream uio portion of the
2935 * dblk for subsequent use by struioget().
2936 */
2937 dp->db_struioflag = STRUIO_SPEC;
2938 dp->db_cksumstart = 0;
2939 dp->db_cksumstuff = 0;
2940 dp->db_cksumend = size;
2941 *(long long *)dp->db_struioun.data = 0ll;
2942 bp->b_wptr += size;
2943 } else {
2944 if (stp->sd_copyflag & STRCOPYCACHED)
2945 uiop->uio_extflg |= UIO_COPY_CACHED;
2946
2947 if (size != 0) {
2948 error = uiomove(bp->b_wptr, size, UIO_WRITE,
2949 uiop);
2950 if (error != 0) {
2951 freeb(bp);
2952 freemsg(mp);
2953 return (error);
2954 }
2955 }
2956 bp->b_wptr += size;
2957
2958 if (stp->sd_wputdatafunc != NULL) {
2959 mblk_t *newbp;
2960
2961 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode,
2962 bp, NULL, NULL, NULL, NULL);
2963 if (newbp == NULL) {
2964 freeb(bp);
2965 freemsg(mp);
2966 return (ECOMM);
2967 }
2968 bp = newbp;
2969 }
2970 }
2971
2972 count -= size;
2973
2974 if (mp == NULL)
2975 mp = bp;
2976 else
2977 linkb(mp, bp);
2978 } while (count > 0);
2979
2980 *mpp = mp;
2981 return (0);
2982 }
2983
2984 /*
2985	 * Wait for a buffer to become available. Return a non-zero errno
2986	 * if not able to wait, 0 if a buffer is probably there.
2987 */
2988 int
2989	strwaitbuf(size_t size, int pri)
2990 {
2991 bufcall_id_t id;
2992
2993 mutex_enter(&bcall_monitor);
2994 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
2995 &ttoproc(curthread)->p_flag_cv)) == 0) {
2996 mutex_exit(&bcall_monitor);
2997 return (ENOSR);
2998 }
2999 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
3000 unbufcall(id);
3001 mutex_exit(&bcall_monitor);
3002 return (EINTR);
3003 }
3004 unbufcall(id);
3005 mutex_exit(&bcall_monitor);
3006 return (0);
3007 }
3008
3009 /*
3010 * This function waits for a read or write event to happen on a stream.
3011 * fmode can specify FNDELAY and/or FNONBLOCK.
3012 * The timeout is in ms with -1 meaning infinite.
3013 * The flag values work as follows:
3014 * READWAIT Check for read side errors, send M_READ
3015 * GETWAIT Check for read side errors, no M_READ
3016 * WRITEWAIT Check for write side errors.
3017 * NOINTR Do not return error if nonblocking or timeout.
3018 * STR_NOERROR Ignore all errors except STPLEX.
3019 * STR_NOSIG Ignore/hold signals during the duration of the call.
3020	 * STR_PEEK	Pass the STR_PEEK flag through to strgeterr().
3021 */
3022 int
3023	strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
3024 int *done)
3025 {
3026 int slpflg, errs;
3027 int error;
3028 kcondvar_t *sleepon;
3029 mblk_t *mp;
3030 ssize_t *rd_count;
3031 clock_t rval;
3032
3033 ASSERT(MUTEX_HELD(&stp->sd_lock));
3034 if ((flag & READWAIT) || (flag & GETWAIT)) {
3035 slpflg = RSLEEP;
3036 sleepon = &_RD(stp->sd_wrq)->q_wait;
3037 errs = STRDERR|STPLEX;
3038 } else {
3039 slpflg = WSLEEP;
3040 sleepon = &stp->sd_wrq->q_wait;
3041 errs = STWRERR|STRHUP|STPLEX;
3042 }
3043 if (flag & STR_NOERROR)
3044 errs = STPLEX;
3045
3046 if (stp->sd_wakeq & slpflg) {
3047 /*
3048 * A strwakeq() is pending, no need to sleep.
3049 */
3050 stp->sd_wakeq &= ~slpflg;
3051 *done = 0;
3052 return (0);
3053 }
3054
3055 if (stp->sd_flag & errs) {
3056 /*
3057 * Check for errors before going to sleep since the
3058 * caller might not have checked this while holding
3059 * sd_lock.
3060 */
3061 error = strgeterr(stp, errs, (flag & STR_PEEK));
3062 if (error != 0) {
3063 *done = 1;
3064 return (error);
3065 }
3066 }
3067
3068 /*
3069 * If any module downstream has requested read notification
3070 * by setting SNDMREAD flag using M_SETOPTS, send a message
3071 * down stream.
3072 */
3073 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
3074 mutex_exit(&stp->sd_lock);
3075 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
3076 (flag & STR_NOSIG), &error))) {
3077 mutex_enter(&stp->sd_lock);
3078 *done = 1;
3079 return (error);
3080 }
3081 mp->b_datap->db_type = M_READ;
3082 rd_count = (ssize_t *)mp->b_wptr;
3083 *rd_count = count;
3084 mp->b_wptr += sizeof (ssize_t);
3085 /*
3086 * Send the number of bytes requested by the
3087 * read as the argument to M_READ.
3088 */
3089 stream_willservice(stp);
3090 putnext(stp->sd_wrq, mp);
3091 stream_runservice(stp);
3092 mutex_enter(&stp->sd_lock);
3093
3094 /*
3095 * If any data arrived due to inline processing
3096 * of putnext(), don't sleep.
3097 */
3098 if (_RD(stp->sd_wrq)->q_first != NULL) {
3099 *done = 0;
3100 return (0);
3101 }
3102 }
3103
3104 if (fmode & (FNDELAY|FNONBLOCK)) {
3105 if (!(flag & NOINTR))
3106 error = EAGAIN;
3107 else
3108 error = 0;
3109 *done = 1;
3110 return (error);
3111 }
3112
3113 stp->sd_flag |= slpflg;
3114 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
3115 "strwaitq sleeps (2):%p, %X, %lX, %X, %p",
3116 stp, flag, count, fmode, done);
3117
3118 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
3119 if (rval > 0) {
3120 /* EMPTY */
3121 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
3122 "strwaitq awakes(2):%X, %X, %X, %X, %X",
3123 stp, flag, count, fmode, done);
3124 } else if (rval == 0) {
3125 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
3126 "strwaitq interrupt #2:%p, %X, %lX, %X, %p",
3127 stp, flag, count, fmode, done);
3128 stp->sd_flag &= ~slpflg;
3129 cv_broadcast(sleepon);
3130 if (!(flag & NOINTR))
3131 error = EINTR;
3132 else
3133 error = 0;
3134 *done = 1;
3135 return (error);
3136 } else {
3137 /* timeout */
3138 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
3139 "strwaitq timeout:%p, %X, %lX, %X, %p",
3140 stp, flag, count, fmode, done);
3141 *done = 1;
3142 if (!(flag & NOINTR))
3143 return (ETIME);
3144 else
3145 return (0);
3146 }
3147 /*
3148 * If the caller implements delayed errors (i.e. queued after data)
3149	 * we cannot check for errors here since data as well as an
3150 * error might have arrived at the stream head. We return to
3151 * have the caller check the read queue before checking for errors.
3152 */
3153 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
3154 error = strgeterr(stp, errs, (flag & STR_PEEK));
3155 if (error != 0) {
3156 *done = 1;
3157 return (error);
3158 }
3159 }
3160 *done = 0;
3161 return (0);
3162 }
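/*
 * A minimal caller-side sketch (hypothetical; sd_lock must be held, per
 * the ASSERT at the top of strwaitq()): wait until data shows up on the
 * read side or the wait terminates.
 *
 *	int done;
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (_RD(stp->sd_wrq)->q_first == NULL) {
 *		error = strwaitq(stp, READWAIT, (ssize_t)0, fmode, -1, &done);
 *		if (error != 0 || done)
 *			break;
 *	}
 *	mutex_exit(&stp->sd_lock);
 */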
3163
3164 /*
3165 * Perform job control discipline access checks.
3166 * Return 0 for success and the errno for failure.
3167 */
3168
3169 #define cantsend(p, t, sig) \
3170 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))
3171
3172 int
3173	straccess(struct stdata *stp, enum jcaccess mode)
3174 {
3175 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */
3176 kthread_t *t = curthread;
3177 proc_t *p = ttoproc(t);
3178 sess_t *sp;
3179
3180 ASSERT(mutex_owned(&stp->sd_lock));
3181
3182 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
3183 return (0);
3184
3185 mutex_enter(&p->p_lock); /* protects p_pgidp */
3186
3187 for (;;) {
3188 mutex_enter(&p->p_splock); /* protects p->p_sessp */
3189 sp = p->p_sessp;
3190 mutex_enter(&sp->s_lock); /* protects sp->* */
3191
3192 /*
3193 * If this is not the calling process's controlling terminal
3194 * or if the calling process is already in the foreground
3195 * then allow access.
3196 */
3197 if (sp->s_dev != stp->sd_vnode->v_rdev ||
3198 p->p_pgidp == stp->sd_pgidp) {
3199 mutex_exit(&sp->s_lock);
3200 mutex_exit(&p->p_splock);
3201 mutex_exit(&p->p_lock);
3202 return (0);
3203 }
3204
3205 /*
3206 * Check to see if controlling terminal has been deallocated.
3207 */
3208 if (sp->s_vp == NULL) {
3209 if (!cantsend(p, t, SIGHUP))
3210 sigtoproc(p, t, SIGHUP);
3211 mutex_exit(&sp->s_lock);
3212 mutex_exit(&p->p_splock);
3213 mutex_exit(&p->p_lock);
3214 return (EIO);
3215 }
3216
3217 mutex_exit(&sp->s_lock);
3218 mutex_exit(&p->p_splock);
3219
3220 if (mode == JCGETP) {
3221 mutex_exit(&p->p_lock);
3222 return (0);
3223 }
3224
3225 if (mode == JCREAD) {
3226 if (p->p_detached || cantsend(p, t, SIGTTIN)) {
3227 mutex_exit(&p->p_lock);
3228 return (EIO);
3229 }
3230 mutex_exit(&p->p_lock);
3231 mutex_exit(&stp->sd_lock);
3232 pgsignal(p->p_pgidp, SIGTTIN);
3233 mutex_enter(&stp->sd_lock);
3234 mutex_enter(&p->p_lock);
3235 } else { /* mode == JCWRITE or JCSETP */
3236 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
3237 cantsend(p, t, SIGTTOU)) {
3238 mutex_exit(&p->p_lock);
3239 return (0);
3240 }
3241 if (p->p_detached) {
3242 mutex_exit(&p->p_lock);
3243 return (EIO);
3244 }
3245 mutex_exit(&p->p_lock);
3246 mutex_exit(&stp->sd_lock);
3247 pgsignal(p->p_pgidp, SIGTTOU);
3248 mutex_enter(&stp->sd_lock);
3249 mutex_enter(&p->p_lock);
3250 }
3251
3252 /*
3253 * We call cv_wait_sig_swap() to cause the appropriate
3254 * action for the jobcontrol signal to take place.
3255 * If the signal is being caught, we will take the
3256 * EINTR error return. Otherwise, the default action
3257 * of causing the process to stop will take place.
3258 * In this case, we rely on the periodic cv_broadcast() on
3259 * &lbolt_cv to wake us up to loop around and test again.
3260 * We can't get here if the signal is ignored or
3261 * if the current thread is blocking the signal.
3262 */
3263 mutex_exit(&stp->sd_lock);
3264 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
3265 mutex_exit(&p->p_lock);
3266 mutex_enter(&stp->sd_lock);
3267 return (EINTR);
3268 }
3269 mutex_exit(&p->p_lock);
3270 mutex_enter(&stp->sd_lock);
3271 mutex_enter(&p->p_lock);
3272 }
3273 }
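/*
 * Typical caller-side shape (a sketch; straccess() requires sd_lock to
 * be held and may drop and reacquire it internally):
 *
 *	mutex_enter(&stp->sd_lock);
 *	error = straccess(stp, JCREAD);
 *	mutex_exit(&stp->sd_lock);
 *	if (error != 0)
 *		return (error);
 */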
3274
3275 /*
3276 * Return size of message of block type (bp->b_datap->db_type)
3277 */
3278 size_t
3279	xmsgsize(mblk_t *bp)
3280 {
3281 unsigned char type;
3282 size_t count = 0;
3283
3284 type = bp->b_datap->db_type;
3285
3286 for (; bp; bp = bp->b_cont) {
3287 if (type != bp->b_datap->db_type)
3288 break;
3289 ASSERT(bp->b_wptr >= bp->b_rptr);
3290 count += bp->b_wptr - bp->b_rptr;
3291 }
3292 return (count);
3293 }
3294
3295 /*
3296 * Allocate a stream head.
3297 */
3298 struct stdata *
3299	shalloc(queue_t *qp)
3300 {
3301 stdata_t *stp;
3302
3303 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);
3304
3305 stp->sd_wrq = _WR(qp);
3306 stp->sd_strtab = NULL;
3307 stp->sd_iocid = 0;
3308 stp->sd_mate = NULL;
3309 stp->sd_freezer = NULL;
3310 stp->sd_refcnt = 0;
3311 stp->sd_wakeq = 0;
3312 stp->sd_anchor = 0;
3313 stp->sd_struiowrq = NULL;
3314 stp->sd_struiordq = NULL;
3315 stp->sd_struiodnak = 0;
3316 stp->sd_struionak = NULL;
3317 stp->sd_t_audit_data = NULL;
3318 stp->sd_rput_opt = 0;
3319 stp->sd_wput_opt = 0;
3320 stp->sd_read_opt = 0;
3321 stp->sd_rprotofunc = strrput_proto;
3322 stp->sd_rmiscfunc = strrput_misc;
3323 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
3324 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL;
3325 stp->sd_ciputctrl = NULL;
3326 stp->sd_nciputctrl = 0;
3327 stp->sd_qhead = NULL;
3328 stp->sd_qtail = NULL;
3329 stp->sd_servid = NULL;
3330 stp->sd_nqueues = 0;
3331 stp->sd_svcflags = 0;
3332 stp->sd_copyflag = 0;
3333
3334 return (stp);
3335 }
3336
3337 /*
3338 * Free a stream head.
3339 */
3340 void
3341	shfree(stdata_t *stp)
3342 {
3343 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
3344
3345 stp->sd_wrq = NULL;
3346
3347 mutex_enter(&stp->sd_qlock);
3348 while (stp->sd_svcflags & STRS_SCHEDULED) {
3349 STRSTAT(strwaits);
3350 cv_wait(&stp->sd_qcv, &stp->sd_qlock);
3351 }
3352 mutex_exit(&stp->sd_qlock);
3353
3354 if (stp->sd_ciputctrl != NULL) {
3355 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
3356 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
3357 stp->sd_nciputctrl, 0);
3358 ASSERT(ciputctrl_cache != NULL);
3359 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
3360 stp->sd_ciputctrl = NULL;
3361 stp->sd_nciputctrl = 0;
3362 }
3363 ASSERT(stp->sd_qhead == NULL);
3364 ASSERT(stp->sd_qtail == NULL);
3365 ASSERT(stp->sd_nqueues == 0);
3366 kmem_cache_free(stream_head_cache, stp);
3367 }
3368
3369 /*
3370 * Allocate a pair of queues and a syncq for the pair
3371 */
3372 queue_t *
3373	allocq(void)
3374 {
3375 queinfo_t *qip;
3376 queue_t *qp, *wqp;
3377 syncq_t *sq;
3378
3379 qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
3380
3381 qp = &qip->qu_rqueue;
3382 wqp = &qip->qu_wqueue;
3383 sq = &qip->qu_syncq;
3384
3385 qp->q_last = NULL;
3386 qp->q_next = NULL;
3387 qp->q_ptr = NULL;
3388 qp->q_flag = QUSE | QREADR;
3389 qp->q_bandp = NULL;
3390 qp->q_stream = NULL;
3391 qp->q_syncq = sq;
3392 qp->q_nband = 0;
3393 qp->q_nfsrv = NULL;
3394 qp->q_draining = 0;
3395 qp->q_syncqmsgs = 0;
3396 qp->q_spri = 0;
3397 qp->q_qtstamp = 0;
3398 qp->q_sqtstamp = 0;
3399 qp->q_fp = NULL;
3400
3401 wqp->q_last = NULL;
3402 wqp->q_next = NULL;
3403 wqp->q_ptr = NULL;
3404 wqp->q_flag = QUSE;
3405 wqp->q_bandp = NULL;
3406 wqp->q_stream = NULL;
3407 wqp->q_syncq = sq;
3408 wqp->q_nband = 0;
3409 wqp->q_nfsrv = NULL;
3410 wqp->q_draining = 0;
3411 wqp->q_syncqmsgs = 0;
3412 wqp->q_qtstamp = 0;
3413 wqp->q_sqtstamp = 0;
3414 wqp->q_spri = 0;
3415
3416 sq->sq_count = 0;
3417 sq->sq_rmqcount = 0;
3418 sq->sq_flags = 0;
3419 sq->sq_type = 0;
3420 sq->sq_callbflags = 0;
3421 sq->sq_cancelid = 0;
3422 sq->sq_ciputctrl = NULL;
3423 sq->sq_nciputctrl = 0;
3424 sq->sq_needexcl = 0;
3425 sq->sq_svcflags = 0;
3426
3427 return (qp);
3428 }
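/*
 * Usage note (a sketch): allocq() returns the read-side queue; _WR(qp)
 * is its write-side partner and SQ(qp) the "attached" syncq. The pair
 * is released with freeq() below.
 *
 *	queue_t *qp = allocq();
 *	queue_t *wqp = _WR(qp);
 *	...
 *	freeq(qp);
 */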
3429
3430 /*
3431 * Free a pair of queues and the "attached" syncq.
3432 * Discard any messages left on the syncq(s), remove the syncq(s) from the
3433 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
3434 */
3435 void
3436	freeq(queue_t *qp)
3437 {
3438 qband_t *qbp, *nqbp;
3439 syncq_t *sq, *outer;
3440 queue_t *wqp = _WR(qp);
3441
3442 ASSERT(qp->q_flag & QREADR);
3443
3444 /*
3445 * If a previously dispatched taskq job is scheduled to run
3446	 * syncq_service() or a service routine is scheduled for the
3447 * queues about to be freed, wait here until all service is
3448 * done on the queue and all associated queues and syncqs.
3449 */
3450 wait_svc(qp);
3451
3452 (void) flush_syncq(qp->q_syncq, qp);
3453 (void) flush_syncq(wqp->q_syncq, wqp);
3454 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);
3455
3456 /*
3457	 * Flush the queues before q_next is set to NULL. This is needed
3458 * in order to backenable any downstream queue before we go away.
3459 * Note: we are already removed from the stream so that the
3460 * backenabling will not cause any messages to be delivered to our
3461 * put procedures.
3462 */
3463 flushq(qp, FLUSHALL);
3464 flushq(wqp, FLUSHALL);
3465
3466 /* Tidy up - removeq only does a half-remove from stream */
3467 qp->q_next = wqp->q_next = NULL;
3468 ASSERT(!(qp->q_flag & QENAB));
3469 ASSERT(!(wqp->q_flag & QENAB));
3470
3471 outer = qp->q_syncq->sq_outer;
3472 if (outer != NULL) {
3473 outer_remove(outer, qp->q_syncq);
3474 if (wqp->q_syncq != qp->q_syncq)
3475 outer_remove(outer, wqp->q_syncq);
3476 }
3477 /*
3478 * Free any syncqs that are outside what allocq returned.
3479 */
3480 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
3481 free_syncq(qp->q_syncq);
3482 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
3483 free_syncq(wqp->q_syncq);
3484
3485 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3486 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3487 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
3488 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
3489 sq = SQ(qp);
3490 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
3491 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
3492 ASSERT(sq->sq_outer == NULL);
3493 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
3494 ASSERT(sq->sq_callbpend == NULL);
3495 ASSERT(sq->sq_needexcl == 0);
3496
3497 if (sq->sq_ciputctrl != NULL) {
3498 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
3499 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
3500 sq->sq_nciputctrl, 0);
3501 ASSERT(ciputctrl_cache != NULL);
3502 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
3503 sq->sq_ciputctrl = NULL;
3504 sq->sq_nciputctrl = 0;
3505 }
3506
3507 ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
3508 ASSERT(qp->q_count == 0 && wqp->q_count == 0);
3509 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);
3510
3511 qp->q_flag &= ~QUSE;
3512 wqp->q_flag &= ~QUSE;
3513
3514 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
3515 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
3516
3517 qbp = qp->q_bandp;
3518 while (qbp) {
3519 nqbp = qbp->qb_next;
3520 freeband(qbp);
3521 qbp = nqbp;
3522 }
3523 qbp = wqp->q_bandp;
3524 while (qbp) {
3525 nqbp = qbp->qb_next;
3526 freeband(qbp);
3527 qbp = nqbp;
3528 }
3529 kmem_cache_free(queue_cache, qp);
3530 }
3531
3532 /*
3533 * Allocate a qband structure.
3534 */
3535 qband_t *
3536	allocband(void)
3537 {
3538 qband_t *qbp;
3539
3540 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
3541 if (qbp == NULL)
3542 return (NULL);
3543
3544 qbp->qb_next = NULL;
3545 qbp->qb_count = 0;
3546 qbp->qb_mblkcnt = 0;
3547 qbp->qb_first = NULL;
3548 qbp->qb_last = NULL;
3549 qbp->qb_flag = 0;
3550
3551 return (qbp);
3552 }
3553
3554 /*
3555 * Free a qband structure.
3556 */
3557 void
3558	freeband(qband_t *qbp)
3559 {
3560 kmem_cache_free(qband_cache, qbp);
3561 }
3562
3563 /*
3564 * Just like putnextctl(9F), except that allocb_wait() is used.
3565 *
3566 * Consolidation Private, and of course only callable from the stream head or
3567 * routines that may block.
3568 */
3569 int
3570	putnextctl_wait(queue_t *q, int type)
3571 {
3572 mblk_t *bp;
3573 int error;
3574
3575 if ((datamsg(type) && (type != M_DELAY)) ||
3576 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
3577 return (0);
3578
3579 bp->b_datap->db_type = (unsigned char)type;
3580 putnext(q, bp);
3581 return (1);
3582 }
3583
3584 /*
3585 * Run any possible bufcalls.
3586 */
3587 void
3588	runbufcalls(void)
3589 {
3590 strbufcall_t *bcp;
3591
3592 mutex_enter(&bcall_monitor);
3593 mutex_enter(&strbcall_lock);
3594
3595 if (strbcalls.bc_head) {
3596 size_t count;
3597 int nevent;
3598
3599 /*
3600 * count how many events are on the list
3601 * now so we can check to avoid looping
3602 * in low memory situations
3603 */
3604 nevent = 0;
3605 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
3606 nevent++;
3607
3608 /*
3609	 * Get an estimate of available memory from kmem_avail().
3610	 * Wake all bufcall functions waiting for
3611	 * memory whose requests could be satisfied
3612	 * by 'count' bytes and let them fight for it.
3613 */
3614 count = kmem_avail();
3615 while ((bcp = strbcalls.bc_head) != NULL && nevent) {
3616 STRSTAT(bufcalls);
3617 --nevent;
3618 if (bcp->bc_size <= count) {
3619 bcp->bc_executor = curthread;
3620 mutex_exit(&strbcall_lock);
3621 (*bcp->bc_func)(bcp->bc_arg);
3622 mutex_enter(&strbcall_lock);
3623 bcp->bc_executor = NULL;
3624 cv_broadcast(&bcall_cv);
3625 strbcalls.bc_head = bcp->bc_next;
3626 kmem_free(bcp, sizeof (strbufcall_t));
3627 } else {
3628 /*
3629 * too big, try again later - note
3630 * that nevent was decremented above
3631 * so we won't retry this one on this
3632 * iteration of the loop
3633 */
3634 if (bcp->bc_next != NULL) {
3635 strbcalls.bc_head = bcp->bc_next;
3636 bcp->bc_next = NULL;
3637 strbcalls.bc_tail->bc_next = bcp;
3638 strbcalls.bc_tail = bcp;
3639 }
3640 }
3641 }
3642 if (strbcalls.bc_head == NULL)
3643 strbcalls.bc_tail = NULL;
3644 }
3645
3646 mutex_exit(&strbcall_lock);
3647 mutex_exit(&bcall_monitor);
3648 }
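/*
 * For context, a sketch of how entries land on strbcalls in the first
 * place (driver-side; mydrv_retry and q are hypothetical): a failed
 * allocb() is typically followed by bufcall(9F), and runbufcalls()
 * later invokes the callback when memory may be available.
 *
 *	bufcall_id_t id = bufcall(size, BPRI_MED, mydrv_retry, q);
 *	if (id == 0)
 *		(void) timeout(mydrv_retry, q, drv_usectohz(100000));
 */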
3649
3650
3651 /*
3652 * Actually run queue's service routine.
3653 */
3654 static void
3655	runservice(queue_t *q)
3656 {
3657 qband_t *qbp;
3658
3659 ASSERT(q->q_qinfo->qi_srvp);
3660 again:
3661 entersq(q->q_syncq, SQ_SVC);
3662 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
3663 "runservice starts:%p", q);
3664
3665 if (!(q->q_flag & QWCLOSE))
3666 (*q->q_qinfo->qi_srvp)(q);
3667
3668 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
3669 "runservice ends:(%p)", q);
3670
3671 leavesq(q->q_syncq, SQ_SVC);
3672
3673 mutex_enter(QLOCK(q));
3674 if (q->q_flag & QENAB) {
3675 q->q_flag &= ~QENAB;
3676 mutex_exit(QLOCK(q));
3677 goto again;
3678 }
3679 q->q_flag &= ~QINSERVICE;
3680 q->q_flag &= ~QBACK;
3681 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
3682 qbp->qb_flag &= ~QB_BACK;
3683 /*
3684	 * Wake up threads waiting for the service procedure
3685 * to be run (strclose and qdetach).
3686 */
3687 cv_broadcast(&q->q_wait);
3688
3689 mutex_exit(QLOCK(q));
3690 }
3691
3692 /*
3693 * Background processing of bufcalls.
3694 */
3695 void
3696	streams_bufcall_service(void)
3697 {
3698 callb_cpr_t cprinfo;
3699
3700 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
3701 "streams_bufcall_service");
3702
3703 mutex_enter(&strbcall_lock);
3704
3705 for (;;) {
3706 if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
3707 mutex_exit(&strbcall_lock);
3708 runbufcalls();
3709 mutex_enter(&strbcall_lock);
3710 }
3711 if (strbcalls.bc_head != NULL) {
3712 STRSTAT(bcwaits);
3713 /* Wait for memory to become available */
3714 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3715 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock,
3716 SEC_TO_TICK(60), TR_CLOCK_TICK);
3717 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3718 }
3719
3720 /* Wait for new work to arrive */
3721 if (strbcalls.bc_head == NULL) {
3722 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3723 cv_wait(&strbcall_cv, &strbcall_lock);
3724 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3725 }
3726 }
3727 }
3728
3729 /*
3730 * Background processing of streams background tasks which failed
3731 * taskq_dispatch.
3732 */
3733 static void
3734	streams_qbkgrnd_service(void)
3735 {
3736 callb_cpr_t cprinfo;
3737 queue_t *q;
3738
3739 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3740 "streams_bkgrnd_service");
3741
3742 mutex_enter(&service_queue);
3743
3744 for (;;) {
3745 /*
3746 * Wait for work to arrive.
3747 */
3748 while ((freebs_list == NULL) && (qhead == NULL)) {
3749 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3750 cv_wait(&services_to_run, &service_queue);
3751 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3752 }
3753 /*
3754 * Handle all pending freebs requests to free memory.
3755 */
3756 while (freebs_list != NULL) {
3757 mblk_t *mp = freebs_list;
3758 freebs_list = mp->b_next;
3759 mutex_exit(&service_queue);
3760 mblk_free(mp);
3761 mutex_enter(&service_queue);
3762 }
3763 /*
3764 * Run pending queues.
3765 */
3766 while (qhead != NULL) {
3767 DQ(q, qhead, qtail, q_link);
3768 ASSERT(q != NULL);
3769 mutex_exit(&service_queue);
3770 queue_service(q);
3771 mutex_enter(&service_queue);
3772 }
3773 ASSERT(qhead == NULL && qtail == NULL);
3774 }
3775 }
3776
3777 /*
3778	 * Background processing of streams syncq service tasks which failed
3779	 * taskq_dispatch.
3780 */
3781 static void
3782	streams_sqbkgrnd_service(void)
3783 {
3784 callb_cpr_t cprinfo;
3785 syncq_t *sq;
3786
3787 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3788 "streams_sqbkgrnd_service");
3789
3790 mutex_enter(&service_queue);
3791
3792 for (;;) {
3793 /*
3794 * Wait for work to arrive.
3795 */
3796 while (sqhead == NULL) {
3797 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3798 cv_wait(&syncqs_to_run, &service_queue);
3799 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3800 }
3801
3802 /*
3803 * Run pending syncqs.
3804 */
3805 while (sqhead != NULL) {
3806 DQ(sq, sqhead, sqtail, sq_next);
3807 ASSERT(sq != NULL);
3808 ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
3809 mutex_exit(&service_queue);
3810 syncq_service(sq);
3811 mutex_enter(&service_queue);
3812 }
3813 }
3814 }
3815
3816 /*
3817 * Disable the syncq and wait for background syncq processing to complete.
3818 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
3819 * list.
3820 */
3821 void
3822	wait_sq_svc(syncq_t *sq)
3823 {
3824 mutex_enter(SQLOCK(sq));
3825 sq->sq_svcflags |= SQ_DISABLED;
3826 if (sq->sq_svcflags & SQ_BGTHREAD) {
3827 syncq_t *sq_chase;
3828 syncq_t *sq_curr;
3829 int removed;
3830
3831 ASSERT(sq->sq_servcount == 1);
3832 mutex_enter(&service_queue);
3833 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3834 mutex_exit(&service_queue);
3835 if (removed) {
3836 sq->sq_svcflags &= ~SQ_BGTHREAD;
3837 sq->sq_servcount = 0;
3838 STRSTAT(sqremoved);
3839 goto done;
3840 }
3841 }
3842 while (sq->sq_servcount != 0) {
3843 sq->sq_flags |= SQ_WANTWAKEUP;
3844 cv_wait(&sq->sq_wait, SQLOCK(sq));
3845 }
3846 done:
3847 mutex_exit(SQLOCK(sq));
3848 }

/*
 * Put a syncq on the list of syncq's to be serviced by the sqthread.
 * Add the argument to the end of the sqhead list and set the flag
 * indicating this syncq has been enabled. If it has already been
 * enabled, don't do anything.
 * This routine assumes that SQLOCK is held.
 * NOTE that the lock order is to have the SQLOCK first,
 * so if the service_syncq lock is held, we need to release it
 * before acquiring the SQLOCK (mostly relevant for the background
 * thread, and this seems to be common among the STREAMS global locks).
 * Note that the sq_svcflags are protected by the SQLOCK.
 */
void
sqenable(syncq_t *sq)
{
	/*
	 * This is probably not important except for where I believe it
	 * is being called. At that point, it should be held (and it
	 * is a pain to release it just for this routine, so don't do
	 * it).
	 */
	ASSERT(MUTEX_HELD(SQLOCK(sq)));

	IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
	IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);

	/*
	 * Do not put on list if background thread is scheduled or
	 * syncq is disabled.
	 */
	if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
		return;

	/*
	 * Check whether we should enable sq at all.
	 * Non PERMOD syncqs may be drained by at most one thread.
	 * PERMOD syncqs may be drained by several threads, but we limit
	 * the total to the lesser of the number of queues on the syncq
	 * and the number of online CPUs.
	 */
	if (sq->sq_servcount != 0) {
		if (((sq->sq_type & SQ_PERMOD) == 0) ||
		    (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
			STRSTAT(sqtoomany);
			return;
		}
	}

	sq->sq_tstamp = ddi_get_lbolt();
	STRSTAT(sqenables);

	/* Attempt a taskq dispatch */
	sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
	    (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
	if (sq->sq_servid != NULL) {
		sq->sq_servcount++;
		return;
	}

	/*
	 * This taskq dispatch failed, but a previous one may have succeeded.
	 * Don't try to schedule on the background thread whilst there is
	 * outstanding taskq processing.
	 */
	if (sq->sq_servcount != 0)
		return;

	/*
	 * System is low on resources and can't perform a non-sleeping
	 * dispatch. Schedule the syncq for a background thread and mark the
	 * syncq to avoid any further taskq dispatch attempts.
	 */
	mutex_enter(&service_queue);
	STRSTAT(taskqfails);
	ENQUEUE(sq, sqhead, sqtail, sq_next);
	sq->sq_svcflags |= SQ_BGTHREAD;
	sq->sq_servcount = 1;
	cv_signal(&syncqs_to_run);
	mutex_exit(&service_queue);
}

/*
 * Note: fifo_close() depends on the mblk_t on the queue being freed
 * asynchronously. The asynchronous freeing of messages breaks the
 * recursive call chain of fifo_close() while there are I_SENDFD
 * messages referring to other file pointers on the queue. When closing
 * pipes this avoids stack overflow with daisy-chained pipes, and also
 * avoids deadlock between fifonode_t pairs (which share the same
 * fifolock_t).
 *
 * No need to kpreempt_disable to access cpu_seqid. If we migrate and
 * the esb queue does not match the new CPU, that is OK.
 */
void
freebs_enqueue(mblk_t *mp, dblk_t *dbp)
{
	int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q;
	esb_queue_t *eqp;

	ASSERT(dbp->db_mblk == mp);
	ASSERT(qindex < esbq_nelem);

	eqp = system_esbq_array;
	if (eqp != NULL) {
		eqp += qindex;
	} else {
		mutex_enter(&esbq_lock);
		if (kmem_ready && system_esbq_array == NULL)
			system_esbq_array = (esb_queue_t *)kmem_zalloc(
			    esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP);
		mutex_exit(&esbq_lock);
		eqp = system_esbq_array;
		if (eqp != NULL)
			eqp += qindex;
		else
			eqp = &system_esbq;
	}

	/*
	 * Check data sanity. The dblock should have a non-NULL free
	 * function. It is better to panic here than later, when the
	 * dblock is freed asynchronously and the context is lost.
	 */
	if (dbp->db_frtnp->free_func == NULL) {
		panic("freebs_enqueue: dblock %p has a NULL free callback",
		    (void *)dbp);
	}

	mutex_enter(&eqp->eq_lock);
	/* queue the new mblk on the esballoc queue */
	if (eqp->eq_head == NULL) {
		eqp->eq_head = eqp->eq_tail = mp;
	} else {
		eqp->eq_tail->b_next = mp;
		eqp->eq_tail = mp;
	}
	eqp->eq_len++;

	/* If we're the first thread to reach the threshold, process */
	if (eqp->eq_len >= esbq_max_qlen &&
	    !(eqp->eq_flags & ESBQ_PROCESSING))
		esballoc_process_queue(eqp);

	esballoc_set_timer(eqp, esbq_timeout);
	mutex_exit(&eqp->eq_lock);
}

static void
esballoc_process_queue(esb_queue_t *eqp)
{
	mblk_t *mp;

	ASSERT(MUTEX_HELD(&eqp->eq_lock));

	eqp->eq_flags |= ESBQ_PROCESSING;

	do {
		/*
		 * Detach the message chain for processing.
		 */
		mp = eqp->eq_head;
		eqp->eq_tail->b_next = NULL;
		eqp->eq_head = eqp->eq_tail = NULL;
		eqp->eq_len = 0;
		mutex_exit(&eqp->eq_lock);

		/*
		 * Process the message chain.
		 */
		esballoc_enqueue_mblk(mp);
		mutex_enter(&eqp->eq_lock);
	} while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));

	eqp->eq_flags &= ~ESBQ_PROCESSING;
}

/*
 * taskq callback routine to free esballoced mblk's
 */
static void
esballoc_mblk_free(mblk_t *mp)
{
	mblk_t *nextmp;

	for (; mp != NULL; mp = nextmp) {
		nextmp = mp->b_next;
		mp->b_next = NULL;
		mblk_free(mp);
	}
}

static void
esballoc_enqueue_mblk(mblk_t *mp)
{

	if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
	    TQ_NOSLEEP) == TASKQID_INVALID) {
		mblk_t *first_mp = mp;
		/*
		 * System is low on resources and can't perform a non-sleeping
		 * dispatch. Schedule for a background thread.
		 */
		mutex_enter(&service_queue);
		STRSTAT(taskqfails);

		while (mp->b_next != NULL)
			mp = mp->b_next;

		mp->b_next = freebs_list;
		freebs_list = first_mp;
		cv_signal(&services_to_run);
		mutex_exit(&service_queue);
	}
}
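
/*
 * Design note (illustration only): the failed chain is spliced onto the
 * head of freebs_list as a unit. For an incoming chain a->b and an
 * existing list c->d, the walk above finds b, then b->b_next = c and
 * freebs_list = a, giving a->b->c->d; streams_qbkgrnd_service() later
 * frees the mblks one at a time via mblk_free().
 */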

static void
esballoc_timer(void *arg)
{
	esb_queue_t *eqp = arg;

	mutex_enter(&eqp->eq_lock);
	eqp->eq_flags &= ~ESBQ_TIMER;

	if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
	    eqp->eq_len > 0)
		esballoc_process_queue(eqp);

	esballoc_set_timer(eqp, esbq_timeout);
	mutex_exit(&eqp->eq_lock);
}

static void
esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
{
	ASSERT(MUTEX_HELD(&eqp->eq_lock));

	if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
		(void) timeout(esballoc_timer, eqp, eq_timeout);
		eqp->eq_flags |= ESBQ_TIMER;
	}
}

/*
 * Set up the esbq array length based upon NCPU scaled by CPUs per
 * queue. Use the static system_esbq until kmem_ready, after which
 * freebs_enqueue() can allocate the array.
 */
void
esballoc_queue_init(void)
{
	esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1);
	esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q;
	esbq_nelem = howmany(NCPU, esbq_cpus_per_q);
	system_esbq.eq_len = 0;
	system_esbq.eq_head = system_esbq.eq_tail = NULL;
	system_esbq.eq_flags = 0;
}
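
/*
 * Worked example (illustrative; the tunable values are hypothetical):
 * with esbq_cpus_per_q = 4 on a 64-CPU system,
 *	esbq_log2_cpus_per_q = highbit(3)     = 2
 *	esbq_cpus_per_q      = 1 << 2         = 4  (rounded up to a power of 2)
 *	esbq_nelem           = howmany(64, 4) = 16
 * so freebs_enqueue() maps CPUs 0-3 onto queue 0, CPUs 4-7 onto queue 1,
 * and so on, via qindex = cpu_seqid >> esbq_log2_cpus_per_q.
 */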

/*
 * Set the QBACK or QB_BACK flag in the given queue for
 * the given priority band.
 */
void
setqback(queue_t *q, unsigned char pri)
{
	int i;
	qband_t *qbp;
	qband_t **qbpp;

	ASSERT(MUTEX_HELD(QLOCK(q)));
	if (pri != 0) {
		if (pri > q->q_nband) {
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (pri > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					cmn_err(CE_WARN,
					    "setqback: can't allocate qband\n");
					return;
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		qbp = q->q_bandp;
		i = pri;
		while (--i)
			qbp = qbp->qb_next;
		qbp->qb_flag |= QB_BACK;
	} else {
		q->q_flag |= QBACK;
	}
}

int
strcopyin(void *from, void *to, size_t len, int copyflag)
{
	if (copyflag & U_TO_K) {
		ASSERT((copyflag & K_TO_K) == 0);
		if (copyin(from, to, len))
			return (EFAULT);
	} else {
		ASSERT(copyflag & K_TO_K);
		bcopy(from, to, len);
	}
	return (0);
}

int
strcopyout(void *from, void *to, size_t len, int copyflag)
{
	if (copyflag & U_TO_K) {
		if (copyout(from, to, len))
			return (EFAULT);
	} else {
		ASSERT(copyflag & K_TO_K);
		bcopy(from, to, len);
	}
	return (0);
}
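
/*
 * Usage sketch (illustrative; the caller and variables are hypothetical):
 * ioctl-style code that may be entered with either a user or a kernel
 * address passes the appropriate copyflag through, e.g.:
 *
 *	struct strioctl strioc;
 *	int error;
 *
 *	error = strcopyin(arg, &strioc, sizeof (struct strioctl), copyflag);
 *	if (error != 0)
 *		return (error);
 *
 * with copyflag set to U_TO_K for a user address and K_TO_K for a
 * kernel-to-kernel copy.
 */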

/*
 * strsignal_nolock() posts a signal to the process(es) at the stream head.
 * It assumes that the stream head lock is already held, whereas strsignal()
 * acquires the lock first. This routine was created because a few callers
 * release the stream head lock before calling only to re-acquire it after
 * it returns.
 */
void
strsignal_nolock(stdata_t *stp, int sig, uchar_t band)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	switch (sig) {
	case SIGPOLL:
		if (stp->sd_sigflags & S_MSG)
			strsendsig(stp->sd_siglist, S_MSG, band, 0);
		break;
	default:
		if (stp->sd_pgidp)
			pgsignal(stp->sd_pgidp, sig);
		break;
	}
}

void
strsignal(stdata_t *stp, int sig, int32_t band)
{
	TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
	    "strsignal:%p, %X, %X", stp, sig, band);

	mutex_enter(&stp->sd_lock);
	switch (sig) {
	case SIGPOLL:
		if (stp->sd_sigflags & S_MSG)
			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
		break;

	default:
		if (stp->sd_pgidp) {
			pgsignal(stp->sd_pgidp, sig);
		}
		break;
	}
	mutex_exit(&stp->sd_lock);
}

void
strhup(stdata_t *stp)
{
	ASSERT(mutex_owned(&stp->sd_lock));
	pollwakeup(&stp->sd_pollist, POLLHUP);
	if (stp->sd_sigflags & S_HANGUP)
		strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
}

/*
 * Backenable the first queue upstream from `q' with a service procedure.
 */
void
backenable(queue_t *q, uchar_t pri)
{
	queue_t *nq;

	/*
	 * Our presence might not prevent other modules in our own
	 * stream from popping/pushing since the caller of getq might not
	 * have a claim on the queue (some drivers do a getq on somebody
	 * else's queue - they know that the queue itself is not going away
	 * but the framework has to guarantee q_next in that stream).
	 */
	claimstr(q);

	/* Find nearest back queue with service proc */
	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
		ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
	}

	if (nq) {
		kthread_t *freezer;
		/*
		 * backenable can be called either with no locks held
		 * or with the stream frozen (the latter occurs when a module
		 * calls rmvq with the stream frozen). If the stream is frozen
		 * by the caller the caller will hold all qlocks in the stream.
		 * Note that a frozen stream doesn't freeze a mated stream,
		 * so we explicitly check for that.
		 */
		freezer = STREAM(q)->sd_freezer;
		if (freezer != curthread || STREAM(q) != STREAM(nq)) {
			mutex_enter(QLOCK(nq));
		}
#ifdef DEBUG
		else {
			ASSERT(frozenstr(q));
			ASSERT(MUTEX_HELD(QLOCK(q)));
			ASSERT(MUTEX_HELD(QLOCK(nq)));
		}
#endif
		setqback(nq, pri);
		qenable_locked(nq);
		if (freezer != curthread || STREAM(q) != STREAM(nq))
			mutex_exit(QLOCK(nq));
	}
	releasestr(q);
}

/*
 * Return the appropriate errno when one of flags_to_check is set
 * in sd_flags. Uses the exported error routines if they are set.
 * Returns 0 if no error is set (or if the exported error routines
 * do not return an error).
 *
 * If there is both a read and write error to check, we prefer the read error.
 * Also, give preference to recorded errnos over the error functions.
 * The flags that are handled are:
 *	STPLEX		return EINVAL
 *	STRDERR		return sd_rerror (and clear if STRDERRNONPERSIST)
 *	STWRERR		return sd_werror (and clear if STWRERRNONPERSIST)
 *	STRHUP		return sd_werror
 *
 * If the caller indicates that the operation is a peek, a nonpersistent error
 * is not cleared.
 */
int
strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
{
	int32_t sd_flag = stp->sd_flag & flags_to_check;
	int error = 0;

	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
	if (sd_flag & STPLEX)
		error = EINVAL;
	else if (sd_flag & STRDERR) {
		error = stp->sd_rerror;
		if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
			/*
			 * Read errors are non-persistent i.e. discarded once
			 * returned to a non-peeking caller.
			 */
			stp->sd_rerror = 0;
			stp->sd_flag &= ~STRDERR;
		}
		if (error == 0 && stp->sd_rderrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STRDERR;
				stp->sd_rderrfunc = NULL;
			}
		}
	} else if (sd_flag & STWRERR) {
		error = stp->sd_werror;
		if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
			/*
			 * Write errors are non-persistent i.e. discarded once
			 * returned to a non-peeking caller.
			 */
			stp->sd_werror = 0;
			stp->sd_flag &= ~STWRERR;
		}
		if (error == 0 && stp->sd_wrerrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STWRERR;
				stp->sd_wrerrfunc = NULL;
			}
		}
	} else if (sd_flag & STRHUP) {
		/* sd_werror set when STRHUP */
		error = stp->sd_werror;
	}
	return (error);
}
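
/*
 * Usage sketch (illustrative): callers typically test the error bits
 * under sd_lock first and call strgeterr() only when one is set, e.g.:
 *
 *	mutex_enter(&stp->sd_lock);
 *	if (stp->sd_flag & (STRDERR|STPLEX)) {
 *		error = strgeterr(stp, STRDERR|STPLEX, 0);
 *		if (error != 0) {
 *			mutex_exit(&stp->sd_lock);
 *			return (error);
 *		}
 *	}
 *	mutex_exit(&stp->sd_lock);
 *
 * Passing 1 for ispeek preserves non-persistent errors for the next
 * (consuming) caller.
 */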


/*
 * Single-thread open/close/push/pop, also for twisted streams.
 */
int
strstartplumb(stdata_t *stp, int flag, int cmd)
{
	int waited = 1;
	int error = 0;

	if (STRMATED(stp)) {
		struct stdata *stmatep = stp->sd_mate;

		STRLOCKMATES(stp);
		while (waited) {
			waited = 0;
			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stp->sd_lock);
				if (!cv_wait_sig(&stmatep->sd_monitor,
				    &stmatep->sd_lock)) {
					mutex_exit(&stmatep->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stmatep->sd_lock);
				STRLOCKMATES(stp);
			}
			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stmatep->sd_lock);
				if (!cv_wait_sig(&stp->sd_monitor,
				    &stp->sd_lock)) {
					mutex_exit(&stp->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stp->sd_lock);
				STRLOCKMATES(stp);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					STRUNLOCKMATES(stp);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		STRUNLOCKMATES(stp);
	} else {
		mutex_enter(&stp->sd_lock);
		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
			    (flag & (FNDELAY|FNONBLOCK))) {
				mutex_exit(&stp->sd_lock);
				return (EAGAIN);
			}
			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
				mutex_exit(&stp->sd_lock);
				return (EINTR);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					mutex_exit(&stp->sd_lock);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		mutex_exit(&stp->sd_lock);
	}
	return (0);
}

/*
 * Complete the plumbing operation associated with stream `stp'.
 */
void
strendplumb(stdata_t *stp)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT(stp->sd_flag & STRPLUMB);
	stp->sd_flag &= ~STRPLUMB;
	cv_broadcast(&stp->sd_monitor);
}
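
/*
 * Usage sketch (illustrative): plumbing operations bracket their work
 * with the pair of routines above, e.g. for an I_PUSH-style operation:
 *
 *	if ((error = strstartplumb(stp, flag, cmd)) != 0)
 *		return (error);
 *	... perform the push/pop, e.g. via insertq()/removeq() ...
 *	mutex_enter(&stp->sd_lock);
 *	strendplumb(stp);
 *	mutex_exit(&stp->sd_lock);
 *
 * which is what serializes plumbing via the STRPLUMB flag.
 */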

/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case in general is harder, open and
 * close have significant similarities.
 *
 * During close the STREAMS framework has to both ensure that there
 * are no stale references to the queue pair (and syncq) that
 * are being closed and also provide the guarantees that are documented
 * in qprocsoff(9F).
 * If there are stale references to the queue that is closing it can
 * result in kernel memory corruption or kernel panics.
 *
 * Note that it is up to the module/driver to ensure that it itself
 * does not have any stale references to the closing queues once its close
 * routine returns. This includes:
 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
 *   associated with the queues. For timeout and bufcall callbacks the
 *   module/driver also has to ensure (or wait for) any callbacks that
 *   are in progress.
 * - If the module/driver is using esballoc it has to ensure that any
 *   esballoc free functions do not refer to a queue that has closed.
 *   (Note that in general the close routine can not wait for the esballoc'ed
 *   messages to be freed since that can cause a deadlock.)
 * - Cancelling any interrupts that refer to the closing queues and
 *   also ensuring that there are no interrupts in progress that will
 *   refer to the closing queues once the close routine returns.
 * - For multiplexors, removing any driver global state that refers to
 *   the closing queue and also ensuring that there are no threads in
 *   the multiplexor that have picked up a queue pointer but not yet
 *   finished using it.
 *
 * In addition, a driver/module can only reference the q_next pointer
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback procedure executing "on" the correct
 * stream. Thus it can not reference the q_next pointer in an interrupt
 * routine or a timeout, bufcall or esballoc callback routine. Likewise
 * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
 * In all the cases when the driver/module can not access the q_next
 * field it must use the *next* versions e.g. canputnext instead of
 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
 *
 *
 * Assuming that the driver/module conforms to the above constraints
 * the STREAMS framework has to avoid stale references to q_next for all
 * the framework internal cases which include (but are not limited to):
 * - Threads in canput/canputnext/backenable and elsewhere that are
 *   walking q_next.
 * - Messages on a syncq that have a reference to the queue through b_queue.
 * - Messages on an outer perimeter (syncq) that have a reference to the
 *   queue through b_queue.
 * - Threads that use q_nfsrv (e.g. canput) to find a queue.
 *   Note that only canput and bcanput use q_nfsrv without any locking.
 *
 * The STREAMS framework providing the qprocsoff(9F) guarantees means that
 * after qprocsoff returns, the framework has to ensure that no threads can
 * enter the put or service routines for the closing read or write-side queue.
 * In addition to preventing "direct" entry into the put procedures
 * the framework also has to prevent messages being drained from
 * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
 * mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned.
 * Note that if a module/driver uses put(9F) on one of its own queues
 * it is up to the module/driver to ensure that the put() doesn't
 * get called when the queue is closing.
 *
 *
 * The framework aspects of the above "contract" are implemented by
 * qprocsoff, removeq, and strlock:
 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
 *   entering the service procedures.
 * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
 *   canputnext, backenable etc from dereferencing the q_next that will
 *   soon change.
 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
 *   or other q_next walker that uses claimstr/releasestr to finish.
 * - optionally for every syncq in the stream strlock acquires all the
 *   sq_lock's and waits for all sq_counts to drop to a value that indicates
 *   that no thread executes in the put or service procedures and that no
 *   thread is draining into the module/driver. This ensures that no
 *   open, close, put, service, or qtimeout/qbufcall callback procedure is
 *   currently executing hence no such thread can end up with the old stale
 *   q_next value and no canput/backenable can have the old stale
 *   q_nfsrv/q_next.
 * - qdetach (wait_svc) makes sure that any scheduled or running threads
 *   have either finished or observed the QWCLOSE flag and gone away.
 */
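
/*
 * Illustration of the q_next rule above (sketch only; xxput and the
 * module are hypothetical): a put procedure runs "on" the stream and so
 * must use the *next* flavors, which internally take the claim that
 * makes the traversal safe:
 *
 *	static int
 *	xxput(queue_t *q, mblk_t *mp)
 *	{
 *		if (canputnext(q))
 *			putnext(q, mp);
 *		else
 *			(void) putq(q, mp);
 *		return (0);
 *	}
 *
 * whereas open-coding canput(q->q_next) or caching q_next for use from
 * an interrupt or timeout routine risks dereferencing a stale pointer.
 */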


/*
 * Get all the locks necessary to change q_next.
 *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
 * Since threads calling removeq() are in the process of removing their queues
 * from the stream, we do not need to worry about them accessing a stale q_next
 * pointer and thus we do not need to wait for them to exit (in fact, waiting
 * for them can cause deadlock).
 *
 * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
 * increase on some syncq while it is waiting on some other syncq).
 *
 * Assumes that only one thread attempts to call strlock for a given
 * stream. If this is not the case the two threads would deadlock.
 * This assumption is guaranteed since strlock is only called by insertq
 * and removeq and streams plumbing changes are single-threaded for
 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
 *
 * For pipes, it is not difficult to atomically designate a pair of streams
 * to be mated. Once mated atomically by the framework the twisted pair remain
 * configured that way until dismantled atomically by the framework.
 * When plumbing takes place on a twisted stream it is necessary to ensure that
 * this operation is done exclusively on the twisted stream since two such
 * operations, each initiated on different ends of the pipe, will deadlock
 * waiting for each other to complete.
 *
 * On entry, no locks should be held.
 * The locks acquired and held by strlock depend on a few factors.
 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
 *   and held on exit and all sq_count are at an acceptable level.
 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
 *   sd_refcnt being zero.
 */

static void
strlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql, *sql2;
retry:
	/*
	 * Wait for any claimstr to go away.
	 */
	if (STRMATED(stp)) {
		struct stdata *stp1, *stp2;

		STRLOCKMATES(stp);
		/*
		 * Note that the selection of locking order is not
		 * important, just that they are always acquired in
		 * the same order. To assure this, we choose this
		 * order based on the value of the pointer, and since
		 * the pointer will not change for the life of this
		 * pair, we will always grab the locks in the same
		 * order (and hence, prevent deadlocks).
		 */
		if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
			stp1 = stp;
			stp2 = stp->sd_mate;
		} else {
			stp2 = stp;
			stp1 = stp->sd_mate;
		}
		mutex_enter(&stp1->sd_reflock);
		if (stp1->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
			mutex_exit(&stp1->sd_reflock);
			goto retry;
		}
		mutex_enter(&stp2->sd_reflock);
		if (stp2->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			mutex_exit(&stp1->sd_reflock);
			cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
			mutex_exit(&stp2->sd_reflock);
			goto retry;
		}
		STREAM_PUTLOCKS_ENTER(stp1);
		STREAM_PUTLOCKS_ENTER(stp2);
	} else {
		mutex_enter(&stp->sd_lock);
		mutex_enter(&stp->sd_reflock);
		while (stp->sd_refcnt > 0) {
			mutex_exit(&stp->sd_lock);
			cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
			if (mutex_tryenter(&stp->sd_lock) == 0) {
				mutex_exit(&stp->sd_reflock);
				mutex_enter(&stp->sd_lock);
				mutex_enter(&stp->sd_reflock);
			}
		}
		STREAM_PUTLOCKS_ENTER(stp);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		syncq_t *sq = sql->sql_sq;
		uint16_t count;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		ASSERT(sq->sq_rmqcount <= count);
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (count == sq->sq_rmqcount)
			continue;

		/* Failed - drop all locks that we have acquired so far */
		if (STRMATED(stp)) {
			STREAM_PUTLOCKS_EXIT(stp);
			STREAM_PUTLOCKS_EXIT(stp->sd_mate);
			STRUNLOCKMATES(stp);
			mutex_exit(&stp->sd_reflock);
			mutex_exit(&stp->sd_mate->sd_reflock);
		} else {
			STREAM_PUTLOCKS_EXIT(stp);
			mutex_exit(&stp->sd_lock);
			mutex_exit(&stp->sd_reflock);
		}
		for (sql2 = sqlist->sqlist_head; sql2 != sql;
		    sql2 = sql2->sql_next) {
			SQ_PUTLOCKS_EXIT(sql2->sql_sq);
			mutex_exit(SQLOCK(sql2->sql_sq));
		}

		/*
		 * The wait loop below may starve when there are many threads
		 * claiming the syncq. This is especially a problem with permod
		 * syncqs (IP). To lessen the impact of the problem we increment
		 * sq_needexcl and clear fastbits so that putnexts will slow
		 * down and call sqenable instead of draining right away.
		 */
		sq->sq_needexcl++;
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		while (count > sq->sq_rmqcount) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			SQ_PUTLOCKS_EXIT(sq);
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0)
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		SQ_PUTLOCKS_EXIT(sq);
		ASSERT(count == sq->sq_rmqcount);
		mutex_exit(SQLOCK(sq));
		goto retry;
	}
}

/*
 * Drop all the locks that strlock acquired.
 */
static void
strunlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql;

	if (STRMATED(stp)) {
		STREAM_PUTLOCKS_EXIT(stp);
		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
		STRUNLOCKMATES(stp);
		mutex_exit(&stp->sd_reflock);
		mutex_exit(&stp->sd_mate->sd_reflock);
	} else {
		STREAM_PUTLOCKS_EXIT(stp);
		mutex_exit(&stp->sd_lock);
		mutex_exit(&stp->sd_reflock);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		SQ_PUTLOCKS_EXIT(sql->sql_sq);
		mutex_exit(SQLOCK(sql->sql_sq));
	}
}

/*
 * When the inserted module has a service procedure, we need to check
 * whether the next module with a service procedure is in flow control,
 * in order to trigger the backenable.
 */
static void
backenable_insertedq(queue_t *q)
{
	qband_t *qbp;

	claimstr(q);
	if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
		if (q->q_next->q_nfsrv->q_flag & QWANTW)
			backenable(q, 0);

		qbp = q->q_next->q_nfsrv->q_bandp;
		for (; qbp != NULL; qbp = qbp->qb_next)
			if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
				backenable(q, qbp->qb_first->b_band);
	}
	releasestr(q);
}

/*
 * Insert a new queue pair into a stream, below the stream head or at
 * the position recorded by the _QINSERTING flag.
 *
 * This routine acquires all the necessary locks in order to change
 * q_next and related pointers using strlock().
 * It depends on the stream head ensuring that there are no concurrent
 * insertq or removeq on the same stream. The stream head ensures this
 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
 *
 * Note that no syncq locks are held during the q_next change. This is
 * applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream. Thus drivers/modules that do a
 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
 * applied this optimization to all streams.
 */
void
insertq(struct stdata *stp, queue_t *new)
{
	queue_t *after;
	queue_t *wafter;
	queue_t *wnew = _WR(new);
	boolean_t have_fifo = B_FALSE;

	if (new->q_flag & _QINSERTING) {
		ASSERT(stp->sd_vnode->v_type != VFIFO);
		after = new->q_next;
		wafter = _WR(new->q_next);
	} else {
		after = _RD(stp->sd_wrq);
		wafter = stp->sd_wrq;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
	    "insertq:%p, %p", after, new);
	ASSERT(after->q_flag & QREADR);
	ASSERT(new->q_flag & QREADR);

	strlock(stp, NULL);

	/* Do we have a FIFO? */
	if (wafter->q_next == after) {
		have_fifo = B_TRUE;
		wnew->q_next = new;
	} else {
		wnew->q_next = wafter->q_next;
	}
	new->q_next = after;

	set_nfsrv_ptr(new, wnew, after, wafter);
	/*
	 * set_nfsrv_ptr() needs to know if this is an insertion or not,
	 * so only reset this flag after calling it.
	 */
	new->q_flag &= ~_QINSERTING;

	if (have_fifo) {
		wafter->q_next = wnew;
	} else {
		if (wafter->q_next)
			_OTHERQ(wafter->q_next)->q_next = new;
		wafter->q_next = wnew;
	}

	set_qend(new);
	/* The QEND flag might have to be updated for the upstream guy */
	set_qend(after);

	ASSERT(_SAMESTR(new) == O_SAMESTR(new));
	ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
	ASSERT(_SAMESTR(after) == O_SAMESTR(after));
	ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
	strsetuio(stp);

	/*
	 * If this was a module insertion, bump the push count.
	 */
	if (!(new->q_flag & QISDRV))
		stp->sd_pushcnt++;

	strunlock(stp, NULL);

	/* check if the write Q needs backenable */
	backenable_insertedq(wnew);

	/* check if the read Q needs backenable */
	backenable_insertedq(new);
}

/*
 * Given a read queue, unlink it from any neighbors.
 *
 * This routine acquires all the necessary locks in order to
 * change q_next and related pointers and also guard against
 * stale references (e.g. through q_next) to the queue that
 * is being removed. It also plays a part in ensuring
 * that the module's/driver's put procedure doesn't get called
 * after qprocsoff returns.
 *
 * Removeq depends on the stream head ensuring that there are
 * no concurrent insertq or removeq on the same stream. The
 * stream head ensures this using the flags STWOPEN, STRCLOSE and
 * STRPLUMB.
 *
 * The set of locks needed to remove the queue is different in
 * different cases:
 *
 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
 * waiting for the syncq reference count to drop to 0 indicating that no
 * non-close threads are present anywhere in the stream. This ensures that any
 * module/driver can reference q_next in its open, close, put, or service
 * procedures.
 *
 * The sq_rmqcount counter tracks the number of threads inside removeq().
 * strlock() ensures that there are either no threads executing inside the
 * perimeter or only threads calling qprocsoff().
 *
 * strlock() compares the value of sq_count with the number of threads inside
 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake
 * up any threads waiting in strlock() when the sq_rmqcount increases.
 */

void
removeq(queue_t *qp)
{
	queue_t *wqp = _WR(qp);
	struct stdata *stp = STREAM(qp);
	sqlist_t *sqlist = NULL;
	boolean_t isdriver;
	int moved;
	syncq_t *sq = qp->q_syncq;
	syncq_t *wsq = wqp->q_syncq;

	ASSERT(stp);

	TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
	    "removeq:%p %p", qp, wqp);
	ASSERT(qp->q_flag&QREADR);

	/*
	 * For queues using Synchronous streams, we must wait for all threads
	 * in rwnext() to drain out before proceeding.
	 */
	if (qp->q_flag & QSYNCSTR) {
		/* First, we need to wake up any threads blocked in rwnext() */
		mutex_enter(SQLOCK(sq));
		if (sq->sq_flags & SQ_WANTWAKEUP) {
			sq->sq_flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		mutex_exit(SQLOCK(sq));

		if (wsq != sq) {
			mutex_enter(SQLOCK(wsq));
			if (wsq->sq_flags & SQ_WANTWAKEUP) {
				wsq->sq_flags &= ~SQ_WANTWAKEUP;
				cv_broadcast(&wsq->sq_wait);
			}
			mutex_exit(SQLOCK(wsq));
		}

		mutex_enter(QLOCK(qp));
		while (qp->q_rwcnt > 0) {
			qp->q_flag |= QWANTRMQSYNC;
			cv_wait(&qp->q_wait, QLOCK(qp));
		}
		mutex_exit(QLOCK(qp));

		mutex_enter(QLOCK(wqp));
		while (wqp->q_rwcnt > 0) {
			wqp->q_flag |= QWANTRMQSYNC;
			cv_wait(&wqp->q_wait, QLOCK(wqp));
		}
		mutex_exit(QLOCK(wqp));
	}

	mutex_enter(SQLOCK(sq));
	sq->sq_rmqcount++;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	mutex_exit(SQLOCK(sq));

	isdriver = (qp->q_flag & QISDRV);

	sqlist = sqlist_build(qp, stp, STRMATED(stp));
	strlock(stp, sqlist);

	reset_nfsrv_ptr(qp, wqp);

	ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
	ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
	/* Do we have a FIFO? */
	if (wqp->q_next == qp) {
		stp->sd_wrq->q_next = _RD(stp->sd_wrq);
	} else {
		if (wqp->q_next)
			backq(qp)->q_next = qp->q_next;
		if (qp->q_next)
			backq(wqp)->q_next = wqp->q_next;
	}

	/* The QEND flag might have to be updated for the upstream guy */
	if (qp->q_next)
		set_qend(qp->q_next);

	ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
	ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));

	/*
	 * Move any messages destined for the put procedures to the next
	 * syncq in line. Otherwise free them.
	 */
	moved = 0;
	/*
	 * Quick check to see whether there are any messages or events.
	 */
	if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
		moved += propagate_syncq(qp);
	if (wqp->q_syncqmsgs != 0 ||
	    (wqp->q_syncq->sq_flags & SQ_EVENTS))
		moved += propagate_syncq(wqp);

	strsetuio(stp);

	/*
	 * If this was a module removal, decrement the push count.
	 */
	if (!isdriver)
		stp->sd_pushcnt--;

	strunlock(stp, sqlist);
	sqlist_free(sqlist);

	/*
	 * Make sure any messages that were propagated are drained.
	 * Also clear any QFULL bit caused by messages that were propagated.
	 */

	if (qp->q_next != NULL) {
		clr_qfull(qp);
		/*
		 * For the driver calling qprocsoff, propagate_syncq
		 * frees all the messages instead of putting them on
		 * the stream head.
		 */
		if (!isdriver && (moved > 0))
			emptysq(qp->q_next->q_syncq);
	}
	if (wqp->q_next != NULL) {
		clr_qfull(wqp);
		/*
		 * We come here for any pop of a module except for the
		 * case of the driver being removed. We don't call emptysq
		 * if we did not move any messages; this avoids holding
		 * PERMOD syncq locks in emptysq.
		 */
		if (moved > 0)
			emptysq(wqp->q_next->q_syncq);
	}

	mutex_enter(SQLOCK(sq));
	sq->sq_rmqcount--;
	mutex_exit(SQLOCK(sq));
}

/*
 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
 * SQ_WRITER) on a syncq.
 * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the
 * sync queue and waits until sq_count reaches maxcnt.
 *
 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller
 * does not care about putnext threads that are in the middle of calling put
 * entry points.
 *
 * This routine is used for both inner and outer syncqs.
 */
static void
blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
{
	uint16_t count = 0;

	mutex_enter(SQLOCK(sq));
	/*
	 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
	 * SQ_FROZEN will be set if there is a frozen stream that has a
	 * queue which also refers to this "shared" syncq.
	 * SQ_BLOCKED will be set if there is an "off" queue which also
	 * refers to this "shared" syncq.
	 */
	if (maxcnt != -1) {
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	sq->sq_needexcl++;
	ASSERT(sq->sq_needexcl != 0);	/* wraparound */

	while ((sq->sq_flags & flag) ||
	    (maxcnt != -1 && count > (unsigned)maxcnt)) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		if (maxcnt != -1) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (maxcnt != -1) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
	}
	sq->sq_needexcl--;
	sq->sq_flags |= flag;
	ASSERT(maxcnt == -1 || count == maxcnt);
	if (maxcnt != -1) {
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
		SQ_PUTLOCKS_EXIT(sq);
	} else if (sq->sq_needexcl == 0) {
		SQ_PUTCOUNT_SETFAST(sq);
	}

	mutex_exit(SQLOCK(sq));
}

/*
 * Reset a flag that was set with blocksq.
 *
 * Can not use this routine to reset SQ_WRITER.
 *
 * If "isouter" is set then the syncq is assumed to be an outer perimeter
 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
 * to handle the queued qwriter operations.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
static void
unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(resetflag != SQ_WRITER);
	ASSERT(sq->sq_flags & resetflag);
	flags = sq->sq_flags & ~resetflag;
	sq->sq_flags = flags;
	if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		sq->sq_flags = flags;
		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
			if (!isouter) {
				/* drain_syncq drops SQLOCK */
				drain_syncq(sq);
				return;
			}
		}
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Reset a flag that was set with blocksq.
 * Does not drain the syncq. Use emptysq() for that.
 * Returns 1 if SQ_QUEUED is set. Otherwise 0.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
static int
dropsq(syncq_t *sq, uint16_t resetflag)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(sq->sq_flags & resetflag);
	flags = sq->sq_flags & ~resetflag;
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	sq->sq_flags = flags;
	mutex_exit(SQLOCK(sq));
	if (flags & SQ_QUEUED)
		return (1);
	return (0);
}

/*
 * Empty all the messages on a syncq.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
static void
emptysq(syncq_t *sq)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
		/*
		 * To prevent potential recursive invocation of drain_syncq we
		 * do not call drain_syncq if count is non-zero.
		 */
		if (sq->sq_count == 0) {
			/* drain_syncq() drops SQLOCK */
			drain_syncq(sq);
			return;
		} else
			sqenable(sq);
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Ordered insert while removing duplicates.
 */
static void
sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
{
	syncql_t *sqlp, **prev_sqlpp, *new_sqlp;

	prev_sqlpp = &sqlist->sqlist_head;
	while ((sqlp = *prev_sqlpp) != NULL) {
		if (sqlp->sql_sq >= sqp) {
			if (sqlp->sql_sq == sqp)	/* duplicate */
				return;
			break;
		}
		prev_sqlpp = &sqlp->sql_next;
	}
	new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
	ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
	new_sqlp->sql_next = sqlp;
	new_sqlp->sql_sq = sqp;
	*prev_sqlpp = new_sqlp;
}
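
/*
 * Example (illustrative; the addresses are made up): the list is kept
 * sorted by syncq address, so inserting sqp values 0x300, 0x100, 0x300,
 * 0x200 yields 0x100 -> 0x200 -> 0x300 with the duplicate dropped.
 * Sorting by address gives every caller the same canonical acquisition
 * order for the per-syncq locks taken over this list.
 */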

/*
 * Walk the write side queues until we hit either the driver
 * or a twist in the stream (_SAMESTR will return false in both
 * these cases) then turn around and walk the read side queues
 * back up to the stream head.
 */
static void
sqlist_insertall(sqlist_t *sqlist, queue_t *q)
{
	while (q != NULL) {
		sqlist_insert(sqlist, q->q_syncq);

		if (_SAMESTR(q))
			q = q->q_next;
		else if (!(q->q_flag & QREADR))
			q = _RD(q);
		else
			q = NULL;
	}
}

/*
 * Allocate and build a list of all syncqs in a stream and the syncq(s)
 * associated with the "q" parameter. The resulting list is sorted in a
 * canonical order and is free of duplicates.
 * Assumes the passed queue is a _RD(q).
 */
static sqlist_t *
sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
{
	sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);

	/*
	 * start with the current queue/qpair
	 */
	ASSERT(q->q_flag & QREADR);

	sqlist_insert(sqlist, q->q_syncq);
	sqlist_insert(sqlist, _WR(q)->q_syncq);

	sqlist_insertall(sqlist, stp->sd_wrq);
	if (do_twist)
		sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);

	return (sqlist);
}

static sqlist_t *
sqlist_alloc(struct stdata *stp, int kmflag)
{
	size_t sqlist_size;
	sqlist_t *sqlist;

	/*
	 * Allocate 2 syncql_t's for each pushed module. Note that
	 * the sqlist_t structure already has 4 syncql_t's built in:
	 * 2 for the stream head, and 2 for the driver/other stream head.
	 */
	sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
	    sizeof (sqlist_t);
	if (STRMATED(stp))
		sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
	sqlist = kmem_alloc(sqlist_size, kmflag);

	sqlist->sqlist_head = NULL;
	sqlist->sqlist_size = sqlist_size;
	sqlist->sqlist_index = 0;

	return (sqlist);
}
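
/*
 * Worked example (illustrative): for an unmated stream with two pushed
 * modules (sd_pushcnt == 2) the allocation is
 *	2 * sizeof (syncql_t) * 2 + sizeof (sqlist_t)
 * i.e. four syncql_t's for the modules on top of the four already built
 * into sqlist_t for the stream-head and driver queue pairs.
 */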

/*
 * Free the list created by sqlist_alloc()
 */
static void
sqlist_free(sqlist_t *sqlist)
{
	kmem_free(sqlist, sqlist->sqlist_size);
}

/*
 * Prevent any new entries into any syncq in this stream.
 * Used by freezestr.
 */
void
strblock(queue_t *q)
{
	struct stdata *stp;
	syncql_t *sql;
	sqlist_t *sqlist;

	q = _RD(q);

	stp = STREAM(q);
	ASSERT(stp != NULL);

	/*
	 * Get a sorted list with all the duplicates removed containing
	 * all the syncqs referenced by this stream.
	 */
	sqlist = sqlist_build(q, stp, B_FALSE);
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		blocksq(sql->sql_sq, SQ_FROZEN, -1);
	sqlist_free(sqlist);
}

/*
 * Release the block on new entries into this stream
 */
void
strunblock(queue_t *q)
{
	struct stdata *stp;
	syncql_t *sql;
	sqlist_t *sqlist;
	int drain_needed;

	q = _RD(q);

	/*
	 * Get a sorted list with all the duplicates removed containing
	 * all the syncqs referenced by this stream.
	 * Have to drop the SQ_FROZEN flag on all the syncqs before
	 * starting to drain them; otherwise the draining might
	 * cause a freezestr in some module on the stream (which
	 * would deadlock).
	 */
	stp = STREAM(q);
	ASSERT(stp != NULL);
	sqlist = sqlist_build(q, stp, B_FALSE);
	drain_needed = 0;
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
	if (drain_needed) {
		for (sql = sqlist->sqlist_head; sql != NULL;
		    sql = sql->sql_next)
			emptysq(sql->sql_sq);
	}
	sqlist_free(sqlist);
}

#ifdef DEBUG
static int
qprocsareon(queue_t *rq)
{
	if (rq->q_next == NULL)
		return (0);
	return (_WR(rq->q_next)->q_next == _WR(rq));
}

int
qclaimed(queue_t *q)
{
	uint_t count;

	count = q->q_syncq->sq_count;
	SUM_SQ_PUTCOUNTS(q->q_syncq, count);
	return (count != 0);
}

/*
 * Check if anyone has frozen this stream with freezestr
 */
int
frozenstr(queue_t *q)
{
	return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
}
#endif /* DEBUG */

/*
 * Enter a queue.
 * Obsoleted interface. Should not be used.
 */
void
enterq(queue_t *q)
{
	entersq(q->q_syncq, SQ_CALLBACK);
}

void
leaveq(queue_t *q)
{
	leavesq(q->q_syncq, SQ_CALLBACK);
}

/*
 * Enter a perimeter. c_inner and c_outer specify which concurrency bits
 * to check.
 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
 * calls and the running of open, close and service procedures.
 *
 * If the c_inner bit is set there is no need to grab sq_putlocks since we
 * don't care if other threads have entered or are entering put entry points.
 *
 * If the c_inner bit is set it might have been possible to use
 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
 * open/close path for IP) but since the count may need to be decremented in
 * qwait() we wouldn't know which counter to decrement. Currently the counter
 * is selected by the current cpu_seqid and the current CPU can change at any
 * moment. XXX in the future we might use curthread id bits to select the
 * counter and this would stay constant across routine calls.
 */
void
entersq(syncq_t *sq, int entrypoint)
{
	uint16_t count = 0;
	uint16_t flags;
	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
	uint16_t type;
	uint_t c_inner = entrypoint & SQ_CI;
	uint_t c_outer = entrypoint & SQ_CO;

	/*
	 * Increment ref count to keep closes out of this queue.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (!(type & c_inner)) {
		/* Make sure all putcounts now use slowlock. */
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		sq->sq_needexcl++;
		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
		waitflags |= SQ_MESSAGES;
	}
	/*
	 * Wait until we can enter the inner perimeter.
	 * If we want exclusive access we wait until sq_count is 0.
	 * We have to do this before entering the outer perimeter in order
	 * to preserve put/close message ordering.
	 */
	while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
		sq->sq_flags = flags | SQ_WANTWAKEUP;
		if (!(type & c_inner)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (!(type & c_inner)) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		flags = sq->sq_flags;
	}

	if (!(type & c_inner)) {
		ASSERT(sq->sq_needexcl > 0);
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
	}

	/* Check if we need to enter the outer perimeter */
	if (!(type & c_outer)) {
		/*
		 * We have to enter the outer perimeter exclusively before
		 * we can increment sq_count to avoid deadlock. This implies
		 * that we have to re-check sq_flags and sq_count.
		 *
		 * is it possible to have c_inner set when c_outer is not set?
		 */
		if (!(type & c_inner)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		mutex_exit(SQLOCK(sq));
		outer_enter(sq->sq_outer, SQ_GOAWAY);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * there should be no need to recheck sq_putcounts
		 * because outer_enter() has already waited for them to clear
		 * after setting SQ_WRITER.
		 */
		count = sq->sq_count;
#ifdef DEBUG
		/*
		 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
		 * of doing an ASSERT internally. Others should do
		 * something like
		 *	ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
		 * without the need to #ifdef DEBUG it.
		 */
		SUMCHECK_SQ_PUTCOUNTS(sq, 0);
#endif
		while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
		    (!(type & c_inner) && count != 0)) {
			sq->sq_flags = flags | SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			flags = sq->sq_flags;
		}
	}

	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	if (!(type & c_inner)) {
		/* Exclusive entry */
		ASSERT(sq->sq_count == 1);
		sq->sq_flags |= SQ_EXCL;
		if (type & c_outer) {
			SQ_PUTLOCKS_EXIT(sq);
		}
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Leave a syncq. Announce to framework that closes may proceed.
 * c_inner and c_outer specify which concurrency bits to check.
 *
 * Must never be called from driver or module put entry point.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
void
leavesq(syncq_t *sq, int entrypoint)
{
	uint16_t flags;
	uint16_t type;
	uint_t c_outer = entrypoint & SQ_CO;
#ifdef DEBUG
	uint_t c_inner = entrypoint & SQ_CI;
#endif

	/*
	 * Decrement ref count, drain the syncq if possible, and wake up
	 * any waiting close.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {

		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		if (flags & SQ_WANTEXWAKEUP) {
			flags &= ~SQ_WANTEXWAKEUP;
			cv_broadcast(&sq->sq_exitwait);
		}

		if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
			/*
			 * The syncq needs to be drained. "Exit" the syncq
			 * before calling drain_syncq.
			 */
			ASSERT(sq->sq_count != 0);
			sq->sq_count--;
			ASSERT((flags & SQ_EXCL) || (type & c_inner));
			sq->sq_flags = flags & ~SQ_EXCL;
			drain_syncq(sq);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
			/* Check if we need to exit the outer perimeter */
			/* XXX will this ever be true? */
			if (!(type & c_outer))
				outer_exit(sq->sq_outer);
			return;
		}
	}
	ASSERT(sq->sq_count != 0);
	sq->sq_count--;
	ASSERT((flags & SQ_EXCL) || (type & c_inner));
	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));

	/* Check if we need to exit the outer perimeter */
	if (!(sq->sq_type & c_outer))
		outer_exit(sq->sq_outer);
}
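
/*
 * Usage sketch (illustrative; the open path is abbreviated and
 * hypothetical): framework code brackets open/close processing with the
 * pair above, naming the entry point whose concurrency bits apply:
 *
 *	entersq(rq->q_syncq, SQ_OPENCLOSE);
 *	error = (*rq->q_qinfo->qi_qopen)(rq, devp, flag, sflag, crp);
 *	leavesq(rq->q_syncq, SQ_OPENCLOSE);
 *
 * The real open path (e.g. qattach/qreopen) layers more handling around
 * this, but the enter/leave pairing is the essential contract.
 */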

/*
 * Prevent q_next from changing in this stream by incrementing sq_count.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
void
claimq(queue_t *qp)
{
	syncq_t *sq = qp->q_syncq;

	mutex_enter(SQLOCK(sq));
	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	mutex_exit(SQLOCK(sq));
}

/*
 * Undo claimq.
 *
 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
 * sq_putlocks are used.
 */
void
releaseq(queue_t *qp)
{
	syncq_t *sq = qp->q_syncq;
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(sq->sq_count > 0);
	sq->sq_count--;

	flags = sq->sq_flags;
	if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		sq->sq_flags = flags;
		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
			/*
			 * To prevent potential recursive invocation of
			 * drain_syncq we do not call drain_syncq if count is
			 * non-zero.
			 */
			if (sq->sq_count == 0) {
				drain_syncq(sq);
				return;
			} else
				sqenable(sq);
		}
	}
	mutex_exit(SQLOCK(sq));
}
5636
5637 /*
5638 * Prevent q_next from changing in this stream by incrementing sd_refcnt.
5639 */
5640 void
claimstr(queue_t * qp)5641 claimstr(queue_t *qp)
5642 {
5643 struct stdata *stp = STREAM(qp);
5644
5645 mutex_enter(&stp->sd_reflock);
5646 stp->sd_refcnt++;
5647 ASSERT(stp->sd_refcnt != 0); /* Wraparound */
5648 mutex_exit(&stp->sd_reflock);
5649 }
5650
5651 /*
5652 * Undo claimstr.
5653 */
5654 void
releasestr(queue_t * qp)5655 releasestr(queue_t *qp)
5656 {
5657 struct stdata *stp = STREAM(qp);
5658
5659 mutex_enter(&stp->sd_reflock);
5660 ASSERT(stp->sd_refcnt != 0);
5661 if (--stp->sd_refcnt == 0)
5662 cv_broadcast(&stp->sd_refmonitor);
5663 mutex_exit(&stp->sd_reflock);
5664 }
5665
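/*
 * Illustrative sketch (added for clarity): a typical use of the
 * claimstr/releasestr pair is to hold the q_next linkage of a stream
 * stable while walking it. The loop below is hypothetical; only
 * claimstr/releasestr are interfaces from this file.
 *
 *	claimstr(qp);			// q_next cannot change now
 *	for (q = qp; q != NULL; q = q->q_next)
 *		;			// inspect each queue safely
 *	releasestr(qp);			// may wake an sd_refmonitor waiter
 */
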
5666 static syncq_t *
5667 new_syncq(void)
5668 {
5669 return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
5670 }
5671
5672 static void
5673 free_syncq(syncq_t *sq)
5674 {
5675 ASSERT(sq->sq_head == NULL);
5676 ASSERT(sq->sq_outer == NULL);
5677 ASSERT(sq->sq_callbpend == NULL);
5678 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
5679 (sq->sq_onext == sq && sq->sq_oprev == sq));
5680
5681 if (sq->sq_ciputctrl != NULL) {
5682 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
5683 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
5684 sq->sq_nciputctrl, 0);
5685 ASSERT(ciputctrl_cache != NULL);
5686 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
5687 }
5688
5689 sq->sq_tail = NULL;
5690 sq->sq_evhead = NULL;
5691 sq->sq_evtail = NULL;
5692 sq->sq_ciputctrl = NULL;
5693 sq->sq_nciputctrl = 0;
5694 sq->sq_count = 0;
5695 sq->sq_rmqcount = 0;
5696 sq->sq_callbflags = 0;
5697 sq->sq_cancelid = 0;
5698 sq->sq_next = NULL;
5699 sq->sq_needexcl = 0;
5700 sq->sq_svcflags = 0;
5701 sq->sq_nqueues = 0;
5702 sq->sq_pri = 0;
5703 sq->sq_onext = NULL;
5704 sq->sq_oprev = NULL;
5705 sq->sq_flags = 0;
5706 sq->sq_type = 0;
5707 sq->sq_servcount = 0;
5708
5709 kmem_cache_free(syncq_cache, sq);
5710 }
5711
5712 /* Outer perimeter code */
5713
5714 /*
5715 * The outer syncq uses the fields and flags in the syncq slightly
5716 * differently from the inner syncqs.
5717 * sq_count Incremented when there are pending or running
5718 * writers at the outer perimeter to prevent the set of
5719 * inner syncqs that belong to the outer perimeter from
5720 * changing.
5721 * sq_head/tail List of deferred qwriter(OUTER) operations.
5722 *
5723 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while
5724 * inner syncqs are added to or removed from the
5725 * outer perimeter.
5726 * SQ_QUEUED sq_head/tail has messages or events queued.
5727 *
5728 * SQ_WRITER A thread is currently traversing all the inner syncqs
5729 * setting the SQ_WRITER flag.
5730 */
5731
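/*
 * Illustrative sketch (added for clarity): the inner syncqs of an
 * outer perimeter form a circular doubly-linked list through
 * sq_onext/sq_oprev, with the outer syncq itself on the ring:
 *
 *	outer <-> inner1 <-> inner2 <-> ... <-> outer
 *
 * which is why the traversals below take the form
 *	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
 */
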
5732 /*
5733 * Get write access at the outer perimeter.
5734 * Note that read access is done by entersq, putnext, and put by simply
5735 * incrementing sq_count in the inner syncq.
5736 *
5737 * Waits until "flags" is no longer set in the outer to prevent multiple
5738 * threads from having write access at the same time. SQ_WRITER has to be part
5739 * of "flags".
5740 *
5741 * Increases sq_count on the outer syncq to keep away outer_insert/remove
5742 * until the outer_exit is finished.
5743 *
5744 * outer_enter is vulnerable to starvation since it does not prevent new
5745 * threads from entering the inner syncqs while it is waiting for sq_count to
5746 * go to zero.
5747 */
5748 void
5749 outer_enter(syncq_t *outer, uint16_t flags)
5750 {
5751 syncq_t *sq;
5752 int wait_needed;
5753 uint16_t count;
5754
5755 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5756 outer->sq_oprev != NULL);
5757 ASSERT(flags & SQ_WRITER);
5758
5759 retry:
5760 mutex_enter(SQLOCK(outer));
5761 while (outer->sq_flags & flags) {
5762 outer->sq_flags |= SQ_WANTWAKEUP;
5763 cv_wait(&outer->sq_wait, SQLOCK(outer));
5764 }
5765
5766 ASSERT(!(outer->sq_flags & SQ_WRITER));
5767 outer->sq_flags |= SQ_WRITER;
5768 outer->sq_count++;
5769 ASSERT(outer->sq_count != 0); /* wraparound */
5770 wait_needed = 0;
5771 /*
5772 * Set SQ_WRITER on all the inner syncqs while holding
5773 * the SQLOCK on the outer syncq. This ensures that the changing
5774 * of SQ_WRITER is atomic under the outer SQLOCK.
5775 */
5776 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5777 mutex_enter(SQLOCK(sq));
5778 count = sq->sq_count;
5779 SQ_PUTLOCKS_ENTER(sq);
5780 sq->sq_flags |= SQ_WRITER;
5781 SUM_SQ_PUTCOUNTS(sq, count);
5782 if (count != 0)
5783 wait_needed = 1;
5784 SQ_PUTLOCKS_EXIT(sq);
5785 mutex_exit(SQLOCK(sq));
5786 }
5787 mutex_exit(SQLOCK(outer));
5788
5789 /*
5790 * Get everybody out of the syncqs sequentially.
5791 * Note that we don't actually need to acquire the PUTLOCKS, since
5792 * we have already cleared the fastbit, and set QWRITER. By
5793 * definition, the count can not increase since putnext will
5794 * take the slowlock path (and the purpose of acquiring the
5795 * putlocks was to make sure it didn't increase while we were
5796 * waiting).
5797 *
5798 * Note that we still acquire the PUTLOCKS to be safe.
5799 */
5800 if (wait_needed) {
5801 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5802 mutex_enter(SQLOCK(sq));
5803 count = sq->sq_count;
5804 SQ_PUTLOCKS_ENTER(sq);
5805 SUM_SQ_PUTCOUNTS(sq, count);
5806 while (count != 0) {
5807 sq->sq_flags |= SQ_WANTWAKEUP;
5808 SQ_PUTLOCKS_EXIT(sq);
5809 cv_wait(&sq->sq_wait, SQLOCK(sq));
5810 count = sq->sq_count;
5811 SQ_PUTLOCKS_ENTER(sq);
5812 SUM_SQ_PUTCOUNTS(sq, count);
5813 }
5814 SQ_PUTLOCKS_EXIT(sq);
5815 mutex_exit(SQLOCK(sq));
5816 }
5817 /*
5818 * Verify that none of the flags got set while we
5819 * were waiting for the sq_counts to drop.
5820 * If this happens we exit and retry entering the
5821 * outer perimeter.
5822 */
5823 mutex_enter(SQLOCK(outer));
5824 if (outer->sq_flags & (flags & ~SQ_WRITER)) {
5825 mutex_exit(SQLOCK(outer));
5826 outer_exit(outer);
5827 goto retry;
5828 }
5829 mutex_exit(SQLOCK(outer));
5830 }
5831 }
5832
5833 /*
5834 * Drop the write access at the outer perimeter.
5835 * Read access is dropped implicitly (by putnext, put, and leavesq) by
5836 * decrementing sq_count.
5837 */
5838 void
5839 outer_exit(syncq_t *outer)
5840 {
5841 syncq_t *sq;
5842 int drain_needed;
5843 uint16_t flags;
5844
5845 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5846 outer->sq_oprev != NULL);
5847 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));
5848
5849 /*
5850 * Atomically (from the perspective of threads calling become_writer)
5851 * drop the write access at the outer perimeter by holding
5852 * SQLOCK(outer) across all the dropsq calls and the resetting of
5853 * SQ_WRITER.
5854 * This defines a locking order between the outer perimeter
5855 * SQLOCK and the inner perimeter SQLOCKs.
5856 */
5857 mutex_enter(SQLOCK(outer));
5858 flags = outer->sq_flags;
5859 ASSERT(outer->sq_flags & SQ_WRITER);
5860 if (flags & SQ_QUEUED) {
5861 write_now(outer);
5862 flags = outer->sq_flags;
5863 }
5864
5865 /*
5866 * sq_onext is stable since sq_count has not yet been decreased.
5867 * Reset the SQ_WRITER flags in all syncqs.
5868 * After dropping SQ_WRITER on the outer syncq we empty all the
5869 * inner syncqs.
5870 */
5871 drain_needed = 0;
5872 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5873 drain_needed += dropsq(sq, SQ_WRITER);
5874 ASSERT(!(outer->sq_flags & SQ_QUEUED));
5875 flags &= ~SQ_WRITER;
5876 if (drain_needed) {
5877 outer->sq_flags = flags;
5878 mutex_exit(SQLOCK(outer));
5879 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5880 emptysq(sq);
5881 mutex_enter(SQLOCK(outer));
5882 flags = outer->sq_flags;
5883 }
5884 if (flags & SQ_WANTWAKEUP) {
5885 flags &= ~SQ_WANTWAKEUP;
5886 cv_broadcast(&outer->sq_wait);
5887 }
5888 outer->sq_flags = flags;
5889 ASSERT(outer->sq_count > 0);
5890 outer->sq_count--;
5891 mutex_exit(SQLOCK(outer));
5892 }
5893
5894 /*
5895 * Add another syncq to an outer perimeter.
5896 * Block out all other access to the outer perimeter while it is being
5897 * changed using blocksq.
5898 * Assumes that the caller has *not* done an outer_enter.
5899 *
5900 * Vulnerable to starvation in blocksq.
5901 */
5902 static void
5903 outer_insert(syncq_t *outer, syncq_t *sq)
5904 {
5905 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5906 outer->sq_oprev != NULL);
5907 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
5908 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */
5909
5910 /* Get exclusive access to the outer perimeter list */
5911 blocksq(outer, SQ_BLOCKED, 0);
5912 ASSERT(outer->sq_flags & SQ_BLOCKED);
5913 ASSERT(!(outer->sq_flags & SQ_WRITER));
5914
5915 mutex_enter(SQLOCK(sq));
5916 sq->sq_outer = outer;
5917 outer->sq_onext->sq_oprev = sq;
5918 sq->sq_onext = outer->sq_onext;
5919 outer->sq_onext = sq;
5920 sq->sq_oprev = outer;
5921 mutex_exit(SQLOCK(sq));
5922 unblocksq(outer, SQ_BLOCKED, 1);
5923 }
5924
5925 /*
5926 * Remove a syncq from an outer perimeter.
5927 * Block out all other access to the outer perimeter while it is being
5928 * changed using blocksq.
5929 * Assumes that the caller has *not* done an outer_enter.
5930 *
5931 * Vulnerable to starvation in blocksq.
5932 */
5933 static void
5934 outer_remove(syncq_t *outer, syncq_t *sq)
5935 {
5936 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5937 outer->sq_oprev != NULL);
5938 ASSERT(sq->sq_outer == outer);
5939
5940 /* Get exclusive access to the outer perimeter list */
5941 blocksq(outer, SQ_BLOCKED, 0);
5942 ASSERT(outer->sq_flags & SQ_BLOCKED);
5943 ASSERT(!(outer->sq_flags & SQ_WRITER));
5944
5945 mutex_enter(SQLOCK(sq));
5946 sq->sq_outer = NULL;
5947 sq->sq_onext->sq_oprev = sq->sq_oprev;
5948 sq->sq_oprev->sq_onext = sq->sq_onext;
5949 sq->sq_oprev = sq->sq_onext = NULL;
5950 mutex_exit(SQLOCK(sq));
5951 unblocksq(outer, SQ_BLOCKED, 1);
5952 }
5953
5954 /*
5955 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
5956 * If this is the first callback for this outer perimeter then add
5957 * this outer perimeter to the list of outer perimeters that
5958 * the qwriter_outer_thread will process.
5959 *
5960 * Increments sq_count in the outer syncq to prevent the membership
5961 * of the outer perimeter (in terms of inner syncqs) to change while
5962 * the callback is pending.
5963 */
5964 static void
5965 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5966 {
5967 ASSERT(MUTEX_HELD(SQLOCK(outer)));
5968
5969 mp->b_prev = (mblk_t *)func;
5970 mp->b_queue = q;
5971 mp->b_next = NULL;
5972 outer->sq_count++; /* Decremented when dequeued */
5973 ASSERT(outer->sq_count != 0); /* Wraparound */
5974 if (outer->sq_evhead == NULL) {
5975 /* First message. */
5976 outer->sq_evhead = outer->sq_evtail = mp;
5977 outer->sq_flags |= SQ_EVENTS;
5978 mutex_exit(SQLOCK(outer));
5979 STRSTAT(qwr_outer);
5980 (void) taskq_dispatch(streams_taskq,
5981 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5982 } else {
5983 ASSERT(outer->sq_flags & SQ_EVENTS);
5984 outer->sq_evtail->b_next = mp;
5985 outer->sq_evtail = mp;
5986 mutex_exit(SQLOCK(outer));
5987 }
5988 }
5989
5990 /*
5991 * Try and upgrade to write access at the outer perimeter. If this can
5992 * not be done without blocking then queue the callback to be done
5993 * by the qwriter_outer_thread.
5994 *
5995 * This routine can only be called from put or service procedures plus
5996 * asynchronous callback routines that have properly entered the queue (with
5997 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
5998 * associated with q.
5999 */
6000 void
6001 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
6002 {
6003 syncq_t *osq, *sq, *outer;
6004 int failed;
6005 uint16_t flags;
6006
6007 osq = q->q_syncq;
6008 outer = osq->sq_outer;
6009 if (outer == NULL)
6010 panic("qwriter(PERIM_OUTER): no outer perimeter");
6011 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6012 outer->sq_oprev != NULL);
6013
6014 mutex_enter(SQLOCK(outer));
6015 flags = outer->sq_flags;
6016 /*
6017 * If some thread is traversing sq_next, or if we are blocked by
6018 * outer_insert or outer_remove, or if we already have queued
6019 * callbacks, then queue this callback for later processing.
6020 *
6021 * Also queue the qwriter for an interrupt thread in order
6022 * to reduce the time spent running at high IPL.
6024 */
6025 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
6026 /*
6027 * Queue the become_writer request.
6028 * The queueing is atomic under SQLOCK(outer) in order
6029 * to synchronize with outer_exit.
6030 * queue_writer will drop the outer SQLOCK
6031 */
6032 if (flags & SQ_BLOCKED) {
6033 /* Must set SQ_WRITER on inner perimeter */
6034 mutex_enter(SQLOCK(osq));
6035 osq->sq_flags |= SQ_WRITER;
6036 mutex_exit(SQLOCK(osq));
6037 } else {
6038 if (!(flags & SQ_WRITER)) {
6039 /*
6040 * The outer could have been SQ_BLOCKED thus
6041 * SQ_WRITER might not be set on the inner.
6042 */
6043 mutex_enter(SQLOCK(osq));
6044 osq->sq_flags |= SQ_WRITER;
6045 mutex_exit(SQLOCK(osq));
6046 }
6047 ASSERT(osq->sq_flags & SQ_WRITER);
6048 }
6049 queue_writer(outer, func, q, mp);
6050 return;
6051 }
6052 /*
6053 * We are half-way to exclusive access to the outer perimeter.
6054 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6055 * while the inner syncqs are traversed.
6056 */
6057 outer->sq_count++;
6058 ASSERT(outer->sq_count != 0); /* wraparound */
6059 flags |= SQ_WRITER;
6060 /*
6061 * Check if we can run the function immediately. Mark all
6062 * syncqs with the writer flag to prevent new entries into
6063 * put and service procedures.
6064 *
6065 * Set SQ_WRITER on all the inner syncqs while holding
6066 * the SQLOCK on the outer syncq. This ensures that the changing
6067 * of SQ_WRITER is atomic under the outer SQLOCK.
6068 */
6069 failed = 0;
6070 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6071 uint16_t count;
6072 uint_t maxcnt = (sq == osq) ? 1 : 0;
6073
6074 mutex_enter(SQLOCK(sq));
6075 count = sq->sq_count;
6076 SQ_PUTLOCKS_ENTER(sq);
6077 SUM_SQ_PUTCOUNTS(sq, count);
6078 if (sq->sq_count > maxcnt)
6079 failed = 1;
6080 sq->sq_flags |= SQ_WRITER;
6081 SQ_PUTLOCKS_EXIT(sq);
6082 mutex_exit(SQLOCK(sq));
6083 }
6084 if (failed) {
6085 /*
6086 * Some other thread has a read claim on the outer perimeter.
6087 * Queue the callback for deferred processing.
6088 *
6089 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6090 * so that other qwriter(OUTER) calls will queue their
6091 * callbacks as well. queue_writer increments sq_count so we
6092 * decrement to compensate for our increment.
6093 *
6094 * Dropping SQ_WRITER enables the writer thread to work
6095 * on this outer perimeter.
6096 */
6097 outer->sq_flags = flags;
6098 queue_writer(outer, func, q, mp);
6099 /* queue_writer dropped the lock */
6100 mutex_enter(SQLOCK(outer));
6101 ASSERT(outer->sq_count > 0);
6102 outer->sq_count--;
6103 ASSERT(outer->sq_flags & SQ_WRITER);
6104 flags = outer->sq_flags;
6105 flags &= ~SQ_WRITER;
6106 if (flags & SQ_WANTWAKEUP) {
6107 flags &= ~SQ_WANTWAKEUP;
6108 cv_broadcast(&outer->sq_wait);
6109 }
6110 outer->sq_flags = flags;
6111 mutex_exit(SQLOCK(outer));
6112 return;
6113 } else {
6114 outer->sq_flags = flags;
6115 mutex_exit(SQLOCK(outer));
6116 }
6117
6118 /* Can run it immediately */
6119 (*func)(q, mp);
6120
6121 outer_exit(outer);
6122 }
6123
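/*
 * Illustrative sketch (added for clarity): a module normally reaches
 * qwriter_outer through the qwriter(9F) entry point with PERIM_OUTER.
 * The callback name below is hypothetical.
 *
 *	static void
 *	xx_reconfig(queue_t *q, mblk_t *mp)
 *	{
 *		// Runs exclusive at the outer perimeter, either
 *		// immediately (above) or deferred via queue_writer().
 *	}
 *
 *	// From a put/service procedure that already holds one claim:
 *	qwriter(q, mp, xx_reconfig, PERIM_OUTER);
 *	// mp now belongs to the perimeter code; do not touch it again.
 */
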
6124 /*
6125 * Dequeue all writer callbacks from the outer perimeter and run them.
6126 */
6127 static void
6128 write_now(syncq_t *outer)
6129 {
6130 mblk_t *mp;
6131 queue_t *q;
6132 void (*func)();
6133
6134 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6135 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6136 outer->sq_oprev != NULL);
6137 while ((mp = outer->sq_evhead) != NULL) {
6138 /*
6139 * queues cannot be placed on the queuelist on the outer
6140 * perimeter.
6141 */
6142 ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6143 ASSERT((outer->sq_flags & SQ_EVENTS));
6144
6145 outer->sq_evhead = mp->b_next;
6146 if (outer->sq_evhead == NULL) {
6147 outer->sq_evtail = NULL;
6148 outer->sq_flags &= ~SQ_EVENTS;
6149 }
6150 ASSERT(outer->sq_count != 0);
6151 outer->sq_count--; /* Incremented when enqueued. */
6152 mutex_exit(SQLOCK(outer));
6153 /*
6154 * Drop the message if the queue is closing.
6155 * Make sure that the queue is "claimed" when the callback
6156 * is run in order to satisfy various ASSERTs.
6157 */
6158 q = mp->b_queue;
6159 func = (void (*)())mp->b_prev;
6160 ASSERT(func != NULL);
6161 mp->b_next = mp->b_prev = NULL;
6162 if (q->q_flag & QWCLOSE) {
6163 freemsg(mp);
6164 } else {
6165 claimq(q);
6166 (*func)(q, mp);
6167 releaseq(q);
6168 }
6169 mutex_enter(SQLOCK(outer));
6170 }
6171 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6172 }
6173
6174 /*
6175 * The list of messages on the inner syncq is effectively hashed
6176 * by destination queue. These destination queues are doubly
6177 * linked lists (hopefully) in priority order. Messages are then
6178 * put on the queue referenced by the q_sqhead/q_sqtail elements.
6179 * Additional messages are linked together by the b_next/b_prev
6180 * elements in the mblk, with (similar to putq()) the first message
6181 * having a NULL b_prev and the last message having a NULL b_next.
6182 *
6183 * Events, such as qwriter callbacks, are put onto a list in FIFO
6184 * order referenced by sq_evhead, and sq_evtail. This is a singly
6185 * linked list, and messages here MUST be processed in the order queued.
6186 */
6187
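/*
 * Illustrative sketch (added for clarity) of the layout just described:
 *
 *	sq_head -> queueA <-> queueB <-> ...	(q_sqnext/q_sqprev,
 *	              |          |		 priority ordered)
 *	          q_sqhead   q_sqhead
 *	              |          |
 *	            mblk       mblk		(b_next/b_prev links)
 *	              |          |
 *	            mblk       mblk
 *
 *	sq_evhead -> mblk -> mblk -> sq_evtail	(FIFO, singly linked)
 */
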
6188 /*
6189 * Run the events on the syncq event list (sq_evhead).
6190 * Assumes there is only one claim on the syncq, it is
6191 * already exclusive (SQ_EXCL set), and the SQLOCK held.
6192 * Messages here are processed in order, with the SQ_EXCL bit
6193 * held all the way through till the last message is processed.
6194 */
6195 void
6196 sq_run_events(syncq_t *sq)
6197 {
6198 mblk_t *bp;
6199 queue_t *qp;
6200 uint16_t flags = sq->sq_flags;
6201 void (*func)();
6202
6203 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6204 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6205 sq->sq_oprev == NULL) ||
6206 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6207 sq->sq_oprev != NULL));
6208
6209 ASSERT(flags & SQ_EXCL);
6210 ASSERT(sq->sq_count == 1);
6211
6212 /*
6213 * We need to process all of the events on this list. It
6214 * is possible that new events will be added while we are
6215 * away processing a callback, so on every loop we start
6216 * back at the beginning of the list, re-reading sq_evhead
6217 * in case a new entry arrived while we were running
6218 * the callback.
6219 */
6223 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
6224 ASSERT(bp->b_queue->q_syncq == sq);
6225 ASSERT(sq->sq_flags & SQ_EVENTS);
6226
6227 qp = bp->b_queue;
6228 func = (void (*)())bp->b_prev;
6229 ASSERT(func != NULL);
6230
6231 /*
6232 * Messages from the event queue must be taken off in
6233 * FIFO order.
6234 */
6235 ASSERT(sq->sq_evhead == bp);
6236 sq->sq_evhead = bp->b_next;
6237
6238 if (bp->b_next == NULL) {
6239 /* Deleting last */
6240 ASSERT(sq->sq_evtail == bp);
6241 sq->sq_evtail = NULL;
6242 sq->sq_flags &= ~SQ_EVENTS;
6243 }
6244 bp->b_prev = bp->b_next = NULL;
6245 ASSERT(bp->b_datap->db_ref != 0);
6246
6247 mutex_exit(SQLOCK(sq));
6248
6249 (*func)(qp, bp);
6250
6251 mutex_enter(SQLOCK(sq));
6252 /*
6253 * re-read the flags, since they could have changed.
6254 */
6255 flags = sq->sq_flags;
6256 ASSERT(flags & SQ_EXCL);
6257 }
6258 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6259 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6260
6261 if (flags & SQ_WANTWAKEUP) {
6262 flags &= ~SQ_WANTWAKEUP;
6263 cv_broadcast(&sq->sq_wait);
6264 }
6265 if (flags & SQ_WANTEXWAKEUP) {
6266 flags &= ~SQ_WANTEXWAKEUP;
6267 cv_broadcast(&sq->sq_exitwait);
6268 }
6269 sq->sq_flags = flags;
6270 }
6271
6272 /*
6273 * Put messages on the event list.
6274 * If we can go exclusive now, do so and process the event list, otherwise
6275 * let the last claim service this list (or wake the sqthread).
6276 * This procedure assumes SQLOCK is held. To run the event list, it
6277 * must be called with no claims.
6278 */
6279 static void
6280 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6281 {
6282 uint16_t count;
6283
6284 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6285 ASSERT(func != NULL);
6286
6287 /*
6288 * This is a callback. Add it to the list of callbacks
6289 * and see about upgrading.
6290 */
6291 mp->b_prev = (mblk_t *)func;
6292 mp->b_queue = q;
6293 mp->b_next = NULL;
6294 if (sq->sq_evhead == NULL) {
6295 sq->sq_evhead = sq->sq_evtail = mp;
6296 sq->sq_flags |= SQ_EVENTS;
6297 } else {
6298 ASSERT(sq->sq_evtail != NULL);
6299 ASSERT(sq->sq_evtail->b_next == NULL);
6300 ASSERT(sq->sq_flags & SQ_EVENTS);
6301 sq->sq_evtail->b_next = mp;
6302 sq->sq_evtail = mp;
6303 }
6304 /*
6305 * We have set SQ_EVENTS, so threads will have to
6306 * unwind out of the perimeter, and new entries will
6307 * not grab a putlock. But we still need to know
6308 * how many threads have already made a claim to the
6309 * syncq, so grab the putlocks, and sum the counts.
6310 * If there are no claims on the syncq, we can upgrade
6311 * to exclusive, and run the event list.
6312 * NOTE: We hold the SQLOCK, so we can just grab the
6313 * putlocks.
6314 */
6315 count = sq->sq_count;
6316 SQ_PUTLOCKS_ENTER(sq);
6317 SUM_SQ_PUTCOUNTS(sq, count);
6318 /*
6319 * This thread holds no claim on the syncq (at least not on this
6320 * entry), so if there are no other claims we can upgrade to
6321 * exclusive. Otherwise the thread holding the claim is responsible
6322 * for draining the syncq.
6323 */
6327 if (count > 0) {
6328 /*
6329 * Can't upgrade - other threads inside.
6330 */
6331 SQ_PUTLOCKS_EXIT(sq);
6332 mutex_exit(SQLOCK(sq));
6333 return;
6334 }
6335 /*
6336 * Need to set SQ_EXCL and make a claim on the syncq.
6337 */
6338 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6339 sq->sq_flags |= SQ_EXCL;
6340 ASSERT(sq->sq_count == 0);
6341 sq->sq_count++;
6342 SQ_PUTLOCKS_EXIT(sq);
6343
6344 /* Process the events list */
6345 sq_run_events(sq);
6346
6347 /*
6348 * Release our claim...
6349 */
6350 sq->sq_count--;
6351
6352 /*
6353 * And release SQ_EXCL.
6354 * We don't need to acquire the putlocks to release
6355 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6356 */
6357 sq->sq_flags &= ~SQ_EXCL;
6358
6359 /*
6360 * SQ_EXCL must now be clear.
6361 */
6362 ASSERT(!(sq->sq_flags & SQ_EXCL));
6363
6364 /*
6365 * If anything happened while we were running the
6366 * events (or was there before), we need to process
6367 * them now. We shouldn't be exclusive since we
6368 * released the perimeter above (plus, we asserted
6369 * for it).
6370 */
6371 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6372 drain_syncq(sq);
6373 else
6374 mutex_exit(SQLOCK(sq));
6375 }
6376
6377 /*
6378 * Perform delayed processing. The caller has to make sure that it is safe
6379 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6380 * set).
6381 *
6382 * Assume that the caller has NO claims on the syncq. However, a claim
6383 * on the syncq does not indicate that a thread is draining the syncq.
6384 * There may be more claims on the syncq than there are threads draining
6385 * (i.e. #_threads_draining <= sq_count)
6386 *
6387 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6388 * in order to preserve qwriter(OUTER) ordering constraints.
6389 *
6390 * sq_putcount only needs to be checked when dispatching the queued
6391 * writer call for CIPUT sync queue, but this is handled in sq_run_events.
6392 */
6393 void
6394 drain_syncq(syncq_t *sq)
6395 {
6396 queue_t *qp;
6397 uint16_t count;
6398 uint16_t type = sq->sq_type;
6399 uint16_t flags = sq->sq_flags;
6400 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6401
6402 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6403 "drain_syncq start:%p", sq);
6404 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6405 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6406 sq->sq_oprev == NULL) ||
6407 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6408 sq->sq_oprev != NULL));
6409
6410 /*
6411 * Drop SQ_SERVICE flag.
6412 */
6413 if (bg_service)
6414 sq->sq_svcflags &= ~SQ_SERVICE;
6415
6416 /*
6417 * If SQ_EXCL is set, someone else is processing this syncq - let them
6418 * finish the job.
6419 */
6420 if (flags & SQ_EXCL) {
6421 if (bg_service) {
6422 ASSERT(sq->sq_servcount != 0);
6423 sq->sq_servcount--;
6424 }
6425 mutex_exit(SQLOCK(sq));
6426 return;
6427 }
6428
6429 /*
6430 * This routine can be called by a background thread if
6431 * it was scheduled by a hi-priority thread. So, if there are
6432 * no messages queued, wake up any waiters and return (remember,
6433 * we have the SQLOCK, and it cannot change until we release it).
6434 */
6435 if (!(flags & SQ_QUEUED)) {
6436 if (flags & SQ_WANTWAKEUP) {
6437 flags &= ~SQ_WANTWAKEUP;
6438 cv_broadcast(&sq->sq_wait);
6439 }
6440 if (flags & SQ_WANTEXWAKEUP) {
6441 flags &= ~SQ_WANTEXWAKEUP;
6442 cv_broadcast(&sq->sq_exitwait);
6443 }
6444 sq->sq_flags = flags;
6445 if (bg_service) {
6446 ASSERT(sq->sq_servcount != 0);
6447 sq->sq_servcount--;
6448 }
6449 mutex_exit(SQLOCK(sq));
6450 return;
6451 }
6452
6453 /*
6454 * If this is not a concurrent put perimeter, we need to
6455 * become exclusive to drain. Also, if not CIPUT, we would
6456 * not have acquired a putlock, so we don't need to check
6457 * the putcounts. If not entering with a claim, we test
6458 * for sq_count == 0.
6459 */
6460 type = sq->sq_type;
6461 if (!(type & SQ_CIPUT)) {
6462 if (sq->sq_count > 1) {
6463 if (bg_service) {
6464 ASSERT(sq->sq_servcount != 0);
6465 sq->sq_servcount--;
6466 }
6467 mutex_exit(SQLOCK(sq));
6468 return;
6469 }
6470 sq->sq_flags |= SQ_EXCL;
6471 }
6472
6473 /*
6474 * This is where we make a claim to the syncq.
6475 * This can either be done by incrementing a putlock, or
6476 * the sq_count. But since we already have the SQLOCK
6477 * here, we just bump the sq_count.
6478 *
6479 * Note that after we make a claim, we need to let the code
6480 * fall through to the end of this routine to clean itself
6481 * up. A return in the while loop will put the syncq in a
6482 * very bad state.
6483 */
6484 sq->sq_count++;
6485 ASSERT(sq->sq_count != 0); /* wraparound */
6486
6487 while ((flags = sq->sq_flags) & SQ_QUEUED) {
6488 /*
6489 * If we are told to stayaway or went exclusive,
6490 * we are done.
6491 */
6492 if (flags & (SQ_STAYAWAY)) {
6493 break;
6494 }
6495
6496 /*
6497 * If there are events to run, do so.
6498 * We have one claim to the syncq, so if there are
6499 * more than one, other threads are running.
6500 */
6501 if (sq->sq_evhead != NULL) {
6502 ASSERT(sq->sq_flags & SQ_EVENTS);
6503
6504 count = sq->sq_count;
6505 SQ_PUTLOCKS_ENTER(sq);
6506 SUM_SQ_PUTCOUNTS(sq, count);
6507 if (count > 1) {
6508 SQ_PUTLOCKS_EXIT(sq);
6509 /* Can't upgrade - other threads inside */
6510 break;
6511 }
6512 ASSERT((flags & SQ_EXCL) == 0);
6513 sq->sq_flags = flags | SQ_EXCL;
6514 SQ_PUTLOCKS_EXIT(sq);
6515 /*
6516 * we have the only claim, run the events,
6517 * sq_run_events will clear the SQ_EXCL flag.
6518 */
6519 sq_run_events(sq);
6520
6521 /*
6522 * If this is a CIPUT perimeter, we need
6523 * to drop the SQ_EXCL flag so we can properly
6524 * continue draining the syncq.
6525 */
6526 if (type & SQ_CIPUT) {
6527 ASSERT(sq->sq_flags & SQ_EXCL);
6528 sq->sq_flags &= ~SQ_EXCL;
6529 }
6530
6531 /*
6532 * And go back to the beginning just in case
6533 * anything changed while we were away.
6534 */
6535 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6536 continue;
6537 }
6538
6539 ASSERT(sq->sq_evhead == NULL);
6540 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6541
6542 /*
6543 * Find the queue that is not draining.
6544 *
6545 * q_draining is protected by QLOCK which we do not hold.
6546 * But if it was set, then a thread was draining, and if it gets
6547 * cleared, then it was because the thread has successfully
6548 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6549 * state to happen, a thread needs the SQLOCK which we hold, and
6550 * if there was such a flag, we would have already seen it.
6551 */
6552
6553 for (qp = sq->sq_head;
6554 qp != NULL && (qp->q_draining ||
6555 (qp->q_sqflags & Q_SQDRAINING));
6556 qp = qp->q_sqnext)
6557 ;
6558
6559 if (qp == NULL)
6560 break;
6561
6562 /*
6563 * We have a queue to work on, and we hold the
6564 * SQLOCK and one claim, call qdrain_syncq.
6565 * This means we need to release the SQLOCK and
6566 * acquire the QLOCK (OK since we have a claim).
6567 * Note that qdrain_syncq will actually dequeue
6568 * this queue from the sq_head list when it is
6569 * convinced all the work is done and release
6570 * the QLOCK before returning.
6571 */
6572 qp->q_sqflags |= Q_SQDRAINING;
6573 mutex_exit(SQLOCK(sq));
6574 mutex_enter(QLOCK(qp));
6575 qdrain_syncq(sq, qp);
6576 mutex_enter(SQLOCK(sq));
6577
6578 /* The queue is drained */
6579 ASSERT(qp->q_sqflags & Q_SQDRAINING);
6580 qp->q_sqflags &= ~Q_SQDRAINING;
6581 /*
6582 * NOTE: After this point qp should not be used since it may be
6583 * closed.
6584 */
6585 }
6586
6587 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6588 flags = sq->sq_flags;
6589
6590 /*
6591 * sq->sq_head cannot change because we hold the
6592 * sqlock. However, a thread CAN decide that it is no longer
6593 * going to drain that queue, but that should only be due to
6594 * a GOAWAY state, and we should see that here.
6595 *
6596 * This loop is not very efficient. One solution may be adding a second
6597 * pointer to the "draining" queue, but it is difficult to do when
6598 * queues are inserted in the middle due to priority ordering. Another
6599 * possibility is to yank the queue out of the sq list and put it onto
6600 * the "draining list" and then put it back if it can't be drained.
6601 */
6602
6603 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6604 (type & SQ_CI) || sq->sq_head->q_draining);
6605
6606 /* Drop SQ_EXCL for non-CIPUT perimeters */
6607 if (!(type & SQ_CIPUT))
6608 flags &= ~SQ_EXCL;
6609 ASSERT((flags & SQ_EXCL) == 0);
6610
6611 /* Wake up any waiters. */
6612 if (flags & SQ_WANTWAKEUP) {
6613 flags &= ~SQ_WANTWAKEUP;
6614 cv_broadcast(&sq->sq_wait);
6615 }
6616 if (flags & SQ_WANTEXWAKEUP) {
6617 flags &= ~SQ_WANTEXWAKEUP;
6618 cv_broadcast(&sq->sq_exitwait);
6619 }
6620 sq->sq_flags = flags;
6621
6622 ASSERT(sq->sq_count != 0);
6623 /* Release our claim. */
6624 sq->sq_count--;
6625
6626 if (bg_service) {
6627 ASSERT(sq->sq_servcount != 0);
6628 sq->sq_servcount--;
6629 }
6630
6631 mutex_exit(SQLOCK(sq));
6632
6633 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6634 "drain_syncq end:%p", sq);
6635 }
6636
6637
6638 /*
6640 * qdrain_syncq can be called (currently) from only one of two places:
6641 * drain_syncq
6642 * putnext (or some variation of it).
6643 * and eventually
6644 * qwait(_sig)
6645 *
6646 * If called from drain_syncq, we found it in the list of queues needing
6647 * service, so there is work to be done (or it wouldn't be in the list).
6648 *
6649 * If called from some putnext variation, it was because the
6650 * perimeter is open, but messages are blocking a putnext and
6651 * there is not a thread working on it. Now a thread could start
6652 * working on it while we are getting ready to do so ourself, but
6653 * the thread would set the q_draining flag, and we can spin out.
6654 *
6655 * As for qwait(_sig), I think I shall let it continue to call
6656 * drain_syncq directly (after all, it will get here eventually).
6657 *
6658 * qdrain_syncq has to terminate when:
6659 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6660 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6661 *
6662 * ASSUMES:
6663 * One claim
6664 * QLOCK held
6665 * SQLOCK not held
6666 * Will release QLOCK before returning
6667 */
6668 void
6669 qdrain_syncq(syncq_t *sq, queue_t *q)
6670 {
6671 mblk_t *bp;
6672 #ifdef DEBUG
6673 uint16_t count;
6674 #endif
6675
6676 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6677 "drain_syncq start:%p", sq);
6678 ASSERT(q->q_syncq == sq);
6679 ASSERT(MUTEX_HELD(QLOCK(q)));
6680 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6681 /*
6682 * For non-CIPUT perimeters, we should be called with the exclusive bit
6683 * set already. For CIPUT perimeters, we will be doing a concurrent
6684 * drain, so it better not be set.
6685 */
6686 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6687 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6688 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6689 /*
6690 * All outer pointers are set, or none of them are
6691 */
6692 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6693 sq->sq_oprev == NULL) ||
6694 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6695 sq->sq_oprev != NULL));
6696 #ifdef DEBUG
6697 count = sq->sq_count;
6698 /*
6699 * This is OK without the putlocks, because we have one
6700 * claim either from the sq_count, or a putcount. We could
6701 * get an erroneous value from other counts, but ours won't
6702 * change, so one way or another, we will have at least a
6703 * value of one.
6704 */
6705 SUM_SQ_PUTCOUNTS(sq, count);
6706 ASSERT(count >= 1);
6707 #endif /* DEBUG */
6708
6709 /*
6710 * The first thing to do is find out if a thread is already draining
6711 * this queue. If so, we are done, just return.
6712 */
6713 if (q->q_draining) {
6714 mutex_exit(QLOCK(q));
6715 return;
6716 }
6717
6718 /*
6719 * If the perimeter is exclusive, there is nothing we can do right now,
6720 * go away. Note that there is nothing to prevent this case from
6721 * changing right after this check, but the spin-out will catch it.
6722 */
6723
6724 /* Tell other threads that we are draining this queue */
6725 q->q_draining = 1; /* Protected by QLOCK */
6726
6727 /*
6728 * If there is nothing to do, clear QFULL as necessary. This caters for
6729 * the case where an empty queue was enqueued onto the syncq.
6730 */
6731 if (q->q_sqhead == NULL) {
6732 ASSERT(q->q_syncqmsgs == 0);
6733 mutex_exit(QLOCK(q));
6734 clr_qfull(q);
6735 mutex_enter(QLOCK(q));
6736 }
6737
6738 /*
6739 * Note that q_sqhead must be re-checked here in case another message
6740 * was enqueued whilst QLOCK was dropped during the call to clr_qfull.
6741 */
6742 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6743 /*
6744 * Because we can enter this routine just because a putnext is
6745 * blocked, we need to spin out if the perimeter wants to go
6746 * exclusive as well as just blocked. We need to spin out also
6747 * if events are queued on the syncq.
6748 * Don't check for SQ_EXCL, because non-CIPUT perimeters would
6749 * set it, and it can't become exclusive while we hold a claim.
6750 */
6751 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6752 break;
6753 }
6754
6755 #ifdef DEBUG
6756 /*
6757 * Since we are in qdrain_syncq, we already know the queue,
6758 * but for sanity, we want to check this against the qp that
6759 * was passed in by bp->b_queue.
6760 */
6761
6762 ASSERT(bp->b_queue == q);
6763 ASSERT(bp->b_queue->q_syncq == sq);
6764 bp->b_queue = NULL;
6765
6766 /*
6767 * We would have the following check in the DEBUG code:
6768 *
6769 * if (bp->b_prev != NULL) {
6770 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6771 * }
6772 *
6773 * This can't be done, however, since IP modifies qinfo
6774 * structure at run-time (switching between IPv4 qinfo and IPv6
6775 * qinfo), invalidating the check.
6776 * So the ASSERT itself is omitted until the whole issue
6777 * is resolved.
6778 */
6779 #endif
6780 ASSERT(q->q_sqhead == bp);
6781 q->q_sqhead = bp->b_next;
6782 bp->b_prev = bp->b_next = NULL;
6783 ASSERT(q->q_syncqmsgs > 0);
6784 mutex_exit(QLOCK(q));
6785
6786 ASSERT(bp->b_datap->db_ref != 0);
6787
6788 (void) (*q->q_qinfo->qi_putp)(q, bp);
6789
6790 mutex_enter(QLOCK(q));
6791
6792 /*
6793 * q_syncqmsgs should only be decremented after executing the
6794 * put procedure to avoid message re-ordering. This is due to an
6795 * optimisation in putnext() which can call the put procedure
6796 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED
6797 * being set).
6798 *
6799 * We also need to clear QFULL in the next service procedure
6800 * queue if this is the last message destined for that queue.
6801 *
6802 * It would make better sense to have some sort of tunable for
6803 * the low water mark, but these semantics are not yet defined.
6804 * So, alas, we use a constant.
6805 */
6806 if (--q->q_syncqmsgs == 0) {
6807 mutex_exit(QLOCK(q));
6808 clr_qfull(q);
6809 mutex_enter(QLOCK(q));
6810 }
6811
6812 /*
6813 * Always clear SQ_EXCL when CIPUT in order to handle
6814 * qwriter(INNER). The putp() can call qwriter and get exclusive
6815 * access IFF this is the only claim. So, we need to test for
6816 * this possibility, acquire the mutex and clear the bit.
6817 */
6818 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6819 mutex_enter(SQLOCK(sq));
6820 sq->sq_flags &= ~SQ_EXCL;
6821 mutex_exit(SQLOCK(sq));
6822 }
6823 }
6824
6825 /*
6826 * We should either have no messages on this queue, or we were told to
6827 * goaway by a waiter (which we will wake up at the end of this
6828 * function).
6829 */
6830 ASSERT((q->q_sqhead == NULL) ||
6831 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6832
6833 ASSERT(MUTEX_HELD(QLOCK(q)));
6834 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6835
6836 /* Remove the q from the syncq list if all the messages are drained. */
6837 if (q->q_sqhead == NULL) {
6838 ASSERT(q->q_syncqmsgs == 0);
6839 mutex_enter(SQLOCK(sq));
6840 if (q->q_sqflags & Q_SQQUEUED)
6841 SQRM_Q(sq, q);
6842 mutex_exit(SQLOCK(sq));
6843 /*
6844 * Since the queue is removed from the list, reset its priority.
6845 */
6846 q->q_spri = 0;
6847 }
6848
6849 /*
6850 * Remember, the q_draining flag is used to let another thread know
6851 * that there is a thread currently draining the messages for a queue.
6852 * Since we are now done with this queue (even if there may be messages
6853 * still there), we need to clear this flag so some thread will work on
6854 * it if needed.
6855 */
6856 ASSERT(q->q_draining);
6857 q->q_draining = 0;
6858
6859 /* Called with a claim, so OK to drop all locks. */
6860 mutex_exit(QLOCK(q));
6861
6862 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6863 "drain_syncq end:%p", sq);
6864 }
6865 /* END OF QDRAIN_SYNCQ */
6866
6867
6868 /*
6869 * This is the mate to qdrain_syncq, except that it is putting the message onto
6870 * the queue instead of draining. Since the message is destined for the queue
6871 * that is selected, there is no need to identify the function because the
6872 * message is intended for the put routine for the queue. For debug kernels,
6873 * this routine will do it anyway just in case.
6874 *
6875 * After the message is enqueued on the syncq, it calls putnext_tail()
6876 * which will schedule a background thread to actually process the message.
6877 *
6878 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6879 * SQLOCK(sq) and QLOCK(q) are not held.
6880 */
6881 void
6882 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6883 {
6884 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6885 ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6886 ASSERT(sq->sq_count > 0);
6887 ASSERT(q->q_syncq == sq);
6888 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6889 sq->sq_oprev == NULL) ||
6890 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6891 sq->sq_oprev != NULL));
6892
6893 mutex_enter(QLOCK(q));
6894
6895 #ifdef DEBUG
6896 /*
6897 * This is used for debug in the qfill_syncq/qdrain_syncq case
6898 * to trace the queue that the message is intended for. Note
6899 * that the original use was to identify the queue and function
6900 * to call on the drain. In the new syncq, we have the context
6901 * of the queue that we are draining, so we call its putproc and
6902 * don't rely on the saved values. But for debug this is still
6903 * useful information.
6904 */
6905 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6906 mp->b_queue = q;
6907 mp->b_next = NULL;
6908 #endif
6909 ASSERT(q->q_syncq == sq);
6910 /*
6911 * Enqueue the message on the list.
6912 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6913 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
6914 */
6915 SQPUT_MP(q, mp);
6916 mutex_enter(SQLOCK(sq));
6917
6918 /*
6919 * And queue on syncq for scheduling, if not already queued.
6920 * Note that we need the SQLOCK for this, and for testing flags
6921 * at the end to see if we will drain. So grab it now, and
6922 * release it before we call qdrain_syncq or return.
6923 */
6924 if (!(q->q_sqflags & Q_SQQUEUED)) {
6925 q->q_spri = curthread->t_pri;
6926 SQPUT_Q(sq, q);
6927 }
6928 #ifdef DEBUG
6929 else {
6930 /*
6931 * All of these conditions MUST be true!
6932 */
6933 ASSERT(sq->sq_tail != NULL);
6934 if (sq->sq_tail == sq->sq_head) {
6935 ASSERT((q->q_sqprev == NULL) &&
6936 (q->q_sqnext == NULL));
6937 } else {
6938 ASSERT((q->q_sqprev != NULL) ||
6939 (q->q_sqnext != NULL));
6940 }
6941 ASSERT(sq->sq_flags & SQ_QUEUED);
6942 ASSERT(q->q_syncqmsgs != 0);
6943 ASSERT(q->q_sqflags & Q_SQQUEUED);
6944 }
6945 #endif
6946 mutex_exit(QLOCK(q));
6947 /*
6948 * SQLOCK is still held, so sq_count can be safely decremented.
6949 */
6950 sq->sq_count--;
6951
6952 putnext_tail(sq, q, 0);
6953 /* Should not reference sq or q after this point. */
6954 }
6955
6956 /* End of qfill_syncq */
6957
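/*
 * Illustrative sketch (added for clarity): the deferred-put path that
 * qfill_syncq and qdrain_syncq implement between them:
 *
 *	putnext(q, mp)
 *	  -> perimeter busy: qfill_syncq(sq, nq, mp)	enqueue on nq
 *	       -> putnext_tail(sq, nq, 0)		drain now if
 *							possible, else
 *							sqenable(sq)
 *	  background/service: drain_syncq(sq)
 *	       -> qdrain_syncq(sq, nq)
 *	            -> (*nq->q_qinfo->qi_putp)(nq, mp)	deferred put runs
 */
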
6958 /*
6959 * Remove all messages from a syncq (if qp is NULL) or remove all messages
6960 * that would be put into qp by drain_syncq.
6961 * Used when deleting the syncq (qp == NULL) or when detaching
6962 * a queue (qp != NULL).
6963 * Return non-zero if one or more messages were freed.
6964 *
6965 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
6966 * sq_putlocks are used.
6967 *
6968 * NOTE: This function assumes that it is called from the close() context and
6969 * that all the queues in the syncq are going away. For this reason it doesn't
6970 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6971 * currently valid, but it is worth rethinking this function so it
6972 * behaves properly in other cases.
6973 */
6974 int
6975 flush_syncq(syncq_t *sq, queue_t *qp)
6976 {
6977 mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6978 queue_t *q;
6979 int ret = 0;
6980
6981 mutex_enter(SQLOCK(sq));
6982
6983 /*
6984 * Before we leave, we need to make sure there are no
6985 * events listed for this queue. All events for this queue
6986 * will just be freed.
6987 */
6988 if (qp != NULL && sq->sq_evhead != NULL) {
6989 ASSERT(sq->sq_flags & SQ_EVENTS);
6990
6991 mp_prev = NULL;
6992 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6993 mp_next = bp->b_next;
6994 if (bp->b_queue == qp) {
6995 /* Delete this message */
6996 if (mp_prev != NULL) {
6997 mp_prev->b_next = mp_next;
6998 /*
6999 * Update sq_evtail if the last element
7000 * is removed.
7001 */
7002 if (bp == sq->sq_evtail) {
7003 ASSERT(mp_next == NULL);
7004 sq->sq_evtail = mp_prev;
7005 }
7006 } else
7007 sq->sq_evhead = mp_next;
7008 if (sq->sq_evhead == NULL)
7009 sq->sq_flags &= ~SQ_EVENTS;
7010 bp->b_prev = bp->b_next = NULL;
7011 freemsg(bp);
7012 ret++;
7013 } else {
7014 mp_prev = bp;
7015 }
7016 }
7017 }
7018
7019 /*
7020 * Walk sq_head and:
7021 * - match qp if qp is set, and remove its messages
7022 * - all if qp is not set
7023 */
7024 q = sq->sq_head;
7025 while (q != NULL) {
7026 ASSERT(q->q_syncq == sq);
7027 if ((qp == NULL) || (qp == q)) {
7028 /*
7029 * Yank the messages as a list off the queue
7030 */
7031 mp_head = q->q_sqhead;
7032 /*
7033 * We do not have QLOCK(q) here (which is safe due to
7034 * assumptions mentioned above). To obtain the lock we
7035 * need to release SQLOCK which may allow lots of things
7036 * to change upon us. This place requires more analysis.
7037 */
7038 q->q_sqhead = q->q_sqtail = NULL;
7039 ASSERT(mp_head->b_queue &&
7040 mp_head->b_queue->q_syncq == sq);
7041
7042 /*
7043 * Free each of the messages.
7044 */
7045 for (bp = mp_head; bp != NULL; bp = mp_next) {
7046 mp_next = bp->b_next;
7047 bp->b_prev = bp->b_next = NULL;
7048 freemsg(bp);
7049 ret++;
7050 }
7051 /*
7052 * Now remove the queue from the syncq.
7053 */
7054 ASSERT(q->q_sqflags & Q_SQQUEUED);
7055 SQRM_Q(sq, q);
7056 q->q_spri = 0;
7057 q->q_syncqmsgs = 0;
7058
7059 /*
7060 * If qp was specified, we are done with it and are
7061 * going to drop SQLOCK(sq) and return. We wake up syncq
7062 * waiters while we still have the SQLOCK.
7063 */
7064 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7065 sq->sq_flags &= ~SQ_WANTWAKEUP;
7066 cv_broadcast(&sq->sq_wait);
7067 }
7068 /* Drop SQLOCK across clr_qfull */
7069 mutex_exit(SQLOCK(sq));
7070
7071 /*
7072 * We avoid doing the test that drain_syncq does and
7073 * unconditionally clear qfull for every flushed
7074 * message. Since flush_syncq is only called during
7075 * close this should not be a problem.
7076 */
7077 clr_qfull(q);
7078 if (qp != NULL) {
7079 return (ret);
7080 } else {
7081 mutex_enter(SQLOCK(sq));
7082 /*
7083 * The head was removed by SQRM_Q above.
7084 * reread the new head and flush it.
7085 */
7086 q = sq->sq_head;
7087 }
7088 } else {
7089 q = q->q_sqnext;
7090 }
7091 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7092 }
7093
7094 if (sq->sq_flags & SQ_WANTWAKEUP) {
7095 sq->sq_flags &= ~SQ_WANTWAKEUP;
7096 cv_broadcast(&sq->sq_wait);
7097 }
7098
7099 mutex_exit(SQLOCK(sq));
7100 return (ret);
7101 }
7102
7103 /*
7104 * Propagate all messages from a syncq to the next syncq that are associated
7105 * with the specified queue. If the queue is attached to a driver or if the
7106 * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7107 *
7108 * Assumes that the stream is strlock()'ed. We don't come here if there
7109 * are no messages to propagate.
7110 *
7111 * NOTE : If the queue is attached to a driver, all the messages are freed
7112 * as there is no point in propagating the messages from the driver syncq
7113 * to the closing stream head which will in turn get freed later.
7114 */
7115 static int
7116 propagate_syncq(queue_t *qp)
7117 {
7118 mblk_t *bp, *head, *tail, *prev, *next;
7119 syncq_t *sq;
7120 queue_t *nqp;
7121 syncq_t *nsq;
7122 boolean_t isdriver;
7123 int moved = 0;
7124 uint16_t flags;
7125 pri_t priority = curthread->t_pri;
7126 #ifdef DEBUG
7127 void (*func)();
7128 #endif
7129
7130 sq = qp->q_syncq;
7131 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7132 /* debug macro */
7133 SQ_PUTLOCKS_HELD(sq);
7134 /*
7135 * As entersq() does not increment the sq_count for
7136 * the write side, check sq_count for non-QPERQ
7137 * perimeters alone.
7138 */
7139 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7140
7141 /*
7142 * propagate_syncq() can be called because of either messages or
7143 * events on the queue syncq. Do the actual message propagation
7144 * if there are any messages.
7145 */
7146 if (qp->q_syncqmsgs) {
7147 isdriver = (qp->q_flag & QISDRV);
7148
7149 if (!isdriver) {
7150 nqp = qp->q_next;
7151 nsq = nqp->q_syncq;
7152 ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7153 /* debug macro */
7154 SQ_PUTLOCKS_HELD(nsq);
7155 #ifdef DEBUG
7156 func = (void (*)())(uintptr_t)nqp->q_qinfo->qi_putp;
7157 #endif
7158 }
7159
7160 SQRM_Q(sq, qp);
7161 priority = MAX(qp->q_spri, priority);
7162 qp->q_spri = 0;
7163 head = qp->q_sqhead;
7164 tail = qp->q_sqtail;
7165 qp->q_sqhead = qp->q_sqtail = NULL;
7166 qp->q_syncqmsgs = 0;
7167
7168 /*
7169 * Walk the list of messages, and free them if this is a driver,
7170 * otherwise reset the b_prev and b_queue values for the new putp.
7171 * Afterward, we will just add the head to the end of the next
7172 * syncq, and point the tail to the end of this one.
7173 */
7174
7175 for (bp = head; bp != NULL; bp = next) {
7176 next = bp->b_next;
7177 if (isdriver) {
7178 bp->b_prev = bp->b_next = NULL;
7179 freemsg(bp);
7180 continue;
7181 }
7182 /* Change the q values for this message */
7183 bp->b_queue = nqp;
7184 #ifdef DEBUG
7185 bp->b_prev = (mblk_t *)func;
7186 #endif
7187 moved++;
7188 }
7189 /*
7190 * Attach list of messages to the end of the new queue (if there
7191 * is a list of messages).
7192 */
7193
7194 if (!isdriver && head != NULL) {
7195 ASSERT(tail != NULL);
7196 if (nqp->q_sqhead == NULL) {
7197 nqp->q_sqhead = head;
7198 } else {
7199 ASSERT(nqp->q_sqtail != NULL);
7200 nqp->q_sqtail->b_next = head;
7201 }
7202 nqp->q_sqtail = tail;
7203 /*
7204 * When messages are moved from high priority queue to
7205 * another queue, the destination queue priority is
7206 * upgraded.
7207 */
7208
7209 if (priority > nqp->q_spri)
7210 nqp->q_spri = priority;
7211
7212 SQPUT_Q(nsq, nqp);
7213
7214 nqp->q_syncqmsgs += moved;
7215 ASSERT(nqp->q_syncqmsgs != 0);
7216 }
7217 }
7218
7219 /*
7220 * Before we leave, we need to make sure there are no
7221 * events listed for this queue. All events for this queue
7222 * will just be freed.
7223 */
7224 if (sq->sq_evhead != NULL) {
7225 ASSERT(sq->sq_flags & SQ_EVENTS);
7226 prev = NULL;
7227 for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7228 next = bp->b_next;
7229 if (bp->b_queue == qp) {
7230 /* Delete this message */
7231 if (prev != NULL) {
7232 prev->b_next = next;
7233 /*
7234 * Update sq_evtail if the last element
7235 * is removed.
7236 */
7237 if (bp == sq->sq_evtail) {
7238 ASSERT(next == NULL);
7239 sq->sq_evtail = prev;
7240 }
7241 } else
7242 sq->sq_evhead = next;
7243 if (sq->sq_evhead == NULL)
7244 sq->sq_flags &= ~SQ_EVENTS;
7245 bp->b_prev = bp->b_next = NULL;
7246 freemsg(bp);
7247 } else {
7248 prev = bp;
7249 }
7250 }
7251 }
7252
7253 flags = sq->sq_flags;
7254
7255 /* Wake up any waiter before leaving. */
7256 if (flags & SQ_WANTWAKEUP) {
7257 flags &= ~SQ_WANTWAKEUP;
7258 cv_broadcast(&sq->sq_wait);
7259 }
7260 sq->sq_flags = flags;
7261
7262 return (moved);
7263 }
7264
7265 /*
7266 * Try and upgrade to exclusive access at the inner perimeter. If this can
7267 * not be done without blocking then request will be queued on the syncq
7268 * and drain_syncq will run it later.
7269 *
7270 * This routine can only be called from put or service procedures plus
7271 * asynchronous callback routines that have properly entered the queue (with
7272 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq
7273 * associated with q.
7274 */
7275 void
7276 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
7277 {
7278 syncq_t *sq = q->q_syncq;
7279 uint16_t count;
7280
7281 mutex_enter(SQLOCK(sq));
7282 count = sq->sq_count;
7283 SQ_PUTLOCKS_ENTER(sq);
7284 SUM_SQ_PUTCOUNTS(sq, count);
7285 ASSERT(count >= 1);
7286 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));
7287
7288 if (count == 1) {
7289 /*
7290 * Can upgrade. This case also handles nested qwriter calls
7291 * (when the qwriter callback function calls qwriter). In that
7292 * case SQ_EXCL is already set.
7293 */
7294 sq->sq_flags |= SQ_EXCL;
7295 SQ_PUTLOCKS_EXIT(sq);
7296 mutex_exit(SQLOCK(sq));
7297 (*func)(q, mp);
7298 /*
7299 * Assumes that leavesq, putnext, and drain_syncq will reset
7300 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
7301 * until putnext, leavesq, or drain_syncq drops it.
7302 * That way we handle nested qwriter(INNER) without dropping
7303 * SQ_EXCL until the outermost qwriter callback routine is
7304 * done.
7305 */
7306 return;
7307 }
7308 SQ_PUTLOCKS_EXIT(sq);
7309 sqfill_events(sq, q, mp, func);
7310 }
7311
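/*
 * Illustrative sketch (added for clarity): qwriter_inner is reached
 * via qwriter(9F) with PERIM_INNER. The callback name is hypothetical.
 *
 *	static void
 *	xx_update_state(queue_t *q, mblk_t *mp)
 *	{
 *		// Runs with SQ_EXCL set on the inner perimeter. A
 *		// nested qwriter(q, mp2, xx_other, PERIM_INNER) from
 *		// here runs immediately, since this is the only claim.
 *	}
 *
 *	// From a put procedure on a SQ_CIPUT/SQ_CISVC perimeter:
 *	qwriter(q, mp, xx_update_state, PERIM_INNER);
 */
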
7312 /*
7313 * Synchronous callback support functions
7314 */
7315
7316 /*
7317 * Allocate a callback parameter structure.
7318 * Assumes that caller initializes the flags and the id.
7319 * Acquires SQLOCK(sq) if non-NULL is returned.
7320 */
7321 callbparams_t *
7322 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
7323 {
7324 callbparams_t *cbp;
7325 size_t size = sizeof (callbparams_t);
7326
7327 cbp = kmem_alloc(size, kmflags & ~KM_PANIC);
7328
7329 /*
7330 * Only try tryhard allocation if the caller is ready to panic.
7331 * Otherwise just fail.
7332 */
7333 if (cbp == NULL) {
7334 if (kmflags & KM_PANIC)
7335 cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
7336 &size, kmflags);
7337 else
7338 return (NULL);
7339 }
7340
7341 ASSERT(size >= sizeof (callbparams_t));
7342 cbp->cbp_size = size;
7343 cbp->cbp_sq = sq;
7344 cbp->cbp_func = func;
7345 cbp->cbp_arg = arg;
7346 mutex_enter(SQLOCK(sq));
7347 cbp->cbp_next = sq->sq_callbpend;
7348 sq->sq_callbpend = cbp;
7349 return (cbp);
7350 }
7351
7352 void
7353 callbparams_free(syncq_t *sq, callbparams_t *cbp)
7354 {
7355 callbparams_t **pp, *p;
7356
7357 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7358
7359 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7360 if (p == cbp) {
7361 *pp = p->cbp_next;
7362 kmem_free(p, p->cbp_size);
7363 return;
7364 }
7365 }
7366 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7367 "callbparams_free: not found\n"));
7368 }
7369
7370 void
7371 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
7372 {
7373 callbparams_t **pp, *p;
7374
7375 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7376
7377 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7378 if (p->cbp_id == id && p->cbp_flags == flag) {
7379 *pp = p->cbp_next;
7380 kmem_free(p, p->cbp_size);
7381 return;
7382 }
7383 }
7384 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7385 "callbparams_free_id: not found\n"));
7386 }
7387
7388 /*
7389 * Callback wrapper function used by once-only callbacks that can be
7390 * cancelled (qtimeout and qbufcall)
7391 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be
7392 * cancelled by the qun* functions.
7393 */
7394 void
7395 qcallbwrapper(void *arg)
7396 {
7397 callbparams_t *cbp = arg;
7398 syncq_t *sq;
7399 uint16_t count = 0;
7400 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7401 uint16_t type;
7402
7403 sq = cbp->cbp_sq;
7404 mutex_enter(SQLOCK(sq));
7405 type = sq->sq_type;
7406 if (!(type & SQ_CICB)) {
7407 count = sq->sq_count;
7408 SQ_PUTLOCKS_ENTER(sq);
7409 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7410 SUM_SQ_PUTCOUNTS(sq, count);
7411 sq->sq_needexcl++;
7412 ASSERT(sq->sq_needexcl != 0); /* wraparound */
7413 waitflags |= SQ_MESSAGES;
7414 }
7415 /* Can not handle exclusive entry at outer perimeter */
7416 ASSERT(type & SQ_COCB);
7417
7418 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7419 if ((sq->sq_callbflags & cbp->cbp_flags) &&
7420 (sq->sq_cancelid == cbp->cbp_id)) {
7421 /* timeout has been cancelled */
7422 sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7423 callbparams_free(sq, cbp);
7424 if (!(type & SQ_CICB)) {
7425 ASSERT(sq->sq_needexcl > 0);
7426 sq->sq_needexcl--;
7427 if (sq->sq_needexcl == 0) {
7428 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7429 }
7430 SQ_PUTLOCKS_EXIT(sq);
7431 }
7432 mutex_exit(SQLOCK(sq));
7433 return;
7434 }
7435 sq->sq_flags |= SQ_WANTWAKEUP;
7436 if (!(type & SQ_CICB)) {
7437 SQ_PUTLOCKS_EXIT(sq);
7438 }
7439 cv_wait(&sq->sq_wait, SQLOCK(sq));
7440 if (!(type & SQ_CICB)) {
7441 count = sq->sq_count;
7442 SQ_PUTLOCKS_ENTER(sq);
7443 SUM_SQ_PUTCOUNTS(sq, count);
7444 }
7445 }
7446
7447 sq->sq_count++;
7448 ASSERT(sq->sq_count != 0); /* Wraparound */
7449 if (!(type & SQ_CICB)) {
7450 ASSERT(count == 0);
7451 sq->sq_flags |= SQ_EXCL;
7452 ASSERT(sq->sq_needexcl > 0);
7453 sq->sq_needexcl--;
7454 if (sq->sq_needexcl == 0) {
7455 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7456 }
7457 SQ_PUTLOCKS_EXIT(sq);
7458 }
7459
7460 mutex_exit(SQLOCK(sq));
7461
7462 cbp->cbp_func(cbp->cbp_arg);
7463
7464 /*
7465 * We drop the lock only for leavesq to re-acquire it.
7466 * Possible optimization is inline of leavesq.
7467 */
7468 mutex_enter(SQLOCK(sq));
7469 callbparams_free(sq, cbp);
7470 mutex_exit(SQLOCK(sq));
7471 leavesq(sq, SQ_CALLBACK);
7472 }
7473
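/*
 * Illustrative sketch (added for clarity): qcallbwrapper is the
 * function that qtimeout(9F) and qbufcall(9F) hand to timeout() and
 * bufcall(), so that module callbacks run properly entered into the
 * queue's syncq. The callback name below is hypothetical.
 *
 *	// xx_tick() will run via qcallbwrapper, inside q's perimeter,
 *	// roughly one second from now.
 *	tid = qtimeout(q, xx_tick, q, drv_usectohz(1000000));
 *
 *	// quntimeout() also flags the pending callbparams (via
 *	// sq_cancelid/sq_callbflags) so a wrapper that already fired
 *	// bypasses the callback instead of running it.
 *	(void) quntimeout(q, tid);
 */
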
7474 /*
7475 * No need to grab sq_putlocks here. See comment in strsubr.h that
7476 * explains when sq_putlocks are used.
7477 *
7478 * sq_count (or one of the sq_putcounts) has already been
7479 * decremented by the caller, and if SQ_QUEUED, we need to call
7480 * drain_syncq (the global syncq drain).
7481 * If putnext_tail is called with the SQ_EXCL bit set, we are in
7482 * one of two states: either this is a non-CIPUT perimeter and we
7483 * need to clear the bit, or we went exclusive in the put procedure.
7484 * Either way, we want to clear the bit now, and it is easier to do
7485 * this at the beginning of this function (remember, we hold
7486 * the SQLOCK). Lastly, if there are other messages queued
7487 * on the syncq (and not for our destination), enable the syncq
7488 * for background work.
7489 */
7490
7491 /* ARGSUSED */
7492 void
7493 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
7494 {
7495 uint16_t flags = sq->sq_flags;
7496
7497 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7498 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
7499
7500 /* Clear SQ_EXCL if set in passflags */
7501 if (passflags & SQ_EXCL) {
7502 flags &= ~SQ_EXCL;
7503 }
7504 if (flags & SQ_WANTWAKEUP) {
7505 flags &= ~SQ_WANTWAKEUP;
7506 cv_broadcast(&sq->sq_wait);
7507 }
7508 if (flags & SQ_WANTEXWAKEUP) {
7509 flags &= ~SQ_WANTEXWAKEUP;
7510 cv_broadcast(&sq->sq_exitwait);
7511 }
7512 sq->sq_flags = flags;
7513
7514 /*
7515 * We have cleared SQ_EXCL if we were asked to, and started
7516 * the wakeup process for waiters. If there are no writers
7517 * then we need to drain the syncq if we were told to, or
7518 * enable the background thread to do it.
7519 */
7520 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
7521 if ((passflags & SQ_QUEUED) ||
7522 (sq->sq_svcflags & SQ_DISABLED)) {
7523 /* drain_syncq will take care of events in the list */
7524 drain_syncq(sq);
7525 return;
7526 } else if (flags & SQ_QUEUED) {
7527 sqenable(sq);
7528 }
7529 }
7530 /* Drop the SQLOCK on exit */
7531 mutex_exit(SQLOCK(sq));
7532 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
7533 "putnext_end:(%p, %p, %p) done", NULL, qp, sq);
7534 }
7535
7536 void
7537 set_qend(queue_t *q)
7538 {
7539 mutex_enter(QLOCK(q));
7540 if (!O_SAMESTR(q))
7541 q->q_flag |= QEND;
7542 else
7543 q->q_flag &= ~QEND;
7544 mutex_exit(QLOCK(q));
7545 q = _OTHERQ(q);
7546 mutex_enter(QLOCK(q));
7547 if (!O_SAMESTR(q))
7548 q->q_flag |= QEND;
7549 else
7550 q->q_flag &= ~QEND;
7551 mutex_exit(QLOCK(q));
7552 }
7553
7554 /*
7555 * Set QFULL in next service procedure queue (that cares) if not already
7556 * set and if there are already more messages on the syncq than
7557 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on
7558 * any syncq.
7559 *
7560 * The fq here is the next queue with a service procedure. This is where
7561 * we would fail canputnext, so this is where we need to set QFULL.
7562 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag.
7563 *
7564 * We already have QLOCK at this point. To avoid cross-locks with
7565 * freezestr() which grabs all QLOCKs and with strlock() which grabs both
7566 * SQLOCK and sd_reflock, we need to drop respective locks first.
7567 */
7568 void
7569 set_qfull(queue_t *q)
7570 {
7571 queue_t *fq = NULL;
7572
7573 ASSERT(MUTEX_HELD(QLOCK(q)));
7574 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
7575 (q->q_syncqmsgs > sq_max_size)) {
7576 if ((fq = q->q_nfsrv) == q) {
7577 fq->q_flag |= QFULL;
7578 } else {
7579 mutex_exit(QLOCK(q));
7580 mutex_enter(QLOCK(fq));
7581 fq->q_flag |= QFULL;
7582 mutex_exit(QLOCK(fq));
7583 mutex_enter(QLOCK(q));
7584 }
7585 }
7586 }
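
/*
 * Illustrative sketch (not part of the original file): the QFULL state
 * set above is what a module observes through canputnext().  A
 * hypothetical write service routine honoring it:
 *
 *	static int
 *	xxx_wsrv(queue_t *q)
 *	{
 *		mblk_t *mp;
 *
 *		while ((mp = getq(q)) != NULL) {
 *			if (!canputnext(q)) {
 *				(void) putbq(q, mp);
 *				break;
 *			}
 *			putnext(q, mp);
 *		}
 *		return (0);
 *	}
 *
 * When clr_qfull() below clears QFULL and backenables, this service
 * routine runs again and resumes forwarding.
 */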
7587
7588 void
7589 clr_qfull(queue_t *q)
7590 {
7591 queue_t *oq = q;
7592
7593 q = q->q_nfsrv;
7594 /* Fast check if there is any work to do before getting the lock. */
7595 if ((q->q_flag & (QFULL|QWANTW)) == 0) {
7596 return;
7597 }
7598
7599 /*
7600 * Do not reset QFULL (and backenable) if the q_count is the reason
7601 * for QFULL being set.
7602 */
7603 mutex_enter(QLOCK(q));
7604 /*
7605 	 * If the queue is empty, i.e. q_mblkcnt is zero, the queue cannot
7606 	 * be full, so clear QFULL.
7607 	 * Likewise, if both q_count and q_mblkcnt are below the hiwat mark,
7608 	 * clear QFULL.
7609 */
7610 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7611 (q->q_mblkcnt < q->q_hiwat))) {
7612 q->q_flag &= ~QFULL;
7613 /*
7614 		 * Backenable when:
7615 		 * someone wants to write,
7616 * AND
7617 * both counts are less than the lowat mark
7618 * OR
7619 * the lowat mark is zero
7620 * THEN
7621 * backenable
7622 */
7623 if ((q->q_flag & QWANTW) &&
7624 (((q->q_count < q->q_lowat) &&
7625 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7626 q->q_flag &= ~QWANTW;
7627 mutex_exit(QLOCK(q));
7628 backenable(oq, 0);
7629 } else
7630 mutex_exit(QLOCK(q));
7631 } else
7632 mutex_exit(QLOCK(q));
7633 }
7634
7635 /*
7636 * Set the forward service procedure pointer.
7637 *
7638 * Called at insert-time to cache a queue's next forward service procedure in
7639 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7640 * has a service procedure then q_nfsrv points to itself. If the queue to be
7641 * inserted does not have a service procedure, then q_nfsrv points to the next
7642 * queue forward that has a service procedure. If the queue is at the logical
7643 * end of the stream (driver for write side, stream head for the read side)
7644 * and does not have a service procedure, then q_nfsrv also points to itself.
7645 */
7646 void
7647 set_nfsrv_ptr(
7648 queue_t *rnew, /* read queue pointer to new module */
7649 queue_t *wnew, /* write queue pointer to new module */
7650 queue_t *prev_rq, /* read queue pointer to the module above */
7651 queue_t *prev_wq) /* write queue pointer to the module above */
7652 {
7653 queue_t *qp;
7654
7655 if (prev_wq->q_next == NULL) {
7656 /*
7657 * Insert the driver, initialize the driver and stream head.
7658 * In this case, prev_rq/prev_wq should be the stream head.
7659 * _I_INSERT does not allow inserting a driver. Make sure
7660 * that it is not an insertion.
7661 */
7662 ASSERT(!(rnew->q_flag & _QINSERTING));
7663 wnew->q_nfsrv = wnew;
7664 if (rnew->q_qinfo->qi_srvp)
7665 rnew->q_nfsrv = rnew;
7666 else
7667 rnew->q_nfsrv = prev_rq;
7668 prev_rq->q_nfsrv = prev_rq;
7669 prev_wq->q_nfsrv = prev_wq;
7670 } else {
7671 /*
7672 * set up read side q_nfsrv pointer. This MUST be done
7673 * before setting the write side, because the setting of
7674 * the write side for a fifo may depend on it.
7675 *
7676 * Suppose we have a fifo that only has pipemod pushed.
7677 * pipemod has no read or write service procedures, so
7678 * nfsrv for both pipemod queues points to prev_rq (the
7679 * stream read head). Now push bufmod (which has only a
7680 * read service procedure). Doing the write side first,
7681 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7682 * is WRONG; the next queue forward from wnew with a
7683 * service procedure will be rnew, not the stream read head.
7684 * Since the downstream queue (which in the case of a fifo
7685 * is the read queue rnew) can affect upstream queues, it
7686 * needs to be done first. Setting up the read side first
7687 * sets nfsrv for both pipemod queues to rnew and then
7688 		 * when the write side is set up, wnew->q_nfsrv will also
7689 * point to rnew.
7690 */
7691 if (rnew->q_qinfo->qi_srvp) {
7692 /*
7693 * use _OTHERQ() because, if this is a pipe, next
7694 * module may have been pushed from other end and
7695 * q_next could be a read queue.
7696 */
7697 qp = _OTHERQ(prev_wq->q_next);
7698 while (qp && qp->q_nfsrv != qp) {
7699 qp->q_nfsrv = rnew;
7700 qp = backq(qp);
7701 }
7702 rnew->q_nfsrv = rnew;
7703 } else
7704 rnew->q_nfsrv = prev_rq->q_nfsrv;
7705
7706 /* set up write side q_nfsrv pointer */
7707 if (wnew->q_qinfo->qi_srvp) {
7708 wnew->q_nfsrv = wnew;
7709
7710 /*
7711 * For insertion, need to update nfsrv of the modules
7712 * above which do not have a service routine.
7713 */
7714 if (rnew->q_flag & _QINSERTING) {
7715 for (qp = prev_wq;
7716 qp != NULL && qp->q_nfsrv != qp;
7717 qp = backq(qp)) {
7718 qp->q_nfsrv = wnew->q_nfsrv;
7719 }
7720 }
7721 } else {
7722 if (prev_wq->q_next == prev_rq)
7723 /*
7724 * Since prev_wq/prev_rq are the middle of a
7725 * fifo, wnew/rnew will also be the middle of
7726 * a fifo and wnew's nfsrv is same as rnew's.
7727 */
7728 wnew->q_nfsrv = rnew->q_nfsrv;
7729 else
7730 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7731 }
7732 }
7733 }
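
/*
 * Illustrative sketch (not part of the original file): q_nfsrv is what
 * makes canput() constant-time.  Ignoring the syncq draining that the
 * real routine also handles, the core of the check amounts to:
 *
 *	q = q->q_nfsrv;			// next queue with a service proc
 *	if (q->q_flag & QFULL) {
 *		mutex_enter(QLOCK(q));
 *		q->q_flag |= QWANTW;	// request a back-enable
 *		mutex_exit(QLOCK(q));
 *		return (0);		// flow controlled
 *	}
 *	return (1);
 */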
7734
7735 /*
7736 * Reset the forward service procedure pointer; called at remove-time.
7737 */
7738 void
7739 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7740 {
7741 queue_t *tmp_qp;
7742
7743 /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7744 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7745 for (tmp_qp = backq(wqp);
7746 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7747 tmp_qp = backq(tmp_qp)) {
7748 tmp_qp->q_nfsrv = wqp->q_nfsrv;
7749 }
7750 }
7751
7752 /* reset the read side q_nfsrv pointer */
7753 if (rqp->q_qinfo->qi_srvp) {
7754 if (wqp->q_next) { /* non-driver case */
7755 tmp_qp = _OTHERQ(wqp->q_next);
7756 while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7757 /* Note that rqp->q_next cannot be NULL */
7758 ASSERT(rqp->q_next != NULL);
7759 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7760 tmp_qp = backq(tmp_qp);
7761 }
7762 }
7763 }
7764 }
7765
7766 /*
7767 * This routine should be called after all stream geometry changes to update
7768  * the stream head cached struio() rd/wr queue pointers.  Note: it must
7769  * be called with the stream locked.
7770 *
7771 * Note: only enables Synchronous STREAMS for a side of a Stream which has
7772 * an explicit synchronous barrier module queue. That is, a queue that
7773 * has specified a struio() type.
7774 */
7775 static void
7776 strsetuio(stdata_t *stp)
7777 {
7778 queue_t *wrq;
7779
7780 if (stp->sd_flag & STPLEX) {
7781 /*
7782 * Not streamhead, but a mux, so no Synchronous STREAMS.
7783 */
7784 stp->sd_struiowrq = NULL;
7785 stp->sd_struiordq = NULL;
7786 return;
7787 }
7788 /*
7789 * Scan the write queue(s) while synchronous
7790 * until we find a qinfo uio type specified.
7791 */
7792 wrq = stp->sd_wrq->q_next;
7793 while (wrq) {
7794 if (wrq->q_struiot == STRUIOT_NONE) {
7795 wrq = 0;
7796 break;
7797 }
7798 if (wrq->q_struiot != STRUIOT_DONTCARE)
7799 break;
7800 if (! _SAMESTR(wrq)) {
7801 wrq = 0;
7802 break;
7803 }
7804 wrq = wrq->q_next;
7805 }
7806 stp->sd_struiowrq = wrq;
7807 /*
7808 * Scan the read queue(s) while synchronous
7809 * until we find a qinfo uio type specified.
7810 */
7811 wrq = stp->sd_wrq->q_next;
7812 while (wrq) {
7813 if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7814 wrq = 0;
7815 break;
7816 }
7817 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7818 break;
7819 if (! _SAMESTR(wrq)) {
7820 wrq = 0;
7821 break;
7822 }
7823 wrq = wrq->q_next;
7824 }
7825 stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7826 }
7827
7828 static int
7829 pass_rput(queue_t *q, mblk_t *mp)
7830 {
7831 putnext(q, mp);
7832 return (0);
7833 }
7834
7835 /*
7836  * pass_wput unblocks the passthru queues so that messages can arrive
7837  * at the mux's lower read queue before the I_LINK/I_UNLINK is
7838  * acked/nacked.
7839 */
7840 static int
7841 pass_wput(queue_t *q, mblk_t *mp)
7842 {
7843 syncq_t *sq;
7844
7845 sq = _RD(q)->q_syncq;
7846 if (sq->sq_flags & SQ_BLOCKED)
7847 unblocksq(sq, SQ_BLOCKED, 0);
7848 putnext(q, mp);
7849 return (0);
7850 }
7851
7852 /*
7853 * Set up queues for the link/unlink.
7854 * Create a new queue and block it and then insert it
7855 * below the stream head on the lower stream.
7856 * This prevents any messages from arriving during the setq
7857  * as well as while the mux is processing the I_LINK/I_UNLINK.
7858  * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7859  * been acked or nacked, or if a message is generated and sent
7860  * down the mux's write put procedure.
7861 * See pass_wput().
7862 *
7863 * After the new queue is inserted, all messages coming from below are
7864 * blocked. The call to strlock will ensure that all activity in the stream head
7865 * read queue syncq is stopped (sq_count drops to zero).
7866 */
7867 static queue_t *
7868 link_addpassthru(stdata_t *stpdown)
7869 {
7870 queue_t *passq;
7871 sqlist_t sqlist;
7872
7873 passq = allocq();
7874 STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7875 /* setq might sleep in allocator - avoid holding locks. */
7876 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7877 SQ_CI|SQ_CO, B_FALSE);
7878 claimq(passq);
7879 blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7880 insertq(STREAM(passq), passq);
7881
7882 /*
7883 * Use strlock() to wait for the stream head sq_count to drop to zero
7884 * since we are going to change q_ptr in the stream head. Note that
7885 * insertq() doesn't wait for any syncq counts to drop to zero.
7886 */
7887 sqlist.sqlist_head = NULL;
7888 sqlist.sqlist_index = 0;
7889 sqlist.sqlist_size = sizeof (sqlist_t);
7890 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7891 strlock(stpdown, &sqlist);
7892 strunlock(stpdown, &sqlist);
7893
7894 releaseq(passq);
7895 return (passq);
7896 }
7897
7898 /*
7899 * Let messages flow up into the mux by removing
7900 * the passq.
7901 */
7902 static void
7903 link_rempassthru(queue_t *passq)
7904 {
7905 claimq(passq);
7906 removeq(passq);
7907 releaseq(passq);
7908 freeq(passq);
7909 }
7910
7911 /*
7912 * Wait for the condition variable pointed to by `cvp' to be signaled,
7913 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7914 * is negative, then there is no time limit. If `nosigs' is non-zero,
7915 * then the wait will be non-interruptible.
7916 *
7917 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
7918 */
7919 clock_t
7920 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7921 {
7922 clock_t ret;
7923
7924 if (tim < 0) {
7925 if (nosigs) {
7926 cv_wait(cvp, mp);
7927 ret = 1;
7928 } else {
7929 ret = cv_wait_sig(cvp, mp);
7930 }
7931 } else if (tim > 0) {
7932 /*
7933 * convert milliseconds to clock ticks
7934 */
7935 if (nosigs) {
7936 ret = cv_reltimedwait(cvp, mp,
7937 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7938 } else {
7939 ret = cv_reltimedwait_sig(cvp, mp,
7940 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7941 }
7942 } else {
7943 ret = -1;
7944 }
7945 return (ret);
7946 }
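
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * holds the mutex, loops on its own predicate and distinguishes the
 * three return cases (the 100ms bound mirrors strwaitmark() below):
 *
 *	clock_t ret;
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (!my_condition(stp)) {		// hypothetical predicate
 *		ret = str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 0);
 *		if (ret == 0)			// interrupted by a signal
 *			break;
 *		if (ret == -1)			// timed out
 *			break;
 *	}
 *	mutex_exit(&stp->sd_lock);
 */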
7947
7948 /*
7949 * Wait until the stream head can determine if it is at the mark but
7950 * don't wait forever to prevent a race condition between the "mark" state
7951 * in the stream head and any mark state in the caller/user of this routine.
7952 *
7953 * This is used by sockets and for a socket it would be incorrect
7954 * to return a failure for SIOCATMARK when there is no data in the receive
7955 * queue and the marked urgent data is traveling up the stream.
7956 *
7957 * This routine waits until the mark is known by waiting for one of these
7958 * three events:
7959 * The stream head read queue becoming non-empty (including an EOF).
7960 * The STRATMARK flag being set (due to a MSGMARKNEXT message).
7961 * The STRNOTATMARK flag being set (which indicates that the transport
7962 * has sent a MSGNOTMARKNEXT message to indicate that it is not at
7963 * the mark).
7964 *
7965 * The routine returns 1 if the stream is at the mark; 0 if it can
7966 * be determined that the stream is not at the mark.
7967 * If the wait times out and it can't determine
7968 * whether or not the stream might be at the mark the routine will return -1.
7969 *
7970 * Note: This routine should only be used when a mark is pending i.e.,
7971 * in the socket case the SIGURG has been posted.
7972 * Note2: This can not wakeup just because synchronous streams indicate
7973 * that data is available since it is not possible to use the synchronous
7974 * streams interfaces to determine the b_flag value for the data queued below
7975 * the stream head.
7976 */
7977 int
7978 strwaitmark(vnode_t *vp)
7979 {
7980 struct stdata *stp = vp->v_stream;
7981 queue_t *rq = _RD(stp->sd_wrq);
7982 int mark;
7983
7984 mutex_enter(&stp->sd_lock);
7985 while (rq->q_first == NULL &&
7986 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7987 stp->sd_flag |= RSLEEP;
7988
7989 /* Wait for 100 milliseconds for any state change. */
7990 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7991 mutex_exit(&stp->sd_lock);
7992 return (-1);
7993 }
7994 }
7995 if (stp->sd_flag & STRATMARK)
7996 mark = 1;
7997 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7998 mark = 1;
7999 else
8000 mark = 0;
8001
8002 mutex_exit(&stp->sd_lock);
8003 return (mark);
8004 }
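
/*
 * Illustrative sketch (not part of the original file): one reasonable
 * mapping for a SIOCATMARK handler, treating the "unknown" timeout case
 * conservatively as not-at-mark rather than failing the ioctl:
 *
 *	switch (strwaitmark(vp)) {
 *	case 1:				// at the mark
 *		*valp = 1;
 *		break;
 *	case 0:				// definitely not at the mark
 *	case -1:			// unknown after the bounded wait
 *		*valp = 0;
 *		break;
 *	}
 */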
8005
8006 /*
8007 * Set a read side error. If persist is set change the socket error
8008 * to persistent. If errfunc is set install the function as the exported
8009 * error handler.
8010 */
8011 void
8012 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
8013 {
8014 struct stdata *stp = vp->v_stream;
8015
8016 mutex_enter(&stp->sd_lock);
8017 stp->sd_rerror = error;
8018 if (error == 0 && errfunc == NULL)
8019 stp->sd_flag &= ~STRDERR;
8020 else
8021 stp->sd_flag |= STRDERR;
8022 if (persist) {
8023 stp->sd_flag &= ~STRDERRNONPERSIST;
8024 } else {
8025 stp->sd_flag |= STRDERRNONPERSIST;
8026 }
8027 stp->sd_rderrfunc = errfunc;
8028 if (error != 0 || errfunc != NULL) {
8029 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
8030 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
8031 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8032
8033 mutex_exit(&stp->sd_lock);
8034 pollwakeup(&stp->sd_pollist, POLLERR);
8035 mutex_enter(&stp->sd_lock);
8036
8037 if (stp->sd_sigflags & S_ERROR)
8038 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8039 }
8040 mutex_exit(&stp->sd_lock);
8041 }
8042
8043 /*
8044 * Set a write side error. If persist is set change the socket error
8045  * to persistent. If errfunc is set install it as the exported error handler.
8046 */
8047 void
8048 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
8049 {
8050 struct stdata *stp = vp->v_stream;
8051
8052 mutex_enter(&stp->sd_lock);
8053 stp->sd_werror = error;
8054 if (error == 0 && errfunc == NULL)
8055 stp->sd_flag &= ~STWRERR;
8056 else
8057 stp->sd_flag |= STWRERR;
8058 if (persist) {
8059 stp->sd_flag &= ~STWRERRNONPERSIST;
8060 } else {
8061 stp->sd_flag |= STWRERRNONPERSIST;
8062 }
8063 stp->sd_wrerrfunc = errfunc;
8064 if (error != 0 || errfunc != NULL) {
8065 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
8066 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
8067 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8068
8069 mutex_exit(&stp->sd_lock);
8070 pollwakeup(&stp->sd_pollist, POLLERR);
8071 mutex_enter(&stp->sd_lock);
8072
8073 if (stp->sd_sigflags & S_ERROR)
8074 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8075 }
8076 mutex_exit(&stp->sd_lock);
8077 }
8078
8079 /*
8080 * Make the stream return 0 (EOF) when all data has been read.
8081 * No effect on write side.
8082 */
8083 void
8084 strseteof(vnode_t *vp, int eof)
8085 {
8086 struct stdata *stp = vp->v_stream;
8087
8088 mutex_enter(&stp->sd_lock);
8089 if (!eof) {
8090 stp->sd_flag &= ~STREOF;
8091 mutex_exit(&stp->sd_lock);
8092 return;
8093 }
8094 stp->sd_flag |= STREOF;
8095 if (stp->sd_flag & RSLEEP) {
8096 stp->sd_flag &= ~RSLEEP;
8097 cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
8098 }
8099
8100 mutex_exit(&stp->sd_lock);
8101 pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
8102 mutex_enter(&stp->sd_lock);
8103
8104 if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
8105 strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
8106 mutex_exit(&stp->sd_lock);
8107 }
8108
8109 void
8110 strflushrq(vnode_t *vp, int flag)
8111 {
8112 struct stdata *stp = vp->v_stream;
8113
8114 mutex_enter(&stp->sd_lock);
8115 flushq(_RD(stp->sd_wrq), flag);
8116 mutex_exit(&stp->sd_lock);
8117 }
8118
8119 void
8120 strsetrputhooks(vnode_t *vp, uint_t flags,
8121 msgfunc_t protofunc, msgfunc_t miscfunc)
8122 {
8123 struct stdata *stp = vp->v_stream;
8124
8125 mutex_enter(&stp->sd_lock);
8126
8127 if (protofunc == NULL)
8128 stp->sd_rprotofunc = strrput_proto;
8129 else
8130 stp->sd_rprotofunc = protofunc;
8131
8132 if (miscfunc == NULL)
8133 stp->sd_rmiscfunc = strrput_misc;
8134 else
8135 stp->sd_rmiscfunc = miscfunc;
8136
8137 if (flags & SH_CONSOL_DATA)
8138 stp->sd_rput_opt |= SR_CONSOL_DATA;
8139 else
8140 stp->sd_rput_opt &= ~SR_CONSOL_DATA;
8141
8142 if (flags & SH_SIGALLDATA)
8143 stp->sd_rput_opt |= SR_SIGALLDATA;
8144 else
8145 stp->sd_rput_opt &= ~SR_SIGALLDATA;
8146
8147 if (flags & SH_IGN_ZEROLEN)
8148 stp->sd_rput_opt |= SR_IGN_ZEROLEN;
8149 else
8150 stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
8151
8152 mutex_exit(&stp->sd_lock);
8153 }
8154
8155 void
8156 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
8157 {
8158 struct stdata *stp = vp->v_stream;
8159
8160 mutex_enter(&stp->sd_lock);
8161 stp->sd_closetime = closetime;
8162
8163 if (flags & SH_SIGPIPE)
8164 stp->sd_wput_opt |= SW_SIGPIPE;
8165 else
8166 stp->sd_wput_opt &= ~SW_SIGPIPE;
8167 if (flags & SH_RECHECK_ERR)
8168 stp->sd_wput_opt |= SW_RECHECK_ERR;
8169 else
8170 stp->sd_wput_opt &= ~SW_RECHECK_ERR;
8171
8172 mutex_exit(&stp->sd_lock);
8173 }
8174
8175 void
8176 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
8177 {
8178 struct stdata *stp = vp->v_stream;
8179
8180 mutex_enter(&stp->sd_lock);
8181
8182 stp->sd_rputdatafunc = rdatafunc;
8183 stp->sd_wputdatafunc = wdatafunc;
8184
8185 mutex_exit(&stp->sd_lock);
8186 }
8187
8188 /* Used within framework when the queue is already locked */
8189 void
8190 qenable_locked(queue_t *q)
8191 {
8192 stdata_t *stp = STREAM(q);
8193
8194 ASSERT(MUTEX_HELD(QLOCK(q)));
8195
8196 if (!q->q_qinfo->qi_srvp)
8197 return;
8198
8199 /*
8200 * Do not place on run queue if already enabled or closing.
8201 */
8202 if (q->q_flag & (QWCLOSE|QENAB))
8203 return;
8204
8205 /*
8206 * mark queue enabled and place on run list if it is not already being
8207 * serviced. If it is serviced, the runservice() function will detect
8208 * that QENAB is set and call service procedure before clearing
8209 * QINSERVICE flag.
8210 */
8211 q->q_flag |= QENAB;
8212 if (q->q_flag & QINSERVICE)
8213 return;
8214
8215 /* Record the time of qenable */
8216 q->q_qtstamp = ddi_get_lbolt();
8217
8218 /*
8219 * Put the queue in the stp list and schedule it for background
8220 	 * processing if it is not already scheduled and if the stream head
8221 	 * has not indicated its intent to process it in the foreground later
8222 	 * (by setting the STRS_WILLSERVICE flag).
8223 */
8224 mutex_enter(&stp->sd_qlock);
8225 /*
8226 	 * If there is already something on the list, the stp flags should
8227 	 * show the intention to drain it.
8228 */
8229 IMPLY(STREAM_NEEDSERVICE(stp),
8230 (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));
8231
8232 ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
8233 stp->sd_nqueues++;
8234
8235 /*
8236 * If no one will drain this stream we are the first producer and
8237 	 * need to schedule it for the background thread.
8238 */
8239 if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
8240 /*
8241 * No one will service this stream later, so we have to
8242 * schedule it now.
8243 */
8244 STRSTAT(stenables);
8245 stp->sd_svcflags |= STRS_SCHEDULED;
8246 stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
8247 (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);
8248
8249 if (stp->sd_servid == NULL) {
8250 /*
8251 * Task queue failed so fail over to the backup
8252 * servicing thread.
8253 */
8254 STRSTAT(taskqfails);
8255 /*
8256 * It is safe to clear STRS_SCHEDULED flag because it
8257 * was set by this thread above.
8258 */
8259 stp->sd_svcflags &= ~STRS_SCHEDULED;
8260
8261 /*
8262 * Failover scheduling is protected by service_queue
8263 * lock.
8264 */
8265 mutex_enter(&service_queue);
8266 ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
8267 ASSERT(q->q_link == NULL);
8268 /*
8269 * Append the queue to qhead/qtail list.
8270 */
8271 if (qhead == NULL)
8272 qhead = q;
8273 else
8274 qtail->q_link = q;
8275 qtail = q;
8276 /*
8277 * Clear stp queue list.
8278 */
8279 stp->sd_qhead = stp->sd_qtail = NULL;
8280 stp->sd_nqueues = 0;
8281 /*
8282 * Wakeup background queue processing thread.
8283 */
8284 cv_signal(&services_to_run);
8285 mutex_exit(&service_queue);
8286 }
8287 }
8288 mutex_exit(&stp->sd_qlock);
8289 }
8290
8291 static void
8292 queue_service(queue_t *q)
8293 {
8294 /*
8295 * The queue in the list should have
8296 * QENAB flag set and should not have
8297 * QINSERVICE flag set. QINSERVICE is
8298 * set when the queue is dequeued and
8299 * qenable_locked doesn't enqueue a
8300 * queue with QINSERVICE set.
8301 */
8302
8303 ASSERT(!(q->q_flag & QINSERVICE));
8304 ASSERT((q->q_flag & QENAB));
8305 mutex_enter(QLOCK(q));
8306 q->q_flag &= ~QENAB;
8307 q->q_flag |= QINSERVICE;
8308 mutex_exit(QLOCK(q));
8309 runservice(q);
8310 }
8311
8312 static void
8313 syncq_service(syncq_t *sq)
8314 {
8315 STRSTAT(syncqservice);
8316 mutex_enter(SQLOCK(sq));
8317 ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
8318 ASSERT(sq->sq_servcount != 0);
8319 ASSERT(sq->sq_next == NULL);
8320
8321 /* if we came here from the background thread, clear the flag */
8322 if (sq->sq_svcflags & SQ_BGTHREAD)
8323 sq->sq_svcflags &= ~SQ_BGTHREAD;
8324
8325 /* let drain_syncq know that it's being called in the background */
8326 sq->sq_svcflags |= SQ_SERVICE;
8327 drain_syncq(sq);
8328 }
8329
8330 static void
8331 qwriter_outer_service(syncq_t *outer)
8332 {
8333 /*
8334 * Note that SQ_WRITER is used on the outer perimeter
8335 	 * to signal that a qwriter(OUTER) is either waiting to run or is
8336 	 * actually running a function.
8337 */
8338 outer_enter(outer, SQ_BLOCKED|SQ_WRITER);
8339
8340 /*
8341 	 * All inner syncqs are empty and have SQ_WRITER set
8342 * to block entering the outer perimeter.
8343 *
8344 * We do not need to explicitly call write_now since
8345 * outer_exit does it for us.
8346 */
8347 outer_exit(outer);
8348 }
8349
8350 static void
8351 mblk_free(mblk_t *mp)
8352 {
8353 dblk_t *dbp = mp->b_datap;
8354 frtn_t *frp = dbp->db_frtnp;
8355
8356 mp->b_next = NULL;
8357 if (dbp->db_fthdr != NULL)
8358 str_ftfree(dbp);
8359
8360 ASSERT(dbp->db_fthdr == NULL);
8361 frp->free_func(frp->free_arg);
8362 ASSERT(dbp->db_mblk == mp);
8363
8364 if (dbp->db_credp != NULL) {
8365 crfree(dbp->db_credp);
8366 dbp->db_credp = NULL;
8367 }
8368 dbp->db_cpid = -1;
8369 dbp->db_struioflag = 0;
8370 dbp->db_struioun.cksum.flags = 0;
8371
8372 kmem_cache_free(dbp->db_cache, dbp);
8373 }
8374
8375 /*
8376 * Background processing of the stream queue list.
8377 */
8378 static void
8379 stream_service(stdata_t *stp)
8380 {
8381 queue_t *q;
8382
8383 mutex_enter(&stp->sd_qlock);
8384
8385 STR_SERVICE(stp, q);
8386
8387 stp->sd_svcflags &= ~STRS_SCHEDULED;
8388 stp->sd_servid = NULL;
8389 cv_signal(&stp->sd_qcv);
8390 mutex_exit(&stp->sd_qlock);
8391 }
8392
8393 /*
8394 * Foreground processing of the stream queue list.
8395 */
8396 void
8397 stream_runservice(stdata_t *stp)
8398 {
8399 queue_t *q;
8400
8401 mutex_enter(&stp->sd_qlock);
8402 STRSTAT(rservice);
8403 /*
8404 * We are going to drain this stream queue list, so qenable_locked will
8405 * not schedule it until we finish.
8406 */
8407 stp->sd_svcflags |= STRS_WILLSERVICE;
8408
8409 STR_SERVICE(stp, q);
8410
8411 stp->sd_svcflags &= ~STRS_WILLSERVICE;
8412 mutex_exit(&stp->sd_qlock);
8413 }
8414
8415 void
8416 stream_willservice(stdata_t *stp)
8417 {
8418 mutex_enter(&stp->sd_qlock);
8419 stp->sd_svcflags |= STRS_WILLSERVICE;
8420 mutex_exit(&stp->sd_qlock);
8421 }
8422
8423 /*
8424 * Replace the cred currently in the mblk with a different one.
8425 * Also update db_cpid.
8426 */
8427 void
8428 mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
8429 {
8430 dblk_t *dbp = mp->b_datap;
8431 cred_t *ocr = dbp->db_credp;
8432
8433 ASSERT(cr != NULL);
8434
8435 if (cr != ocr) {
8436 crhold(dbp->db_credp = cr);
8437 if (ocr != NULL)
8438 crfree(ocr);
8439 }
8440 /* Don't overwrite with NOPID */
8441 if (cpid != NOPID)
8442 dbp->db_cpid = cpid;
8443 }
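
/*
 * Illustrative sketch (not part of the original file): stamping a newly
 * allocated message with the current thread's credentials:
 *
 *	mblk_t *mp = allocb(len, BPRI_MED);
 *
 *	if (mp != NULL)
 *		mblk_setcred(mp, CRED(), curproc->p_pid);
 */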
8444
8445 /*
8446 * If the src message has a cred, then replace the cred currently in the mblk
8447 * with it.
8448 * Also update db_cpid.
8449 */
8450 void
8451 mblk_copycred(mblk_t *mp, const mblk_t *src)
8452 {
8453 dblk_t *dbp = mp->b_datap;
8454 cred_t *cr, *ocr;
8455 pid_t cpid;
8456
8457 cr = msg_getcred(src, &cpid);
8458 if (cr == NULL)
8459 return;
8460
8461 ocr = dbp->db_credp;
8462 if (cr != ocr) {
8463 crhold(dbp->db_credp = cr);
8464 if (ocr != NULL)
8465 crfree(ocr);
8466 }
8467 /* Don't overwrite with NOPID */
8468 if (cpid != NOPID)
8469 dbp->db_cpid = cpid;
8470 }
8471
8472 void
8473 lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
8474 {
8475 ASSERT(DB_TYPE(mp) == M_DATA);
8476 ASSERT((flags & ~HW_LSO_FLAGS) == 0);
8477
8478 /* Set the flags */
8479 DB_LSOFLAGS(mp) |= flags;
8480 DB_LSOMSS(mp) = mss;
8481 }
8482
8483 void
8484 lso_info_cleanup(mblk_t *mp)
8485 {
8486 ASSERT(DB_TYPE(mp) == M_DATA);
8487
8488 /* Clear the flags */
8489 DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
8490 DB_LSOMSS(mp) = 0;
8491 }
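
/*
 * Illustrative sketch (not part of the original file): a TCP-like
 * sender marks a large send for an LSO-capable driver and clears the
 * state again if the message takes a software (non-LSO) path:
 *
 *	lso_info_set(mp, mss, HW_LSO);
 *	...
 *	lso_info_cleanup(mp);		// software fallback
 */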
8492
8493 /*
8494 * Checksum buffer *bp for len bytes with psum partial checksum,
8495 * or 0 if none, and return the 16 bit partial checksum.
8496 */
8497 unsigned
8498 bcksum(uchar_t *bp, int len, unsigned int psum)
8499 {
8500 int odd = len & 1;
8501 extern unsigned int ip_ocsum();
8502
8503 if (((intptr_t)bp & 1) == 0 && !odd) {
8504 /*
8505 		 * Bp is 16-bit aligned and len is a multiple of a 16-bit word.
8506 */
8507 return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
8508 }
8509 if (((intptr_t)bp & 1) != 0) {
8510 /*
8511 		 * Bp isn't 16-bit aligned.
8512 */
8513 unsigned int tsum;
8514
8515 #ifdef _LITTLE_ENDIAN
8516 psum += *bp;
8517 #else
8518 psum += *bp << 8;
8519 #endif
8520 len--;
8521 bp++;
8522 tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
8523 psum += (tsum << 8) & 0xffff | (tsum >> 8);
8524 if (len & 1) {
8525 bp += len - 1;
8526 #ifdef _LITTLE_ENDIAN
8527 psum += *bp << 8;
8528 #else
8529 psum += *bp;
8530 #endif
8531 }
8532 } else {
8533 /*
8534 		 * Bp is 16-bit aligned.
8535 */
8536 psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
8537 if (odd) {
8538 bp += len - 1;
8539 #ifdef _LITTLE_ENDIAN
8540 psum += *bp;
8541 #else
8542 psum += *bp << 8;
8543 #endif
8544 }
8545 }
8546 /*
8547 * Normalize psum to 16 bits before returning the new partial
8548 * checksum. The max psum value before normalization is 0x3FDFE.
8549 */
8550 return ((psum >> 16) + (psum & 0xFFFF));
8551 }
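
/*
 * Illustrative sketch (not part of the original file): producing a
 * final Internet checksum for a single buffer from the partial sum:
 *
 *	unsigned sum = bcksum(buf, len, 0);
 *
 *	sum = (sum >> 16) + (sum & 0xffff);	// fold any remaining carry
 *	cksum = (uint16_t)~sum;			// ones-complement result
 */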
8552
8553 void
8554 freemsgchain(mblk_t *mp)
8555 {
8556 mblk_t *next;
8557
8558 while (mp != NULL) {
8559 next = mp->b_next;
8560 mp->b_next = NULL;
8561
8562 freemsg(mp);
8563 mp = next;
8564 }
8565 }
8566
8567 mblk_t *
8568 copymsgchain(mblk_t *mp)
8569 {
8570 mblk_t *nmp = NULL;
8571 mblk_t **nmpp = &nmp;
8572
8573 for (; mp != NULL; mp = mp->b_next) {
8574 if ((*nmpp = copymsg(mp)) == NULL) {
8575 freemsgchain(nmp);
8576 return (NULL);
8577 }
8578
8579 nmpp = &((*nmpp)->b_next);
8580 }
8581
8582 return (nmp);
8583 }
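
/*
 * Illustrative sketch (not part of the original file): a consumer that
 * must retain its own copy of a b_next-linked chain:
 *
 *	mblk_t *dup = copymsgchain(head);
 *
 *	if (dup == NULL)
 *		return (ENOMEM);	// any partial copy was already freed
 */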
8584
8585 /* NOTE: Do not add code after this point. */
8586 #undef QLOCK
8587
8588 /*
8589 * Replacement for QLOCK macro for those that can't use it.
8590 */
8591 kmutex_t *
8592 QLOCK(queue_t *q)
8593 {
8594 return (&(q)->q_lock);
8595 }
8596
8597 /*
8598  * Dummy runqueues/queuerun functions for backwards compatibility.
8599 */
8600 #undef runqueues
8601 void
8602 runqueues(void)
8603 {
8604 }
8605
8606 #undef queuerun
8607 void
8608 queuerun(void)
8609 {
8610 }
8611
8612 /*
8613 * Initialize the STR stack instance, which tracks autopush and persistent
8614 * links.
8615 */
8616 /* ARGSUSED */
8617 static void *
8618 str_stack_init(netstackid_t stackid, netstack_t *ns)
8619 {
8620 str_stack_t *ss;
8621 int i;
8622
8623 ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
8624 ss->ss_netstack = ns;
8625
8626 /*
8627 * set up autopush
8628 */
8629 sad_initspace(ss);
8630
8631 /*
8632 * set up mux_node structures.
8633 */
8634 ss->ss_devcnt = devcnt; /* In case it should change before free */
8635 ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
8636 ss->ss_devcnt), KM_SLEEP);
8637 for (i = 0; i < ss->ss_devcnt; i++)
8638 ss->ss_mux_nodes[i].mn_imaj = i;
8639 return (ss);
8640 }
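
/*
 * Illustrative note (the registration itself happens elsewhere in this
 * file): these per-stack callbacks are tied into the netstack framework
 * roughly as:
 *
 *	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
 *	    str_stack_fini);
 *
 * so that each zone/netstack gets its own autopush and mux-link state.
 */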
8641
8642 /*
8643 * Note: run at zone shutdown and not destroy so that the PLINKs are
8644 * gone by the time other cleanup happens from the destroy callbacks.
8645 */
8646 static void
8647 str_stack_shutdown(netstackid_t stackid, void *arg)
8648 {
8649 str_stack_t *ss = (str_stack_t *)arg;
8650 int i;
8651 cred_t *cr;
8652
8653 cr = zone_get_kcred(netstackid_to_zoneid(stackid));
8654 ASSERT(cr != NULL);
8655
8656 /* Undo all the I_PLINKs for this zone */
8657 for (i = 0; i < ss->ss_devcnt; i++) {
8658 struct mux_edge *ep;
8659 ldi_handle_t lh;
8660 ldi_ident_t li;
8661 int ret;
8662 int rval;
8663 dev_t rdev;
8664
8665 ep = ss->ss_mux_nodes[i].mn_outp;
8666 if (ep == NULL)
8667 continue;
8668 ret = ldi_ident_from_major((major_t)i, &li);
8669 if (ret != 0) {
8670 continue;
8671 }
8672 rdev = ep->me_dev;
8673 ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
8674 cr, &lh, li);
8675 if (ret != 0) {
8676 ldi_ident_release(li);
8677 continue;
8678 }
8679
8680 ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
8681 cr, &rval);
8682 if (ret) {
8683 (void) ldi_close(lh, FREAD|FWRITE, cr);
8684 ldi_ident_release(li);
8685 continue;
8686 }
8687 (void) ldi_close(lh, FREAD|FWRITE, cr);
8688
8689 /* Close layered handles */
8690 ldi_ident_release(li);
8691 }
8692 crfree(cr);
8693
8694 sad_freespace(ss);
8695
8696 kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
8697 ss->ss_mux_nodes = NULL;
8698 }
8699
8700 /*
8701 * Free the structure; str_stack_shutdown did the other cleanup work.
8702 */
8703 /* ARGSUSED */
8704 static void
8705 str_stack_fini(netstackid_t stackid, void *arg)
8706 {
8707 str_stack_t *ss = (str_stack_t *)arg;
8708
8709 kmem_free(ss, sizeof (*ss));
8710 }
8711