/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is no longer used, but is kept here in case some module still wants to
 * access it via the qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the STREAMS scheduling is done via task queues. Task queues may
 * fail for non-sleep dispatches, so there are two backup threads servicing
 * failed requests for queues and syncqs. Both of these threads also service
 * failed freebs dispatch requests. Queues are put on the list specified by
 * the `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail'
 * pointers, and freebs requests are put on `freebs_list', which has no tail
 * pointer. All three lists are protected by a single `service_queue' lock
 * and use the `services_to_run' condition variable for signaling the
 * background threads. Use of a single lock should not be a problem because
 * it is only used under heavy loads when task queues start to fail, and at
 * that time it may be a good idea to throttle scheduling requests anyway.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only
 * one thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
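
/*
 * Illustrative sketch only (not part of the original code): the fallback
 * pattern described above is to try a non-sleeping task queue dispatch
 * first and, if that fails, to hand the work to a backup thread via the
 * lists declared here, using the ENQUEUE macro defined later in this file.
 * The dispatch function name `qservice_func' is made up for the example:
 *
 *	if (taskq_dispatch(streams_taskq, qservice_func, q,
 *	    TQ_NOSLEEP) == 0) {
 *		mutex_enter(&service_queue);
 *		STRSTAT(taskqfails);
 *		ENQUEUE(q, qhead, qtail, q_link);
 *		cv_signal(&services_to_run);
 *		mutex_exit(&service_queue);
 *	}
 */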

/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait until executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

static void	*str_stack_init(netstackid_t stackid, netstack_t *ns);
static void	str_stack_shutdown(netstackid_t stackid, void *arg);
static void	str_stack_fini(netstackid_t stackid, void *arg);

/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may depend
 * on this syncq flow control, so we prefer a large default value. For a
 * potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;

/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl, don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *		  dm_sq == syncq_1
 *		  dm_ref
 *		  dm_next --> dm_str == streamtab_2
 *			      dm_sq == syncq_2
 *			      dm_ref
 *			      dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *		     ^		       ^   ^		 ^
 *		     |	  _____________/   |		 |
 *		     |	 /		   |		 |
 * dev_impl:	...|x|y|...	       module A	     module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;
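
/*
 * Illustrative sketch only (not part of the original code): hold_dm() and
 * rele_dm() implement the sharing scheme described above. In rough outline,
 * hold_dm() searches perdm_list under perdm_rwlock for a perdm_t with a
 * matching streamtab and bumps dm_ref if found; otherwise it allocates a
 * new entry with a fresh syncq from new_syncq() and links it in. A
 * simplified sketch of the lookup step:
 *
 *	perdm_t *p;
 *
 *	rw_enter(&perdm_rwlock, RW_WRITER);
 *	for (p = perdm_list; p != NULL; p = p->dm_next) {
 *		if (p->dm_str == str) {
 *			p->dm_ref++;
 *			break;
 *		}
 *	}
 *	if (p == NULL)
 *		... allocate a perdm_t, set dm_sq = new_syncq(),
 *		    dm_ref = 1, and prepend it to perdm_list ...
 *	rw_exit(&perdm_rwlock);
 */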

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/*
 * esballoc tunable parameters.
 */
int esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t esbq_timeout = 0x8;	/* timeout to process esb queue */

/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)()) pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {				\
	EQUIV(head, tail);					\
	IMPLY(tail != NULL, tail->link == NULL);		\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail', using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {				\
	ASSERT(el->link == NULL);				\
	LISTCHECK(head, tail, link);				\
	if (head == NULL)					\
		head = el;					\
	else							\
		tail->link = el;				\
	tail = el;						\
}

/*
 * Dequeue the first element of the list denoted by the `head' and `tail'
 * pointers using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {				\
	LISTCHECK(head, tail, link);				\
	el = head;						\
	if (head != NULL) {					\
		head = head->link;				\
		if (head == NULL)				\
			tail = NULL;				\
		el->link = NULL;				\
	}							\
}

/*
 * Remove `el' from the list using the `chase' and `curr' pointers and
 * return the result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
	LISTCHECK(head, tail, link);				\
	chase = NULL;						\
	succeed = 0;						\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;					\
	if (curr != NULL) {					\
		succeed = 1;					\
		ASSERT(curr == el);				\
		if (chase != NULL)				\
			chase->link = curr->link;		\
		else						\
			head = curr->link;			\
		curr->link = NULL;				\
		if (curr == tail)				\
			tail = chase;				\
	}							\
	LISTCHECK(head, tail, link);				\
}
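
/*
 * Illustrative sketch only (not part of the original code): a background
 * service thread consumes one of these lists with DQ under the
 * service_queue lock, sleeping on the condition variable while the list
 * is empty. A minimal sketch of that consumer loop (the real
 * streams_qbkgrnd_service() also drains freebs_list and handles
 * suspend/resume):
 *
 *	queue_t *q;
 *
 *	mutex_enter(&service_queue);
 *	for (;;) {
 *		while (qhead == NULL)
 *			cv_wait(&services_to_run, &service_queue);
 *		DQ(q, qhead, qtail, q_link);
 *		mutex_exit(&service_queue);
 *		queue_service(q);
 *		mutex_enter(&service_queue);
 *	}
 */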

/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
		/* The queue should not be linked anywhere */		\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);			\
		/* Queue may be only enqueued on its syncq */		\
		ASSERT(sq == qp->q_syncq);				\
		/* Check the correctness of SQ_MESSAGES flag */		\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
		/*							\
		 * Sanity check of priority field: empty queue should	\
		 * have zero priority					\
		 * and nqueues equal to zero.				\
		 */							\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
		/* Sanity check of sq_nqueues field */			\
		EQUIV(sq->sq_head, sq->sq_nqueues);			\
		if (sq->sq_head == NULL) {				\
			sq->sq_head = sq->sq_tail = qp;			\
			sq->sq_flags |= SQ_MESSAGES;			\
		} else if (qp->q_spri == 0) {				\
			qp->q_sqprev = sq->sq_tail;			\
			sq->sq_tail->q_sqnext = qp;			\
			sq->sq_tail = qp;				\
		} else {						\
			/*						\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.		\
			 */						\
			queue_t **qpp = &sq->sq_tail;			\
			queue_t *qnext = NULL;				\
									\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;				\
				qpp = &(*qpp)->q_sqprev;		\
			}						\
			qp->q_sqnext = qnext;				\
			qp->q_sqprev = *qpp;				\
			if (*qpp != NULL) {				\
				(*qpp)->q_sqnext = qp;			\
			} else {					\
				sq->sq_head = qp;			\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}						\
			*qpp = qp;					\
		}							\
		qp->q_sqflags |= Q_SQQUEUED;				\
		qp->q_sqtstamp = ddi_get_lbolt();			\
		sq->sq_nqueues++;					\
	}								\
}

/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)							\
	{								\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
		ASSERT(qp->q_sqflags & Q_SQQUEUED);			\
		ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);	\
		ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);		\
		/* Check that the queue is actually in the list */	\
		ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);	\
		ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);	\
		ASSERT(sq->sq_nqueues != 0);				\
		if (qp->q_sqprev == NULL) {				\
			/* First queue on list, make head q_sqnext */	\
			sq->sq_head = qp->q_sqnext;			\
		} else {						\
			/* Make prev->next == next */			\
			qp->q_sqprev->q_sqnext = qp->q_sqnext;		\
		}							\
		if (qp->q_sqnext == NULL) {				\
			/* Last queue on list, make tail sqprev */	\
			sq->sq_tail = qp->q_sqprev;			\
		} else {						\
			/* Make next->prev == prev */			\
			qp->q_sqnext->q_sqprev = qp->q_sqprev;		\
		}							\
		/* clear out references on this queue */		\
		qp->q_sqprev = qp->q_sqnext = NULL;			\
		qp->q_sqflags &= ~Q_SQQUEUED;				\
		/* If there is nothing queued, clear SQ_MESSAGES */	\
		if (sq->sq_head != NULL) {				\
			sq->sq_pri = sq->sq_head->q_spri;		\
		} else {						\
			sq->sq_flags &= ~SQ_MESSAGES;			\
			sq->sq_pri = 0;					\
		}							\
		sq->sq_nqueues--;					\
		ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||	\
		    (sq->sq_flags & SQ_QUEUED) == 0);			\
	}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)						\
	{								\
		ASSERT(MUTEX_HELD(QLOCK(qp)));				\
		ASSERT(qp->q_sqhead == NULL ||				\
		    (qp->q_sqtail != NULL &&				\
		    qp->q_sqtail->b_next == NULL));			\
		qp->q_syncqmsgs++;					\
		ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */	\
		if (qp->q_sqhead == NULL) {				\
			qp->q_sqhead = qp->q_sqtail = mp;		\
		} else {						\
			qp->q_sqtail->b_next = mp;			\
			qp->q_sqtail = mp;				\
		}							\
		ASSERT(qp->q_syncqmsgs > 0);				\
		set_qfull(qp);						\
	}

#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
		if ((sq)->sq_ciputctrl != NULL) {			\
			int i;						\
			int nlocks = (sq)->sq_nciputctrl;		\
			ciputctrl_t *cip = (sq)->sq_ciputctrl;		\
			ASSERT((sq)->sq_type & SQ_CIPUT);		\
			for (i = 0; i <= nlocks; i++) {			\
				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
				cip[i].ciputctrl_count |= SQ_FASTPUT;	\
			}						\
		}							\
}

#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
		if ((sq)->sq_ciputctrl != NULL) {			\
			int i;						\
			int nlocks = (sq)->sq_nciputctrl;		\
			ciputctrl_t *cip = (sq)->sq_ciputctrl;		\
			ASSERT((sq)->sq_type & SQ_CIPUT);		\
			for (i = 0; i <= nlocks; i++) {			\
				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
				cip[i].ciputctrl_count &= ~SQ_FASTPUT;	\
			}						\
		}							\
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {						\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
	while (stp->sd_qhead != NULL) {					\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
		ASSERT(stp->sd_nqueues > 0);				\
		stp->sd_nqueues--;					\
		ASSERT(!(q->q_flag & QINSERVICE));			\
		mutex_exit(&stp->sd_qlock);				\
		queue_service(q);					\
		mutex_enter(&stp->sd_qlock);				\
	}								\
	ASSERT(stp->sd_nqueues == 0);					\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}

/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t	*sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t	*sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}
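
	/*
	 * Worked example, for illustration only (not part of the original
	 * code): the two statements above round n_ciputctrl up to a power
	 * of two before clamping it to max_n_ciputctrl. highbit() returns
	 * the one-indexed position of the highest set bit, so for
	 * ncpus == 6: highbit(5) == 3, 1 << 3 == 8, and MIN(8, 16) == 8.
	 * A 64-CPU machine would compute 64 and be clamped to
	 * max_n_ciputctrl (16); an exact power of two such as 8 maps to
	 * itself (highbit(7) == 3, 1 << 3 == 8).
	 */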

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Register a netstack hook so that autopush and persistent link
	 * information is kept per zone.
	 * Note: uses the shutdown hook instead of the destroy hook so that
	 * the persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}

/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}

/*
 * Send SIGPOLL/SIGURG signals to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t  *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}

		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when an fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}
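
/*
 * Illustrative sketch only (not part of the original code): from userland,
 * a process registers for these signals with the standard I_SETSIG ioctl
 * on a STREAMS file descriptor, e.g.:
 *
 *	#include <stropts.h>
 *	#include <signal.h>
 *
 *	void on_poll(int sig) { ... read from the stream ... }
 *
 *	(void) signal(SIGPOLL, on_poll);
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_RDBAND) < 0)
 *		... error ...
 *
 * After this, arrival of a normal or banded message at the stream head
 * causes str_sendsig(..., S_INPUT | S_RDBAND, ...) above to run and a
 * SIGPOLL to be delivered via dosendsig(); registering S_BANDURG along
 * with S_RDBAND yields SIGURG instead of SIGPOLL for banded data.
 */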

/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t		major;
	cdevsw_impl_t	*dp;
	struct streamtab *str;
	queue_t		*rq;
	queue_t		*wrq;
	uint32_t	qflag;
	uint32_t	sqtype;
	perdm_t		*dmp;
	int		error;
	int		sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * Stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up the q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int error;
	dev_t dummydev;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * A successful open should have done qprocson().
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * the QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero, i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove the queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through these stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done).
 * Thus we cannot reset QENAB unless we actually removed the queue from the
 * service list.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}

/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait till the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be
	 * freed when the last module/driver is unloaded.
	 * If for PERMOD perimeters the queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of
	 * these functions remove the queue from its syncq list, so sqthread
	 * will not try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
	    curproc->p_pid)) == NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}

/*
 * Copy ioctl data to userland. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t  n;
	int	error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}
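
/*
 * Illustrative sketch only (not part of the original code): getiocd()
 * above walks the b_cont chain of message blocks, consuming the valid
 * bytes (b_rptr up to b_wptr) of each block until `count' bytes have been
 * copied out. The same walk, reduced to just totalling the data bytes in
 * a chain (essentially what msgdsize(9F) does for M_DATA blocks):
 *
 *	size_t
 *	chain_bytes(mblk_t *mp)
 *	{
 *		size_t bytes = 0;
 *
 *		for (; mp != NULL; mp = mp->b_cont)
 *			bytes += mp->b_wptr - mp->b_rptr;
 *		return (bytes);
 *	}
 */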

/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * If the lower stream is a pipe/FIFO, return, since link
	 * cycles cannot happen on pipes/FIFOs.
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}
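
/*
 * Worked example, for illustration only (not part of the original code):
 * linkcycle() performs an iterative depth-first walk of the mux graph,
 * using mn_originp as the backtracking pointer instead of a recursion
 * stack. Each node is a driver major number and each edge records an
 * existing I_LINK/I_PLINK. For instance, if mux A already has mux B
 * linked under it (edge A -> B), an attempt to link A under B starts the
 * walk at A (the proposed lower stream), follows edge A -> B, finds B's
 * major equal to the proposed upper stream's major, and returns 1 to
 * reject the cycle.
 */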

/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}

/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}


int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata	*stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata	*stpdown;
	struct streamtab *str;
	queue_t		*passq;
	syncq_t		*passyncq;
	queue_t		*rq;
	cdevsw_impl_t	*dp;
	uint32_t	qflag;
	uint32_t	sqtype;
	perdm_t		*dmp;
	int		error = 0;
	netstack_t	*ns;
	str_stack_t	*ss;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	ns = netstack_find_by_cred(crp);
	ASSERT(ns != NULL);
	ss = ns->netstack_str;
	ASSERT(ss != NULL);

	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding a mux_node entry past the end of mux_nodes[].
	 * For FIFOs we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
	    linkcycle(stp, stpdown, ss)) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared at the end of mlink when the passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while the
	 * passthru queue is in place (it is not a proper module and doesn't
	 * have an open entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and
	 * these routines assert that STPLEX is not set.
	 * After link_addpassthru() nothing may come from below since the
	 * pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with the STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change the stream head. So, the
	 * correct sequence of actions is:
	 *
	 *	1) Set STRPLUMB
	 *	2) Call link_addpassthru()
	 *	3) Set STPLEX
	 *	4) Call setq and update the stream state
	 *	5) Clear STPLEX
	 *	6) Call link_rempassthru()
	 *	7) Clear STRPLUMB
	 *
	 * The same sequence applies to munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below lower mux. This will block
	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneously with
	 * setq processing, we disable the streamhead syncq and wait until
	 * the existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		/* Wakeup anyone waiting for STRPLUMB to clear. */
*/
1914 cv_broadcast(&stpdown->sd_monitor);
1915 mutex_exit(&stpdown->sd_lock);
1916
1917 mutex_exit(&muxifier);
1918 netstack_rele(ss->ss_netstack);
1919 return (error);
1920 }
1921 mutex_enter(&fpdown->f_tlock);
1922 fpdown->f_count++;
1923 mutex_exit(&fpdown->f_tlock);
1924
1925 /*
1926 * If we've made it here, the linkage is all set up, so we should also
1927 * set up the layered driver linkages.
1928 */
1929
1930 ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1931 if (cmd == I_LINK) {
1932 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1933 } else {
1934 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1935 }
1936
1937 link_rempassthru(passq);
1938
1939 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);
1940
1941 /*
1942 * Mark the upper stream as having dependent links
1943 * so that strclose can clean it up.
1944 */
1945 if (cmd == I_LINK) {
1946 mutex_enter(&stp->sd_lock);
1947 stp->sd_flag |= STRHASLINKS;
1948 mutex_exit(&stp->sd_lock);
1949 }
1950 /*
1951 * Wake up any other processes that may have been
1952 * waiting on the lower stream. These will all
1953 * error out.
1954 */
1955 mutex_enter(&stpdown->sd_lock);
1956 /* The passthru module is removed so we may release STRPLUMB */
1957 stpdown->sd_flag &= ~STRPLUMB;
1958 cv_broadcast(&rq->q_wait);
1959 cv_broadcast(&_WR(rq)->q_wait);
1960 cv_broadcast(&stpdown->sd_monitor);
1961 mutex_exit(&stpdown->sd_lock);
1962 mutex_exit(&muxifier);
1963 *rvalp = linkp->li_lblk.l_index;
1964 netstack_rele(ss->ss_netstack);
1965 return (0);
1966 }
1967
1968 int
1969 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1970 {
1971 int ret;
1972 struct file *fpdown;
1973
1974 fpdown = getf(arg);
1975 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1976 if (fpdown != NULL)
1977 releasef(arg);
1978 return (ret);
1979 }
1980
1981 /*
1982 * Unlink a multiplexor link. Stp is the controlling stream for the
1983 * link, and linkp points to the link's entry in the linkinfo list.
1984 * The muxifier lock must be held on entry and is dropped on exit.
1985 *
1986 * NOTE: Currently it is assumed that the mux will process all the messages
1987 * sitting on its queue before ACKing the UNLINK. It is the responsibility
1988 * of the mux to handle all the messages that arrive before UNLINK.
1989 * If the mux has to send down messages on its lower stream before
1990 * ACKing I_UNLINK, then it *should* know to handle messages even
1991 * after the UNLINK is acked (actually it should be able to handle them until
1992 * we re-block the read side of the pass queue here). If the mux does not
1993 * open up the lower stream, any messages that arrive during UNLINK
1994 * will be put in the stream head. In the case of the lower stream opening
1995 * up, some messages might land in the stream head depending on when
1996 * each message arrived and when the read side of the pass queue was
1997 * re-blocked.
1998 */
1999 int
2000 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2001 str_stack_t *ss)
2002 {
2003 struct strioctl strioc;
2004 struct stdata *stpdown;
2005 queue_t *rq, *wrq;
2006 queue_t *passq;
2007 syncq_t *passyncq;
2008 int error = 0;
2009 file_t *fpdown;
2010
2011 ASSERT(MUTEX_HELD(&muxifier));
2012
2013 stpdown = linkp->li_fpdown->f_vnode->v_stream;
2014
2015 /*
2016 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2017 */
2018 mutex_enter(&stpdown->sd_lock);
2019 stpdown->sd_flag |= STRPLUMB;
2020 mutex_exit(&stpdown->sd_lock);
2021
2022 /*
2023 * Add the passthru queue below the lower mux.
This will block the
2024 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2025 */
2026 passq = link_addpassthru(stpdown);
2027
2028 if ((flag & LINKTYPEMASK) == LINKNORMAL)
2029 strioc.ic_cmd = I_UNLINK;
2030 else
2031 strioc.ic_cmd = I_PUNLINK;
2032 strioc.ic_timout = INFTIM;
2033 strioc.ic_len = sizeof (struct linkblk);
2034 strioc.ic_dp = (char *)&linkp->li_lblk;
2035
2036 error = strdoioctl(stp, &strioc, FNATIVE,
2037 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2038
2039 /*
2040 * If there was an error and this is not called via strclose,
2041 * return to the user. Otherwise, pretend there was no error
2042 * and close the link.
2043 */
2044 if (error) {
2045 if (flag & LINKCLOSE) {
2046 cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2047 "unlink ioctl, closing anyway (%d)\n", error);
2048 } else {
2049 link_rempassthru(passq);
2050 mutex_enter(&stpdown->sd_lock);
2051 stpdown->sd_flag &= ~STRPLUMB;
2052 cv_broadcast(&stpdown->sd_monitor);
2053 mutex_exit(&stpdown->sd_lock);
2054 mutex_exit(&muxifier);
2055 return (error);
2056 }
2057 }
2058
2059 mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
2060 fpdown = linkp->li_fpdown;
2061 lbfree(linkp);
2062
2063 /*
2064 * We go ahead and drop muxifier here--it's a nasty global lock that
2065 * can slow others down. It's okay to do so since attempts to mlink() this
2066 * stream will be stopped because STPLEX is still set in the stdata
2067 * structure, and munlink() is stopped because mux_rmvedge() and
2068 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2069 * respectively. Note that we defer the closef() of fpdown until
2070 * after we drop muxifier since strclose() can call munlinkall().
2071 */
2072 mutex_exit(&muxifier);
2073
2074 wrq = stpdown->sd_wrq;
2075 rq = _RD(wrq);
2076
2077 /*
2078 * Get rid of outstanding service procedure runs, before we make
2079 * it a stream head, since a stream head doesn't have any service
2080 * procedure.
2081 */
2082 disable_svc(rq);
2083 wait_svc(rq);
2084
2085 /*
2086 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2087 * is queued up to be finished. The mux should take care that nothing is
2088 * sent down to this queue. We should do it now as we're going to block
2089 * passyncq if it was unblocked.
2090 */
2091 if (wrq->q_flag & QPERMOD) {
2092 syncq_t *sq = wrq->q_syncq;
2093
2094 mutex_enter(SQLOCK(sq));
2095 while (wrq->q_sqflags & Q_SQQUEUED) {
2096 sq->sq_flags |= SQ_WANTWAKEUP;
2097 cv_wait(&sq->sq_wait, SQLOCK(sq));
2098 }
2099 mutex_exit(SQLOCK(sq));
2100 }
2101 passyncq = passq->q_syncq;
2102 if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2103
2104 syncq_t *sq, *outer;
2105
2106 /*
2107 * Messages could be flowing from underneath. We will
2108 * block the read side of the passq. This would be
2109 * sufficient for QPAIR and QPERQ muxes to ensure
2110 * that no data is flowing up into this queue
2111 * and hence no thread is active in this instance of the
2112 * lower mux. But for QPERMOD and QMTOUTPERIM there
2113 * could be messages on the inner and outer/inner
2114 * syncqs respectively. We will wait for them to drain.
2115 * Because passq is blocked, messages end up in the syncq,
2116 * and qfill_syncq could possibly end up setting QFULL,
2117 * which will access rq->q_flag. Hence, we have to
2118 * acquire the QLOCK in setq.
2119 *
2120 * XXX Messages can also flow from the top into this
2121 * queue even though the unlink is over (e.g. some instance
2122 * in putnext() called from the top that has still not
2123 * accessed this queue. And also putq(lowerq)?).
2124 * Solution: How about blocking the l_qtop queue?
2125 * Do we really care about such pure D_MP muxes?
2126 */
2127
2128 blocksq(passyncq, SQ_BLOCKED, 0);
2129
2130 sq = rq->q_syncq;
2131 if ((outer = sq->sq_outer) != NULL) {
2132
2133 /*
2134 * We just have to wait for the outer sq_count
2135 * to drop to zero. As this does not prevent new
2136 * messages from entering the outer perimeter, this
2137 * is subject to starvation.
2138 *
2139 * NOTE: Because of the blocksq above, messages could
2140 * be in the inner syncq only because of some
2141 * thread holding the outer perimeter exclusively.
2142 * Hence it would be sufficient to wait for the
2143 * exclusive holder of the outer perimeter to drain
2144 * the inner and outer syncqs. But we will not depend
2145 * on this feature and hence check the inner syncqs
2146 * separately.
2147 */
2148 wait_syncq(outer);
2149 }
2150
2151
2152 /*
2153 * There could be messages destined for
2154 * this queue. Let the exclusive holder
2155 * drain it.
2156 */
2157
2158 wait_syncq(sq);
2159 ASSERT((rq->q_flag & QPERMOD) ||
2160 ((rq->q_syncq->sq_head == NULL) &&
2161 (_WR(rq)->q_syncq->sq_head == NULL)));
2162 }
2163
2164 /*
2165 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special
2166 * case as we don't disable its syncq or remove it from the syncq
2167 * service list.
2168 */
2169 if (rq->q_flag & QPERMOD) {
2170 syncq_t *sq = rq->q_syncq;
2171
2172 mutex_enter(SQLOCK(sq));
2173 while (rq->q_sqflags & Q_SQQUEUED) {
2174 sq->sq_flags |= SQ_WANTWAKEUP;
2175 cv_wait(&sq->sq_wait, SQLOCK(sq));
2176 }
2177 mutex_exit(SQLOCK(sq));
2178 }
2179
2180 /*
2181 * flush_syncq changes state only when there are some messages to
2182 * free, i.e. when it returns a non-zero value.
2183 */
2184 ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2185 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2186
2187 /*
2188 * Nobody else should know about this queue now.
2189 * If the mux did not process the messages before
2190 * acking the I_UNLINK, free them now.
2191 */
2192
2193 flushq(rq, FLUSHALL);
2194 flushq(_WR(rq), FLUSHALL);
2195
2196 /*
2197 * Convert the mux lower queue into a stream head queue.
2198 * Turn off STPLEX before we turn on the stream by removing the passq.
2199 */
2200 rq->q_ptr = wrq->q_ptr = stpdown;
2201 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2202
2203 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2204 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2205
2206 enable_svc(rq);
2207
2208 /*
2209 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2210 * needs to be set to prevent a reopen() of the stream - such a reopen may
2211 * try to call the non-existent pass queue open routine and panic.
2212 */
2213 mutex_enter(&stpdown->sd_lock);
2214 stpdown->sd_flag &= ~STPLEX;
2215 mutex_exit(&stpdown->sd_lock);
2216
2217 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2218 ((flag & LINKTYPEMASK) == LINKPERSIST));
2219
2220 /* clean up the layered driver linkages */
2221 if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2222 ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2223 } else {
2224 ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2225 }
2226
2227 link_rempassthru(passq);
2228
2229 /*
2230 * Now all plumbing changes are finished and STRPLUMB is no
2231 * longer needed.
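 * Clearing STRPLUMB below and broadcasting on sd_monitor wakes any
 * open() that blocked waiting for the plumbing change to finish.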
2232 */ 2233 mutex_enter(&stpdown->sd_lock); 2234 stpdown->sd_flag &= ~STRPLUMB; 2235 cv_broadcast(&stpdown->sd_monitor); 2236 mutex_exit(&stpdown->sd_lock); 2237 2238 (void) closef(fpdown); 2239 return (0); 2240 } 2241 2242 /* 2243 * Unlink all multiplexor links for which stp is the controlling stream. 2244 * Return 0, or a non-zero errno on failure. 2245 */ 2246 int 2247 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2248 { 2249 linkinfo_t *linkp; 2250 int error = 0; 2251 2252 mutex_enter(&muxifier); 2253 while (linkp = findlinks(stp, 0, flag, ss)) { 2254 /* 2255 * munlink() releases the muxifier lock. 2256 */ 2257 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2258 return (error); 2259 mutex_enter(&muxifier); 2260 } 2261 mutex_exit(&muxifier); 2262 return (0); 2263 } 2264 2265 /* 2266 * A multiplexor link has been made. Add an 2267 * edge to the directed graph. 2268 */ 2269 void 2270 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2271 { 2272 struct mux_node *np; 2273 struct mux_edge *ep; 2274 major_t upmaj; 2275 major_t lomaj; 2276 2277 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2278 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2279 np = &ss->ss_mux_nodes[upmaj]; 2280 if (np->mn_outp) { 2281 ep = np->mn_outp; 2282 while (ep->me_nextp) 2283 ep = ep->me_nextp; 2284 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2285 ep = ep->me_nextp; 2286 } else { 2287 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2288 ep = np->mn_outp; 2289 } 2290 ep->me_nextp = NULL; 2291 ep->me_muxid = muxid; 2292 /* 2293 * Save the dev_t for the purposes of str_stack_shutdown. 2294 * str_stack_shutdown assumes that the device allows reopen, since 2295 * this dev_t is the one after any cloning by xx_open(). 2296 * Would prefer finding the dev_t from before any cloning, 2297 * but specfs doesn't retain that. 2298 */ 2299 ep->me_dev = upstp->sd_vnode->v_rdev; 2300 if (lostp->sd_vnode->v_type == VFIFO) 2301 ep->me_nodep = NULL; 2302 else 2303 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2304 } 2305 2306 /* 2307 * A multiplexor link has been removed. Remove the 2308 * edge in the directed graph. 2309 */ 2310 void 2311 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2312 { 2313 struct mux_node *np; 2314 struct mux_edge *ep; 2315 struct mux_edge *pep = NULL; 2316 major_t upmaj; 2317 2318 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2319 np = &ss->ss_mux_nodes[upmaj]; 2320 ASSERT(np->mn_outp != NULL); 2321 ep = np->mn_outp; 2322 while (ep) { 2323 if (ep->me_muxid == muxid) { 2324 if (pep) 2325 pep->me_nextp = ep->me_nextp; 2326 else 2327 np->mn_outp = ep->me_nextp; 2328 kmem_free(ep, sizeof (struct mux_edge)); 2329 return; 2330 } 2331 pep = ep; 2332 ep = ep->me_nextp; 2333 } 2334 ASSERT(0); /* should not reach here */ 2335 } 2336 2337 /* 2338 * Translate the device flags (from conf.h) to the corresponding 2339 * qflag and sq_flag (type) values. 
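 *
 * For example (derived from the cases handled below): a driver declared
 * with just D_MP maps to qflag QMTSAFE and sqtype SQ_CI|SQ_CO, while
 * D_MP|D_MTPERMOD|D_MTPUTSHARED maps to qflag QPERMOD and sqtype
 * SQ_CIPUT|SQ_CO.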
2340 */ 2341 int 2342 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2343 uint32_t *sqtypep) 2344 { 2345 uint32_t qflag = 0; 2346 uint32_t sqtype = 0; 2347 2348 if (devflag & _D_OLD) 2349 goto bad; 2350 2351 /* Inner perimeter presence and scope */ 2352 switch (devflag & D_MTINNER_MASK) { 2353 case D_MP: 2354 qflag |= QMTSAFE; 2355 sqtype |= SQ_CI; 2356 break; 2357 case D_MTPERQ|D_MP: 2358 qflag |= QPERQ; 2359 break; 2360 case D_MTQPAIR|D_MP: 2361 qflag |= QPAIR; 2362 break; 2363 case D_MTPERMOD|D_MP: 2364 qflag |= QPERMOD; 2365 break; 2366 default: 2367 goto bad; 2368 } 2369 2370 /* Outer perimeter */ 2371 if (devflag & D_MTOUTPERIM) { 2372 switch (devflag & D_MTINNER_MASK) { 2373 case D_MP: 2374 case D_MTPERQ|D_MP: 2375 case D_MTQPAIR|D_MP: 2376 break; 2377 default: 2378 goto bad; 2379 } 2380 qflag |= QMTOUTPERIM; 2381 } 2382 2383 /* Inner perimeter modifiers */ 2384 if (devflag & D_MTINNER_MOD) { 2385 switch (devflag & D_MTINNER_MASK) { 2386 case D_MP: 2387 goto bad; 2388 default: 2389 break; 2390 } 2391 if (devflag & D_MTPUTSHARED) 2392 sqtype |= SQ_CIPUT; 2393 if (devflag & _D_MTOCSHARED) { 2394 /* 2395 * The code in putnext assumes that it has the 2396 * highest concurrency by not checking sq_count. 2397 * Thus _D_MTOCSHARED can only be supported when 2398 * D_MTPUTSHARED is set. 2399 */ 2400 if (!(devflag & D_MTPUTSHARED)) 2401 goto bad; 2402 sqtype |= SQ_CIOC; 2403 } 2404 if (devflag & _D_MTCBSHARED) { 2405 /* 2406 * The code in putnext assumes that it has the 2407 * highest concurrency by not checking sq_count. 2408 * Thus _D_MTCBSHARED can only be supported when 2409 * D_MTPUTSHARED is set. 2410 */ 2411 if (!(devflag & D_MTPUTSHARED)) 2412 goto bad; 2413 sqtype |= SQ_CICB; 2414 } 2415 if (devflag & _D_MTSVCSHARED) { 2416 /* 2417 * The code in putnext assumes that it has the 2418 * highest concurrency by not checking sq_count. 2419 * Thus _D_MTSVCSHARED can only be supported when 2420 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2421 * supported only for QPERMOD. 2422 */ 2423 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2424 goto bad; 2425 sqtype |= SQ_CISVC; 2426 } 2427 } 2428 2429 /* Default outer perimeter concurrency */ 2430 sqtype |= SQ_CO; 2431 2432 /* Outer perimeter modifiers */ 2433 if (devflag & D_MTOCEXCL) { 2434 if (!(devflag & D_MTOUTPERIM)) { 2435 /* No outer perimeter */ 2436 goto bad; 2437 } 2438 sqtype &= ~SQ_COOC; 2439 } 2440 2441 /* Synchronous Streams extended qinit structure */ 2442 if (devflag & D_SYNCSTR) 2443 qflag |= QSYNCSTR; 2444 2445 /* 2446 * Private flag used by a transport module to indicate 2447 * to sockfs that it supports direct-access mode without 2448 * having to go through STREAMS. 2449 */ 2450 if (devflag & _D_DIRECT) { 2451 /* Reject unless the module is fully-MT (no perimeter) */ 2452 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2453 goto bad; 2454 qflag |= _QDIRECT; 2455 } 2456 2457 *qflagp = qflag; 2458 *sqtypep = sqtype; 2459 return (0); 2460 2461 bad: 2462 cmn_err(CE_WARN, 2463 "stropen: bad MT flags (0x%x) in driver '%s'", 2464 (int)(qflag & D_MTSAFETY_MASK), 2465 stp->st_rdinit->qi_minfo->mi_idname); 2466 2467 return (EINVAL); 2468 } 2469 2470 /* 2471 * Set the interface values for a pair of queues (qinit structure, 2472 * packet sizes, water marks). 2473 * setq assumes that the caller does not have a claim (entersq or claimq) 2474 * on the queue. 
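 * setq is used, for instance, by mlink_file() and munlink() above to
 * convert the lower stream head queue pair into mux queues and back.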
2475 */ 2476 void 2477 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2478 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2479 { 2480 queue_t *wq; 2481 syncq_t *sq, *outer; 2482 2483 ASSERT(rq->q_flag & QREADR); 2484 ASSERT((qflag & QMT_TYPEMASK) != 0); 2485 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2486 2487 wq = _WR(rq); 2488 rq->q_qinfo = rinit; 2489 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2490 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2491 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2492 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2493 wq->q_qinfo = winit; 2494 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2495 wq->q_lowat = winit->qi_minfo->mi_lowat; 2496 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2497 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2498 2499 /* Remove old syncqs */ 2500 sq = rq->q_syncq; 2501 outer = sq->sq_outer; 2502 if (outer != NULL) { 2503 ASSERT(wq->q_syncq->sq_outer == outer); 2504 outer_remove(outer, rq->q_syncq); 2505 if (wq->q_syncq != rq->q_syncq) 2506 outer_remove(outer, wq->q_syncq); 2507 } 2508 ASSERT(sq->sq_outer == NULL); 2509 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2510 2511 if (sq != SQ(rq)) { 2512 if (!(rq->q_flag & QPERMOD)) 2513 free_syncq(sq); 2514 if (wq->q_syncq == rq->q_syncq) 2515 wq->q_syncq = NULL; 2516 rq->q_syncq = NULL; 2517 } 2518 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2519 wq->q_syncq != SQ(rq)) { 2520 free_syncq(wq->q_syncq); 2521 wq->q_syncq = NULL; 2522 } 2523 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2524 rq->q_syncq->sq_tail == NULL)); 2525 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2526 wq->q_syncq->sq_tail == NULL)); 2527 2528 if (!(rq->q_flag & QPERMOD) && 2529 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2530 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2531 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2532 rq->q_syncq->sq_nciputctrl, 0); 2533 ASSERT(ciputctrl_cache != NULL); 2534 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2535 rq->q_syncq->sq_ciputctrl = NULL; 2536 rq->q_syncq->sq_nciputctrl = 0; 2537 } 2538 2539 if (!(wq->q_flag & QPERMOD) && 2540 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2541 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2542 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2543 wq->q_syncq->sq_nciputctrl, 0); 2544 ASSERT(ciputctrl_cache != NULL); 2545 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2546 wq->q_syncq->sq_ciputctrl = NULL; 2547 wq->q_syncq->sq_nciputctrl = 0; 2548 } 2549 2550 sq = SQ(rq); 2551 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2552 ASSERT(sq->sq_outer == NULL); 2553 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2554 2555 /* 2556 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2557 * bits in sq_flag based on the sqtype. 2558 */ 2559 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2560 2561 rq->q_syncq = wq->q_syncq = sq; 2562 sq->sq_type = sqtype; 2563 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2564 2565 /* 2566 * We are making sq_svcflags zero, 2567 * resetting SQ_DISABLED in case it was set by 2568 * wait_svc() in the munlink path. 2569 * 2570 */ 2571 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2572 sq->sq_svcflags = 0; 2573 2574 /* 2575 * We need to acquire the lock here for the mlink and munlink case, 2576 * where canputnext, backenable, etc can access the q_flag. 
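 * Callers that pass lock_needed == B_FALSE are expected to guarantee
 * that no other thread can yet observe these queues, making the
 * unlocked q_flag update safe.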
2577 */ 2578 if (lock_needed) { 2579 mutex_enter(QLOCK(rq)); 2580 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2581 mutex_exit(QLOCK(rq)); 2582 mutex_enter(QLOCK(wq)); 2583 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2584 mutex_exit(QLOCK(wq)); 2585 } else { 2586 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2587 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2588 } 2589 2590 if (qflag & QPERQ) { 2591 /* Allocate a separate syncq for the write side */ 2592 sq = new_syncq(); 2593 sq->sq_type = rq->q_syncq->sq_type; 2594 sq->sq_flags = rq->q_syncq->sq_flags; 2595 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2596 sq->sq_oprev == NULL); 2597 wq->q_syncq = sq; 2598 } 2599 if (qflag & QPERMOD) { 2600 sq = dmp->dm_sq; 2601 2602 /* 2603 * Assert that we do have an inner perimeter syncq and that it 2604 * does not have an outer perimeter associated with it. 2605 */ 2606 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2607 sq->sq_oprev == NULL); 2608 rq->q_syncq = wq->q_syncq = sq; 2609 } 2610 if (qflag & QMTOUTPERIM) { 2611 outer = dmp->dm_sq; 2612 2613 ASSERT(outer->sq_outer == NULL); 2614 outer_insert(outer, rq->q_syncq); 2615 if (wq->q_syncq != rq->q_syncq) 2616 outer_insert(outer, wq->q_syncq); 2617 } 2618 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2619 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2620 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2621 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2622 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2623 2624 /* 2625 * Initialize struio() types. 2626 */ 2627 rq->q_struiot = 2628 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2629 wq->q_struiot = 2630 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE; 2631 } 2632 2633 perdm_t * 2634 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2635 { 2636 syncq_t *sq; 2637 perdm_t **pp; 2638 perdm_t *p; 2639 perdm_t *dmp; 2640 2641 ASSERT(str != NULL); 2642 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2643 2644 rw_enter(&perdm_rwlock, RW_READER); 2645 for (p = perdm_list; p != NULL; p = p->dm_next) { 2646 if (p->dm_str == str) { /* found one */ 2647 atomic_add_32(&(p->dm_ref), 1); 2648 rw_exit(&perdm_rwlock); 2649 return (p); 2650 } 2651 } 2652 rw_exit(&perdm_rwlock); 2653 2654 sq = new_syncq(); 2655 if (qflag & QPERMOD) { 2656 sq->sq_type = sqtype | SQ_PERMOD; 2657 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2658 } else { 2659 ASSERT(qflag & QMTOUTPERIM); 2660 sq->sq_onext = sq->sq_oprev = sq; 2661 } 2662 2663 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2664 dmp->dm_sq = sq; 2665 dmp->dm_str = str; 2666 dmp->dm_ref = 1; 2667 dmp->dm_next = NULL; 2668 2669 rw_enter(&perdm_rwlock, RW_WRITER); 2670 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2671 if (p->dm_str == str) { /* already present */ 2672 p->dm_ref++; 2673 rw_exit(&perdm_rwlock); 2674 free_syncq(sq); 2675 kmem_free(dmp, sizeof (perdm_t)); 2676 return (p); 2677 } 2678 } 2679 2680 *pp = dmp; 2681 rw_exit(&perdm_rwlock); 2682 return (dmp); 2683 } 2684 2685 void 2686 rele_dm(perdm_t *dmp) 2687 { 2688 perdm_t **pp; 2689 perdm_t *p; 2690 2691 rw_enter(&perdm_rwlock, RW_WRITER); 2692 ASSERT(dmp->dm_ref > 0); 2693 2694 if (--dmp->dm_ref > 0) { 2695 rw_exit(&perdm_rwlock); 2696 return; 2697 } 2698 2699 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2700 if (p == dmp) 2701 break; 2702 ASSERT(p == dmp); 2703 *pp = p->dm_next; 2704 rw_exit(&perdm_rwlock); 2705 2706 /* 2707 * Wait for any 
background processing that relies on the
2708 * syncq to complete before it is freed.
2709 */
2710 wait_sq_svc(p->dm_sq);
2711 free_syncq(p->dm_sq);
2712 kmem_free(p, sizeof (perdm_t));
2713 }
2714
2715 /*
2716 * Make a protocol message given control and data buffers.
2717 * n.b., this can block; be careful of what locks you hold when calling it.
2718 *
2719 * If sd_maxblk is less than *iosize this routine can fail part way through
2720 * (due to an allocation failure). In this case on return *iosize will contain
2721 * the amount that was consumed. Otherwise *iosize will not be modified,
2722 * i.e. it will still equal the amount that was consumed (the entire request).
2723 */
2724 int
2725 strmakemsg(
2726 struct strbuf *mctl,
2727 ssize_t *iosize,
2728 struct uio *uiop,
2729 stdata_t *stp,
2730 int32_t flag,
2731 mblk_t **mpp)
2732 {
2733 mblk_t *mpctl = NULL;
2734 mblk_t *mpdata = NULL;
2735 int error;
2736
2737 ASSERT(uiop != NULL);
2738
2739 *mpp = NULL;
2740 /* Create control part, if any */
2741 if ((mctl != NULL) && (mctl->len >= 0)) {
2742 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2743 if (error)
2744 return (error);
2745 }
2746 /* Create data part, if any */
2747 if (*iosize >= 0) {
2748 error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2749 if (error) {
2750 freemsg(mpctl);
2751 return (error);
2752 }
2753 }
2754 if (mpctl != NULL) {
2755 if (mpdata != NULL)
2756 linkb(mpctl, mpdata);
2757 *mpp = mpctl;
2758 } else {
2759 *mpp = mpdata;
2760 }
2761 return (0);
2762 }
2763
2764 /*
2765 * Make the control part of a protocol message given a control buffer.
2766 * n.b., this can block; be careful of what locks you hold when calling it.
2767 */
2768 int
2769 strmakectl(
2770 struct strbuf *mctl,
2771 int32_t flag,
2772 int32_t fflag,
2773 mblk_t **mpp)
2774 {
2775 mblk_t *bp = NULL;
2776 unsigned char msgtype;
2777 int error = 0;
2778 cred_t *cr = CRED();
2779
2780 /* We do not support interrupt threads using the stream head to send */
2781 ASSERT(cr != NULL);
2782
2783 *mpp = NULL;
2784 /*
2785 * Create control part of message, if any.
2786 */
2787 if ((mctl != NULL) && (mctl->len >= 0)) {
2788 caddr_t base;
2789 int ctlcount;
2790 int allocsz;
2791
2792 if (flag & RS_HIPRI)
2793 msgtype = M_PCPROTO;
2794 else
2795 msgtype = M_PROTO;
2796
2797 ctlcount = mctl->len;
2798 base = mctl->buf;
2799
2800 /*
2801 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2802 * blocks by increasing the size to something more usable.
2803 */
2804 allocsz = MAX(ctlcount, 64);
2805
2806 /*
2807 * Range checking has already been done; simply try
2808 * to allocate a message block for the ctl part.
2809 */
2810 while ((bp = allocb_cred(allocsz, cr,
2811 curproc->p_pid)) == NULL) {
2812 if (fflag & (FNDELAY|FNONBLOCK))
2813 return (EAGAIN);
2814 if (error = strwaitbuf(allocsz, BPRI_MED))
2815 return (error);
2816 }
2817
2818 bp->b_datap->db_type = msgtype;
2819 if (copyin(base, bp->b_wptr, ctlcount)) {
2820 freeb(bp);
2821 return (EFAULT);
2822 }
2823 bp->b_wptr += ctlcount;
2824 }
2825 *mpp = bp;
2826 return (0);
2827 }
2828
2829 /*
2830 * Make a protocol message given data buffers.
2831 * n.b., this can block; be careful of what locks you hold when calling it.
2832 *
2833 * If sd_maxblk is less than *iosize this routine can fail part way through
2834 * (due to an allocation failure). In this case on return *iosize will contain
2835 * the amount that was consumed. Otherwise *iosize will not be modified,
2836 * i.e. it will still equal the amount that was consumed (the entire request).
2837 */ 2838 int 2839 strmakedata( 2840 ssize_t *iosize, 2841 struct uio *uiop, 2842 stdata_t *stp, 2843 int32_t flag, 2844 mblk_t **mpp) 2845 { 2846 mblk_t *mp = NULL; 2847 mblk_t *bp; 2848 int wroff = (int)stp->sd_wroff; 2849 int tail_len = (int)stp->sd_tail; 2850 int extra = wroff + tail_len; 2851 int error = 0; 2852 ssize_t maxblk; 2853 ssize_t count = *iosize; 2854 cred_t *cr; 2855 2856 *mpp = NULL; 2857 if (count < 0) 2858 return (0); 2859 2860 /* We do not support interrupt threads using the stream head to send */ 2861 cr = CRED(); 2862 ASSERT(cr != NULL); 2863 2864 maxblk = stp->sd_maxblk; 2865 if (maxblk == INFPSZ) 2866 maxblk = count; 2867 2868 /* 2869 * Create data part of message, if any. 2870 */ 2871 do { 2872 ssize_t size; 2873 dblk_t *dp; 2874 2875 ASSERT(uiop); 2876 2877 size = MIN(count, maxblk); 2878 2879 while ((bp = allocb_cred(size + extra, cr, 2880 curproc->p_pid)) == NULL) { 2881 error = EAGAIN; 2882 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2883 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2884 if (count == *iosize) { 2885 freemsg(mp); 2886 return (error); 2887 } else { 2888 *iosize -= count; 2889 *mpp = mp; 2890 return (0); 2891 } 2892 } 2893 } 2894 dp = bp->b_datap; 2895 dp->db_cpid = curproc->p_pid; 2896 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2897 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2898 2899 if (flag & STRUIO_POSTPONE) { 2900 /* 2901 * Setup the stream uio portion of the 2902 * dblk for subsequent use by struioget(). 2903 */ 2904 dp->db_struioflag = STRUIO_SPEC; 2905 dp->db_cksumstart = 0; 2906 dp->db_cksumstuff = 0; 2907 dp->db_cksumend = size; 2908 *(long long *)dp->db_struioun.data = 0ll; 2909 bp->b_wptr += size; 2910 } else { 2911 if (stp->sd_copyflag & STRCOPYCACHED) 2912 uiop->uio_extflg |= UIO_COPY_CACHED; 2913 2914 if (size != 0) { 2915 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2916 uiop); 2917 if (error != 0) { 2918 freeb(bp); 2919 freemsg(mp); 2920 return (error); 2921 } 2922 } 2923 bp->b_wptr += size; 2924 2925 if (stp->sd_wputdatafunc != NULL) { 2926 mblk_t *newbp; 2927 2928 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2929 bp, NULL, NULL, NULL, NULL); 2930 if (newbp == NULL) { 2931 freeb(bp); 2932 freemsg(mp); 2933 return (ECOMM); 2934 } 2935 bp = newbp; 2936 } 2937 } 2938 2939 count -= size; 2940 2941 if (mp == NULL) 2942 mp = bp; 2943 else 2944 linkb(mp, bp); 2945 } while (count > 0); 2946 2947 *mpp = mp; 2948 return (0); 2949 } 2950 2951 /* 2952 * Wait for a buffer to become available. Return non-zero errno 2953 * if not able to wait, 0 if buffer is probably there. 2954 */ 2955 int 2956 strwaitbuf(size_t size, int pri) 2957 { 2958 bufcall_id_t id; 2959 2960 mutex_enter(&bcall_monitor); 2961 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2962 &ttoproc(curthread)->p_flag_cv)) == 0) { 2963 mutex_exit(&bcall_monitor); 2964 return (ENOSR); 2965 } 2966 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2967 unbufcall(id); 2968 mutex_exit(&bcall_monitor); 2969 return (EINTR); 2970 } 2971 unbufcall(id); 2972 mutex_exit(&bcall_monitor); 2973 return (0); 2974 } 2975 2976 /* 2977 * This function waits for a read or write event to happen on a stream. 2978 * fmode can specify FNDELAY and/or FNONBLOCK. 2979 * The timeout is in ms with -1 meaning infinite. 2980 * The flag values work as follows: 2981 * READWAIT Check for read side errors, send M_READ 2982 * GETWAIT Check for read side errors, no M_READ 2983 * WRITEWAIT Check for write side errors. 
2984 * NOINTR Do not return error if nonblocking or timeout. 2985 * STR_NOERROR Ignore all errors except STPLEX. 2986 * STR_NOSIG Ignore/hold signals during the duration of the call. 2987 * STR_PEEK Pass through the strgeterr(). 2988 */ 2989 int 2990 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 2991 int *done) 2992 { 2993 int slpflg, errs; 2994 int error; 2995 kcondvar_t *sleepon; 2996 mblk_t *mp; 2997 ssize_t *rd_count; 2998 clock_t rval; 2999 3000 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3001 if ((flag & READWAIT) || (flag & GETWAIT)) { 3002 slpflg = RSLEEP; 3003 sleepon = &_RD(stp->sd_wrq)->q_wait; 3004 errs = STRDERR|STPLEX; 3005 } else { 3006 slpflg = WSLEEP; 3007 sleepon = &stp->sd_wrq->q_wait; 3008 errs = STWRERR|STRHUP|STPLEX; 3009 } 3010 if (flag & STR_NOERROR) 3011 errs = STPLEX; 3012 3013 if (stp->sd_wakeq & slpflg) { 3014 /* 3015 * A strwakeq() is pending, no need to sleep. 3016 */ 3017 stp->sd_wakeq &= ~slpflg; 3018 *done = 0; 3019 return (0); 3020 } 3021 3022 if (stp->sd_flag & errs) { 3023 /* 3024 * Check for errors before going to sleep since the 3025 * caller might not have checked this while holding 3026 * sd_lock. 3027 */ 3028 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3029 if (error != 0) { 3030 *done = 1; 3031 return (error); 3032 } 3033 } 3034 3035 /* 3036 * If any module downstream has requested read notification 3037 * by setting SNDMREAD flag using M_SETOPTS, send a message 3038 * down stream. 3039 */ 3040 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3041 mutex_exit(&stp->sd_lock); 3042 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3043 (flag & STR_NOSIG), &error))) { 3044 mutex_enter(&stp->sd_lock); 3045 *done = 1; 3046 return (error); 3047 } 3048 mp->b_datap->db_type = M_READ; 3049 rd_count = (ssize_t *)mp->b_wptr; 3050 *rd_count = count; 3051 mp->b_wptr += sizeof (ssize_t); 3052 /* 3053 * Send the number of bytes requested by the 3054 * read as the argument to M_READ. 3055 */ 3056 stream_willservice(stp); 3057 putnext(stp->sd_wrq, mp); 3058 stream_runservice(stp); 3059 mutex_enter(&stp->sd_lock); 3060 3061 /* 3062 * If any data arrived due to inline processing 3063 * of putnext(), don't sleep. 3064 */ 3065 if (_RD(stp->sd_wrq)->q_first != NULL) { 3066 *done = 0; 3067 return (0); 3068 } 3069 } 3070 3071 if (fmode & (FNDELAY|FNONBLOCK)) { 3072 if (!(flag & NOINTR)) 3073 error = EAGAIN; 3074 else 3075 error = 0; 3076 *done = 1; 3077 return (error); 3078 } 3079 3080 stp->sd_flag |= slpflg; 3081 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3082 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3083 stp, flag, count, fmode, done); 3084 3085 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3086 if (rval > 0) { 3087 /* EMPTY */ 3088 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3089 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3090 stp, flag, count, fmode, done); 3091 } else if (rval == 0) { 3092 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3093 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3094 stp, flag, count, fmode, done); 3095 stp->sd_flag &= ~slpflg; 3096 cv_broadcast(sleepon); 3097 if (!(flag & NOINTR)) 3098 error = EINTR; 3099 else 3100 error = 0; 3101 *done = 1; 3102 return (error); 3103 } else { 3104 /* timeout */ 3105 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3106 "strwaitq timeout:%p, %X, %lX, %X, %p", 3107 stp, flag, count, fmode, done); 3108 *done = 1; 3109 if (!(flag & NOINTR)) 3110 return (ETIME); 3111 else 3112 return (0); 3113 } 3114 /* 3115 * If the caller implements delayed errors (i.e. 
queued after data) 3116 * we can not check for errors here since data as well as an 3117 * error might have arrived at the stream head. We return to 3118 * have the caller check the read queue before checking for errors. 3119 */ 3120 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3121 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3122 if (error != 0) { 3123 *done = 1; 3124 return (error); 3125 } 3126 } 3127 *done = 0; 3128 return (0); 3129 } 3130 3131 /* 3132 * Perform job control discipline access checks. 3133 * Return 0 for success and the errno for failure. 3134 */ 3135 3136 #define cantsend(p, t, sig) \ 3137 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3138 3139 int 3140 straccess(struct stdata *stp, enum jcaccess mode) 3141 { 3142 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3143 kthread_t *t = curthread; 3144 proc_t *p = ttoproc(t); 3145 sess_t *sp; 3146 3147 ASSERT(mutex_owned(&stp->sd_lock)); 3148 3149 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3150 return (0); 3151 3152 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3153 3154 for (;;) { 3155 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3156 sp = p->p_sessp; 3157 mutex_enter(&sp->s_lock); /* protects sp->* */ 3158 3159 /* 3160 * If this is not the calling process's controlling terminal 3161 * or if the calling process is already in the foreground 3162 * then allow access. 3163 */ 3164 if (sp->s_dev != stp->sd_vnode->v_rdev || 3165 p->p_pgidp == stp->sd_pgidp) { 3166 mutex_exit(&sp->s_lock); 3167 mutex_exit(&p->p_splock); 3168 mutex_exit(&p->p_lock); 3169 return (0); 3170 } 3171 3172 /* 3173 * Check to see if controlling terminal has been deallocated. 3174 */ 3175 if (sp->s_vp == NULL) { 3176 if (!cantsend(p, t, SIGHUP)) 3177 sigtoproc(p, t, SIGHUP); 3178 mutex_exit(&sp->s_lock); 3179 mutex_exit(&p->p_splock); 3180 mutex_exit(&p->p_lock); 3181 return (EIO); 3182 } 3183 3184 mutex_exit(&sp->s_lock); 3185 mutex_exit(&p->p_splock); 3186 3187 if (mode == JCGETP) { 3188 mutex_exit(&p->p_lock); 3189 return (0); 3190 } 3191 3192 if (mode == JCREAD) { 3193 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3194 mutex_exit(&p->p_lock); 3195 return (EIO); 3196 } 3197 mutex_exit(&p->p_lock); 3198 mutex_exit(&stp->sd_lock); 3199 pgsignal(p->p_pgidp, SIGTTIN); 3200 mutex_enter(&stp->sd_lock); 3201 mutex_enter(&p->p_lock); 3202 } else { /* mode == JCWRITE or JCSETP */ 3203 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3204 cantsend(p, t, SIGTTOU)) { 3205 mutex_exit(&p->p_lock); 3206 return (0); 3207 } 3208 if (p->p_detached) { 3209 mutex_exit(&p->p_lock); 3210 return (EIO); 3211 } 3212 mutex_exit(&p->p_lock); 3213 mutex_exit(&stp->sd_lock); 3214 pgsignal(p->p_pgidp, SIGTTOU); 3215 mutex_enter(&stp->sd_lock); 3216 mutex_enter(&p->p_lock); 3217 } 3218 3219 /* 3220 * We call cv_wait_sig_swap() to cause the appropriate 3221 * action for the jobcontrol signal to take place. 3222 * If the signal is being caught, we will take the 3223 * EINTR error return. Otherwise, the default action 3224 * of causing the process to stop will take place. 3225 * In this case, we rely on the periodic cv_broadcast() on 3226 * &lbolt_cv to wake us up to loop around and test again. 3227 * We can't get here if the signal is ignored or 3228 * if the current thread is blocking the signal. 
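 * (The cantsend() checks above filter out the ignored/blocked cases
 * before the signal is posted.)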
3229 */ 3230 mutex_exit(&stp->sd_lock); 3231 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3232 mutex_exit(&p->p_lock); 3233 mutex_enter(&stp->sd_lock); 3234 return (EINTR); 3235 } 3236 mutex_exit(&p->p_lock); 3237 mutex_enter(&stp->sd_lock); 3238 mutex_enter(&p->p_lock); 3239 } 3240 } 3241 3242 /* 3243 * Return size of message of block type (bp->b_datap->db_type) 3244 */ 3245 size_t 3246 xmsgsize(mblk_t *bp) 3247 { 3248 unsigned char type; 3249 size_t count = 0; 3250 3251 type = bp->b_datap->db_type; 3252 3253 for (; bp; bp = bp->b_cont) { 3254 if (type != bp->b_datap->db_type) 3255 break; 3256 ASSERT(bp->b_wptr >= bp->b_rptr); 3257 count += bp->b_wptr - bp->b_rptr; 3258 } 3259 return (count); 3260 } 3261 3262 /* 3263 * Allocate a stream head. 3264 */ 3265 struct stdata * 3266 shalloc(queue_t *qp) 3267 { 3268 stdata_t *stp; 3269 3270 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3271 3272 stp->sd_wrq = _WR(qp); 3273 stp->sd_strtab = NULL; 3274 stp->sd_iocid = 0; 3275 stp->sd_mate = NULL; 3276 stp->sd_freezer = NULL; 3277 stp->sd_refcnt = 0; 3278 stp->sd_wakeq = 0; 3279 stp->sd_anchor = 0; 3280 stp->sd_struiowrq = NULL; 3281 stp->sd_struiordq = NULL; 3282 stp->sd_struiodnak = 0; 3283 stp->sd_struionak = NULL; 3284 stp->sd_t_audit_data = NULL; 3285 stp->sd_rput_opt = 0; 3286 stp->sd_wput_opt = 0; 3287 stp->sd_read_opt = 0; 3288 stp->sd_rprotofunc = strrput_proto; 3289 stp->sd_rmiscfunc = strrput_misc; 3290 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3291 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3292 stp->sd_ciputctrl = NULL; 3293 stp->sd_nciputctrl = 0; 3294 stp->sd_qhead = NULL; 3295 stp->sd_qtail = NULL; 3296 stp->sd_servid = NULL; 3297 stp->sd_nqueues = 0; 3298 stp->sd_svcflags = 0; 3299 stp->sd_copyflag = 0; 3300 3301 return (stp); 3302 } 3303 3304 /* 3305 * Free a stream head. 
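 * shfree() waits for any scheduled stream head service processing
 * (STRS_SCHEDULED) to finish before the structure is torn down.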
3306 */ 3307 void 3308 shfree(stdata_t *stp) 3309 { 3310 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3311 3312 stp->sd_wrq = NULL; 3313 3314 mutex_enter(&stp->sd_qlock); 3315 while (stp->sd_svcflags & STRS_SCHEDULED) { 3316 STRSTAT(strwaits); 3317 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3318 } 3319 mutex_exit(&stp->sd_qlock); 3320 3321 if (stp->sd_ciputctrl != NULL) { 3322 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3323 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3324 stp->sd_nciputctrl, 0); 3325 ASSERT(ciputctrl_cache != NULL); 3326 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3327 stp->sd_ciputctrl = NULL; 3328 stp->sd_nciputctrl = 0; 3329 } 3330 ASSERT(stp->sd_qhead == NULL); 3331 ASSERT(stp->sd_qtail == NULL); 3332 ASSERT(stp->sd_nqueues == 0); 3333 kmem_cache_free(stream_head_cache, stp); 3334 } 3335 3336 /* 3337 * Allocate a pair of queues and a syncq for the pair 3338 */ 3339 queue_t * 3340 allocq(void) 3341 { 3342 queinfo_t *qip; 3343 queue_t *qp, *wqp; 3344 syncq_t *sq; 3345 3346 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3347 3348 qp = &qip->qu_rqueue; 3349 wqp = &qip->qu_wqueue; 3350 sq = &qip->qu_syncq; 3351 3352 qp->q_last = NULL; 3353 qp->q_next = NULL; 3354 qp->q_ptr = NULL; 3355 qp->q_flag = QUSE | QREADR; 3356 qp->q_bandp = NULL; 3357 qp->q_stream = NULL; 3358 qp->q_syncq = sq; 3359 qp->q_nband = 0; 3360 qp->q_nfsrv = NULL; 3361 qp->q_draining = 0; 3362 qp->q_syncqmsgs = 0; 3363 qp->q_spri = 0; 3364 qp->q_qtstamp = 0; 3365 qp->q_sqtstamp = 0; 3366 qp->q_fp = NULL; 3367 3368 wqp->q_last = NULL; 3369 wqp->q_next = NULL; 3370 wqp->q_ptr = NULL; 3371 wqp->q_flag = QUSE; 3372 wqp->q_bandp = NULL; 3373 wqp->q_stream = NULL; 3374 wqp->q_syncq = sq; 3375 wqp->q_nband = 0; 3376 wqp->q_nfsrv = NULL; 3377 wqp->q_draining = 0; 3378 wqp->q_syncqmsgs = 0; 3379 wqp->q_qtstamp = 0; 3380 wqp->q_sqtstamp = 0; 3381 wqp->q_spri = 0; 3382 3383 sq->sq_count = 0; 3384 sq->sq_rmqcount = 0; 3385 sq->sq_flags = 0; 3386 sq->sq_type = 0; 3387 sq->sq_callbflags = 0; 3388 sq->sq_cancelid = 0; 3389 sq->sq_ciputctrl = NULL; 3390 sq->sq_nciputctrl = 0; 3391 sq->sq_needexcl = 0; 3392 sq->sq_svcflags = 0; 3393 3394 return (qp); 3395 } 3396 3397 /* 3398 * Free a pair of queues and the "attached" syncq. 3399 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3400 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3401 */ 3402 void 3403 freeq(queue_t *qp) 3404 { 3405 qband_t *qbp, *nqbp; 3406 syncq_t *sq, *outer; 3407 queue_t *wqp = _WR(qp); 3408 3409 ASSERT(qp->q_flag & QREADR); 3410 3411 /* 3412 * If a previously dispatched taskq job is scheduled to run 3413 * sync_service() or a service routine is scheduled for the 3414 * queues about to be freed, wait here until all service is 3415 * done on the queue and all associated queues and syncqs. 3416 */ 3417 wait_svc(qp); 3418 3419 (void) flush_syncq(qp->q_syncq, qp); 3420 (void) flush_syncq(wqp->q_syncq, wqp); 3421 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3422 3423 /* 3424 * Flush the queues before q_next is set to NULL This is needed 3425 * in order to backenable any downstream queue before we go away. 3426 * Note: we are already removed from the stream so that the 3427 * backenabling will not cause any messages to be delivered to our 3428 * put procedures. 
3429 */ 3430 flushq(qp, FLUSHALL); 3431 flushq(wqp, FLUSHALL); 3432 3433 /* Tidy up - removeq only does a half-remove from stream */ 3434 qp->q_next = wqp->q_next = NULL; 3435 ASSERT(!(qp->q_flag & QENAB)); 3436 ASSERT(!(wqp->q_flag & QENAB)); 3437 3438 outer = qp->q_syncq->sq_outer; 3439 if (outer != NULL) { 3440 outer_remove(outer, qp->q_syncq); 3441 if (wqp->q_syncq != qp->q_syncq) 3442 outer_remove(outer, wqp->q_syncq); 3443 } 3444 /* 3445 * Free any syncqs that are outside what allocq returned. 3446 */ 3447 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3448 free_syncq(qp->q_syncq); 3449 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3450 free_syncq(wqp->q_syncq); 3451 3452 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3453 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3454 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3455 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3456 sq = SQ(qp); 3457 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3458 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3459 ASSERT(sq->sq_outer == NULL); 3460 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3461 ASSERT(sq->sq_callbpend == NULL); 3462 ASSERT(sq->sq_needexcl == 0); 3463 3464 if (sq->sq_ciputctrl != NULL) { 3465 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3466 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3467 sq->sq_nciputctrl, 0); 3468 ASSERT(ciputctrl_cache != NULL); 3469 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3470 sq->sq_ciputctrl = NULL; 3471 sq->sq_nciputctrl = 0; 3472 } 3473 3474 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3475 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3476 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3477 3478 qp->q_flag &= ~QUSE; 3479 wqp->q_flag &= ~QUSE; 3480 3481 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3482 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3483 3484 qbp = qp->q_bandp; 3485 while (qbp) { 3486 nqbp = qbp->qb_next; 3487 freeband(qbp); 3488 qbp = nqbp; 3489 } 3490 qbp = wqp->q_bandp; 3491 while (qbp) { 3492 nqbp = qbp->qb_next; 3493 freeband(qbp); 3494 qbp = nqbp; 3495 } 3496 kmem_cache_free(queue_cache, qp); 3497 } 3498 3499 /* 3500 * Allocate a qband structure. 3501 */ 3502 qband_t * 3503 allocband(void) 3504 { 3505 qband_t *qbp; 3506 3507 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3508 if (qbp == NULL) 3509 return (NULL); 3510 3511 qbp->qb_next = NULL; 3512 qbp->qb_count = 0; 3513 qbp->qb_mblkcnt = 0; 3514 qbp->qb_first = NULL; 3515 qbp->qb_last = NULL; 3516 qbp->qb_flag = 0; 3517 3518 return (qbp); 3519 } 3520 3521 /* 3522 * Free a qband structure. 3523 */ 3524 void 3525 freeband(qband_t *qbp) 3526 { 3527 kmem_cache_free(qband_cache, qbp); 3528 } 3529 3530 /* 3531 * Just like putnextctl(9F), except that allocb_wait() is used. 3532 * 3533 * Consolidation Private, and of course only callable from the stream head or 3534 * routines that may block. 3535 */ 3536 int 3537 putnextctl_wait(queue_t *q, int type) 3538 { 3539 mblk_t *bp; 3540 int error; 3541 3542 if ((datamsg(type) && (type != M_DELAY)) || 3543 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3544 return (0); 3545 3546 bp->b_datap->db_type = (unsigned char)type; 3547 putnext(q, bp); 3548 return (1); 3549 } 3550 3551 /* 3552 * Run any possible bufcalls. 
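 * Called when memory may have become available, e.g. from
 * streams_bufcall_service() below.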
3553 */ 3554 void 3555 runbufcalls(void) 3556 { 3557 strbufcall_t *bcp; 3558 3559 mutex_enter(&bcall_monitor); 3560 mutex_enter(&strbcall_lock); 3561 3562 if (strbcalls.bc_head) { 3563 size_t count; 3564 int nevent; 3565 3566 /* 3567 * count how many events are on the list 3568 * now so we can check to avoid looping 3569 * in low memory situations 3570 */ 3571 nevent = 0; 3572 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3573 nevent++; 3574 3575 /* 3576 * get estimate of available memory from kmem_avail(). 3577 * awake all bufcall functions waiting for 3578 * memory whose request could be satisfied 3579 * by 'count' memory and let 'em fight for it. 3580 */ 3581 count = kmem_avail(); 3582 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3583 STRSTAT(bufcalls); 3584 --nevent; 3585 if (bcp->bc_size <= count) { 3586 bcp->bc_executor = curthread; 3587 mutex_exit(&strbcall_lock); 3588 (*bcp->bc_func)(bcp->bc_arg); 3589 mutex_enter(&strbcall_lock); 3590 bcp->bc_executor = NULL; 3591 cv_broadcast(&bcall_cv); 3592 strbcalls.bc_head = bcp->bc_next; 3593 kmem_free(bcp, sizeof (strbufcall_t)); 3594 } else { 3595 /* 3596 * too big, try again later - note 3597 * that nevent was decremented above 3598 * so we won't retry this one on this 3599 * iteration of the loop 3600 */ 3601 if (bcp->bc_next != NULL) { 3602 strbcalls.bc_head = bcp->bc_next; 3603 bcp->bc_next = NULL; 3604 strbcalls.bc_tail->bc_next = bcp; 3605 strbcalls.bc_tail = bcp; 3606 } 3607 } 3608 } 3609 if (strbcalls.bc_head == NULL) 3610 strbcalls.bc_tail = NULL; 3611 } 3612 3613 mutex_exit(&strbcall_lock); 3614 mutex_exit(&bcall_monitor); 3615 } 3616 3617 3618 /* 3619 * Actually run queue's service routine. 3620 */ 3621 static void 3622 runservice(queue_t *q) 3623 { 3624 qband_t *qbp; 3625 3626 ASSERT(q->q_qinfo->qi_srvp); 3627 again: 3628 entersq(q->q_syncq, SQ_SVC); 3629 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3630 "runservice starts:%p", q); 3631 3632 if (!(q->q_flag & QWCLOSE)) 3633 (*q->q_qinfo->qi_srvp)(q); 3634 3635 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3636 "runservice ends:(%p)", q); 3637 3638 leavesq(q->q_syncq, SQ_SVC); 3639 3640 mutex_enter(QLOCK(q)); 3641 if (q->q_flag & QENAB) { 3642 q->q_flag &= ~QENAB; 3643 mutex_exit(QLOCK(q)); 3644 goto again; 3645 } 3646 q->q_flag &= ~QINSERVICE; 3647 q->q_flag &= ~QBACK; 3648 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3649 qbp->qb_flag &= ~QB_BACK; 3650 /* 3651 * Wakeup thread waiting for the service procedure 3652 * to be run (strclose and qdetach). 3653 */ 3654 cv_broadcast(&q->q_wait); 3655 3656 mutex_exit(QLOCK(q)); 3657 } 3658 3659 /* 3660 * Background processing of bufcalls. 
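 * This thread runs pending bufcalls whenever memory is available and
 * otherwise sleeps until memory is freed (memavail_cv, with a
 * 60-second backstop) or new bufcalls arrive (strbcall_cv).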
3661 */ 3662 void 3663 streams_bufcall_service(void) 3664 { 3665 callb_cpr_t cprinfo; 3666 3667 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3668 "streams_bufcall_service"); 3669 3670 mutex_enter(&strbcall_lock); 3671 3672 for (;;) { 3673 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3674 mutex_exit(&strbcall_lock); 3675 runbufcalls(); 3676 mutex_enter(&strbcall_lock); 3677 } 3678 if (strbcalls.bc_head != NULL) { 3679 STRSTAT(bcwaits); 3680 /* Wait for memory to become available */ 3681 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3682 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock, 3683 SEC_TO_TICK(60), TR_CLOCK_TICK); 3684 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3685 } 3686 3687 /* Wait for new work to arrive */ 3688 if (strbcalls.bc_head == NULL) { 3689 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3690 cv_wait(&strbcall_cv, &strbcall_lock); 3691 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3692 } 3693 } 3694 } 3695 3696 /* 3697 * Background processing of streams background tasks which failed 3698 * taskq_dispatch. 3699 */ 3700 static void 3701 streams_qbkgrnd_service(void) 3702 { 3703 callb_cpr_t cprinfo; 3704 queue_t *q; 3705 3706 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3707 "streams_bkgrnd_service"); 3708 3709 mutex_enter(&service_queue); 3710 3711 for (;;) { 3712 /* 3713 * Wait for work to arrive. 3714 */ 3715 while ((freebs_list == NULL) && (qhead == NULL)) { 3716 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3717 cv_wait(&services_to_run, &service_queue); 3718 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3719 } 3720 /* 3721 * Handle all pending freebs requests to free memory. 3722 */ 3723 while (freebs_list != NULL) { 3724 mblk_t *mp = freebs_list; 3725 freebs_list = mp->b_next; 3726 mutex_exit(&service_queue); 3727 mblk_free(mp); 3728 mutex_enter(&service_queue); 3729 } 3730 /* 3731 * Run pending queues. 3732 */ 3733 while (qhead != NULL) { 3734 DQ(q, qhead, qtail, q_link); 3735 ASSERT(q != NULL); 3736 mutex_exit(&service_queue); 3737 queue_service(q); 3738 mutex_enter(&service_queue); 3739 } 3740 ASSERT(qhead == NULL && qtail == NULL); 3741 } 3742 } 3743 3744 /* 3745 * Background processing of streams background tasks which failed 3746 * taskq_dispatch. 3747 */ 3748 static void 3749 streams_sqbkgrnd_service(void) 3750 { 3751 callb_cpr_t cprinfo; 3752 syncq_t *sq; 3753 3754 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3755 "streams_sqbkgrnd_service"); 3756 3757 mutex_enter(&service_queue); 3758 3759 for (;;) { 3760 /* 3761 * Wait for work to arrive. 3762 */ 3763 while (sqhead == NULL) { 3764 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3765 cv_wait(&syncqs_to_run, &service_queue); 3766 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3767 } 3768 3769 /* 3770 * Run pending syncqs. 3771 */ 3772 while (sqhead != NULL) { 3773 DQ(sq, sqhead, sqtail, sq_next); 3774 ASSERT(sq != NULL); 3775 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3776 mutex_exit(&service_queue); 3777 syncq_service(sq); 3778 mutex_enter(&service_queue); 3779 } 3780 } 3781 } 3782 3783 /* 3784 * Disable the syncq and wait for background syncq processing to complete. 3785 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3786 * list. 
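 * Called, for example, by mlink_file() before a setq and by rele_dm()
 * before a perdm syncq is freed.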
3787 */
3788 void
3789 wait_sq_svc(syncq_t *sq)
3790 {
3791 mutex_enter(SQLOCK(sq));
3792 sq->sq_svcflags |= SQ_DISABLED;
3793 if (sq->sq_svcflags & SQ_BGTHREAD) {
3794 syncq_t *sq_chase;
3795 syncq_t *sq_curr;
3796 int removed;
3797
3798 ASSERT(sq->sq_servcount == 1);
3799 mutex_enter(&service_queue);
3800 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3801 mutex_exit(&service_queue);
3802 if (removed) {
3803 sq->sq_svcflags &= ~SQ_BGTHREAD;
3804 sq->sq_servcount = 0;
3805 STRSTAT(sqremoved);
3806 goto done;
3807 }
3808 }
3809 while (sq->sq_servcount != 0) {
3810 sq->sq_flags |= SQ_WANTWAKEUP;
3811 cv_wait(&sq->sq_wait, SQLOCK(sq));
3812 }
3813 done:
3814 mutex_exit(SQLOCK(sq));
3815 }
3816
3817 /*
3818 * Put a syncq on the list of syncqs to be serviced by the sqthread.
3819 * Add the argument to the end of the sqhead list and set the flag
3820 * indicating this syncq has been enabled. If it has already been
3821 * enabled, don't do anything.
3822 * This routine assumes that SQLOCK is held.
3823 * NOTE that the lock order is to have the SQLOCK first,
3824 * so if the service_queue lock is held, we need to release it
3825 * before acquiring the SQLOCK (mostly relevant for the background
3826 * thread, and this seems to be common among the STREAMS global locks).
3827 * Note that the sq_svcflags are protected by the SQLOCK.
3828 */
3829 void
3830 sqenable(syncq_t *sq)
3831 {
3832 /*
3833 * The SQLOCK is not strictly needed by this routine itself, but
3834 * our callers already hold it (and it would be a pain to release
3835 * it just for this routine, so we don't); assert that it is
3836 * held.
3837 */
3838 ASSERT(MUTEX_HELD(SQLOCK(sq)));
3839
3840 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
3841 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);
3842
3843 /*
3844 * Do not put on the list if the background thread is scheduled or
3845 * the syncq is disabled.
3846 */
3847 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
3848 return;
3849
3850 /*
3851 * Check whether we should enable sq at all.
3852 * Non-PERMOD syncqs may be drained by at most one thread.
3853 * PERMOD syncqs may be drained by several threads but we limit the
3854 * total number to the lesser of
3855 * the number of queues on the syncq and
3856 * the number of CPUs.
3857 */
3858 if (sq->sq_servcount != 0) {
3859 if (((sq->sq_type & SQ_PERMOD) == 0) ||
3860 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
3861 STRSTAT(sqtoomany);
3862 return;
3863 }
3864 }
3865
3866 sq->sq_tstamp = ddi_get_lbolt();
3867 STRSTAT(sqenables);
3868
3869 /* Attempt a taskq dispatch */
3870 sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
3871 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
3872 if (sq->sq_servid != NULL) {
3873 sq->sq_servcount++;
3874 return;
3875 }
3876
3877 /*
3878 * This taskq dispatch failed, but a previous one may have succeeded.
3879 * Don't try to schedule on the background thread whilst there is
3880 * outstanding taskq processing.
3881 */
3882 if (sq->sq_servcount != 0)
3883 return;
3884
3885 /*
3886 * System is low on resources and can't perform a non-sleeping
3887 * dispatch. Schedule the syncq for a background thread and mark the
3888 * syncq to avoid any further taskq dispatch attempts.
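 * The cv_signal() below wakes streams_sqbkgrnd_service() to drain the
 * sqhead list.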
3889 */
3890 mutex_enter(&service_queue);
3891 STRSTAT(taskqfails);
3892 ENQUEUE(sq, sqhead, sqtail, sq_next);
3893 sq->sq_svcflags |= SQ_BGTHREAD;
3894 sq->sq_servcount = 1;
3895 cv_signal(&syncqs_to_run);
3896 mutex_exit(&service_queue);
3897 }
3898
3899 /*
3900 * Note: fifo_close() depends on the mblk_t on the queue being freed
3901 * asynchronously. The asynchronous freeing of messages breaks the
3902 * recursive call chain of fifo_close() while there are I_SENDFD-type
3903 * messages referring to other file pointers on the queue. This avoids
3904 * stack overflow when closing daisy-chained pipes, and also avoids
3905 * deadlock in the case of fifonode_t pairs (which
3906 * share the same fifolock_t).
3907 */
3908
3909 void
3910 freebs_enqueue(mblk_t *mp, dblk_t *dbp)
3911 {
3912 esb_queue_t *eqp = &system_esbq;
3913
3914 ASSERT(dbp->db_mblk == mp);
3915
3916 /*
3917 * Check data sanity. The dblock should have a non-NULL free function.
3918 * It is better to panic here than later when the dblock is freed
3919 * asynchronously, after the context is lost.
3920 */
3921 if (dbp->db_frtnp->free_func == NULL) {
3922 panic("freebs_enqueue: dblock %p has a NULL free callback",
3923 (void *)dbp);
3924 }
3925
3926 mutex_enter(&eqp->eq_lock);
3927 /* queue the new mblk on the esballoc queue */
3928 if (eqp->eq_head == NULL) {
3929 eqp->eq_head = eqp->eq_tail = mp;
3930 } else {
3931 eqp->eq_tail->b_next = mp;
3932 eqp->eq_tail = mp;
3933 }
3934 eqp->eq_len++;
3935
3936 /* If we're the first thread to reach the threshold, process */
3937 if (eqp->eq_len >= esbq_max_qlen &&
3938 !(eqp->eq_flags & ESBQ_PROCESSING))
3939 esballoc_process_queue(eqp);
3940
3941 esballoc_set_timer(eqp, esbq_timeout);
3942 mutex_exit(&eqp->eq_lock);
3943 }
3944
3945 static void
3946 esballoc_process_queue(esb_queue_t *eqp)
3947 {
3948 mblk_t *mp;
3949
3950 ASSERT(MUTEX_HELD(&eqp->eq_lock));
3951
3952 eqp->eq_flags |= ESBQ_PROCESSING;
3953
3954 do {
3955 /*
3956 * Detach the message chain for processing.
3957 */
3958 mp = eqp->eq_head;
3959 eqp->eq_tail->b_next = NULL;
3960 eqp->eq_head = eqp->eq_tail = NULL;
3961 eqp->eq_len = 0;
3962 mutex_exit(&eqp->eq_lock);
3963
3964 /*
3965 * Process the message chain.
3966 */
3967 esballoc_enqueue_mblk(mp);
3968 mutex_enter(&eqp->eq_lock);
3969 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));
3970
3971 eqp->eq_flags &= ~ESBQ_PROCESSING;
3972 }
3973
3974 /*
3975 * taskq callback routine to free esballoced mblks
3976 */
3977 static void
3978 esballoc_mblk_free(mblk_t *mp)
3979 {
3980 mblk_t *nextmp;
3981
3982 for (; mp != NULL; mp = nextmp) {
3983 nextmp = mp->b_next;
3984 mp->b_next = NULL;
3985 mblk_free(mp);
3986 }
3987 }
3988
3989 static void
3990 esballoc_enqueue_mblk(mblk_t *mp)
3991 {
3992
3993 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
3994 TQ_NOSLEEP) == NULL) {
3995 mblk_t *first_mp = mp;
3996 /*
3997 * System is low on resources and can't perform a non-sleeping
3998 * dispatch. Schedule for a background thread.
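 * The message chain is prepended to freebs_list, which is drained by
 * streams_qbkgrnd_service().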
3999 */ 4000 mutex_enter(&service_queue); 4001 STRSTAT(taskqfails); 4002 4003 while (mp->b_next != NULL) 4004 mp = mp->b_next; 4005 4006 mp->b_next = freebs_list; 4007 freebs_list = first_mp; 4008 cv_signal(&services_to_run); 4009 mutex_exit(&service_queue); 4010 } 4011 } 4012 4013 static void 4014 esballoc_timer(void *arg) 4015 { 4016 esb_queue_t *eqp = arg; 4017 4018 mutex_enter(&eqp->eq_lock); 4019 eqp->eq_flags &= ~ESBQ_TIMER; 4020 4021 if (!(eqp->eq_flags & ESBQ_PROCESSING) && 4022 eqp->eq_len > 0) 4023 esballoc_process_queue(eqp); 4024 4025 esballoc_set_timer(eqp, esbq_timeout); 4026 mutex_exit(&eqp->eq_lock); 4027 } 4028 4029 static void 4030 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout) 4031 { 4032 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4033 4034 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) { 4035 (void) timeout(esballoc_timer, eqp, eq_timeout); 4036 eqp->eq_flags |= ESBQ_TIMER; 4037 } 4038 } 4039 4040 void 4041 esballoc_queue_init(void) 4042 { 4043 system_esbq.eq_len = 0; 4044 system_esbq.eq_head = system_esbq.eq_tail = NULL; 4045 system_esbq.eq_flags = 0; 4046 } 4047 4048 /* 4049 * Set the QBACK or QB_BACK flag in the given queue for 4050 * the given priority band. 4051 */ 4052 void 4053 setqback(queue_t *q, unsigned char pri) 4054 { 4055 int i; 4056 qband_t *qbp; 4057 qband_t **qbpp; 4058 4059 ASSERT(MUTEX_HELD(QLOCK(q))); 4060 if (pri != 0) { 4061 if (pri > q->q_nband) { 4062 qbpp = &q->q_bandp; 4063 while (*qbpp) 4064 qbpp = &(*qbpp)->qb_next; 4065 while (pri > q->q_nband) { 4066 if ((*qbpp = allocband()) == NULL) { 4067 cmn_err(CE_WARN, 4068 "setqback: can't allocate qband\n"); 4069 return; 4070 } 4071 (*qbpp)->qb_hiwat = q->q_hiwat; 4072 (*qbpp)->qb_lowat = q->q_lowat; 4073 q->q_nband++; 4074 qbpp = &(*qbpp)->qb_next; 4075 } 4076 } 4077 qbp = q->q_bandp; 4078 i = pri; 4079 while (--i) 4080 qbp = qbp->qb_next; 4081 qbp->qb_flag |= QB_BACK; 4082 } else { 4083 q->q_flag |= QBACK; 4084 } 4085 } 4086 4087 int 4088 strcopyin(void *from, void *to, size_t len, int copyflag) 4089 { 4090 if (copyflag & U_TO_K) { 4091 ASSERT((copyflag & K_TO_K) == 0); 4092 if (copyin(from, to, len)) 4093 return (EFAULT); 4094 } else { 4095 ASSERT(copyflag & K_TO_K); 4096 bcopy(from, to, len); 4097 } 4098 return (0); 4099 } 4100 4101 int 4102 strcopyout(void *from, void *to, size_t len, int copyflag) 4103 { 4104 if (copyflag & U_TO_K) { 4105 if (copyout(from, to, len)) 4106 return (EFAULT); 4107 } else { 4108 ASSERT(copyflag & K_TO_K); 4109 bcopy(from, to, len); 4110 } 4111 return (0); 4112 } 4113 4114 /* 4115 * strsignal_nolock() posts a signal to the process(es) at the stream head. 4116 * It assumes that the stream head lock is already held, whereas strsignal() 4117 * acquires the lock first. This routine exists because a few callers would 4118 * otherwise have to release the stream head lock just before the call, only 4119 * to re-acquire it after it returns.
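 *
 * This is the common "_nolock variant" pattern: the real work is done by a
 * routine that requires the lock to be held, and a thin wrapper acquires
 * and releases the lock for callers that do not already hold it. A minimal
 * user-level sketch of the pattern (editorial; assumes POSIX threads; all
 * names hypothetical):
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t sd_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static int sd_events;
 *
 *	static void
 *	post_event_nolock(int ev)	// caller must hold sd_lock
 *	{
 *		sd_events |= ev;	// state protected by sd_lock
 *	}
 *
 *	void
 *	post_event(int ev)		// locking wrapper for other callers
 *	{
 *		pthread_mutex_lock(&sd_lock);
 *		post_event_nolock(ev);
 *		pthread_mutex_unlock(&sd_lock);
 *	}
 *
 * Callers that already hold the lock call the _nolock form directly and
 * avoid the release/re-acquire round trip described above.
 *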
4120 */ 4121 void 4122 strsignal_nolock(stdata_t *stp, int sig, uchar_t band) 4123 { 4124 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4125 switch (sig) { 4126 case SIGPOLL: 4127 if (stp->sd_sigflags & S_MSG) 4128 strsendsig(stp->sd_siglist, S_MSG, band, 0); 4129 break; 4130 default: 4131 if (stp->sd_pgidp) 4132 pgsignal(stp->sd_pgidp, sig); 4133 break; 4134 } 4135 } 4136 4137 void 4138 strsignal(stdata_t *stp, int sig, int32_t band) 4139 { 4140 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4141 "strsignal:%p, %X, %X", stp, sig, band); 4142 4143 mutex_enter(&stp->sd_lock); 4144 switch (sig) { 4145 case SIGPOLL: 4146 if (stp->sd_sigflags & S_MSG) 4147 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4148 break; 4149 4150 default: 4151 if (stp->sd_pgidp) { 4152 pgsignal(stp->sd_pgidp, sig); 4153 } 4154 break; 4155 } 4156 mutex_exit(&stp->sd_lock); 4157 } 4158 4159 void 4160 strhup(stdata_t *stp) 4161 { 4162 ASSERT(mutex_owned(&stp->sd_lock)); 4163 pollwakeup(&stp->sd_pollist, POLLHUP); 4164 if (stp->sd_sigflags & S_HANGUP) 4165 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4166 } 4167 4168 /* 4169 * Backenable the first queue upstream from `q' with a service procedure. 4170 */ 4171 void 4172 backenable(queue_t *q, uchar_t pri) 4173 { 4174 queue_t *nq; 4175 4176 /* 4177 * Our presence might not prevent other modules in our own 4178 * stream from popping/pushing since the caller of getq might not 4179 * have a claim on the queue (some drivers do a getq on somebody 4180 * else's queue - they know that the queue itself is not going away 4181 * but the framework has to guarantee q_next in that stream). 4182 */ 4183 claimstr(q); 4184 4185 /* Find nearest back queue with service proc */ 4186 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4187 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4188 } 4189 4190 if (nq) { 4191 kthread_t *freezer; 4192 /* 4193 * backenable can be called either with no locks held 4194 * or with the stream frozen (the latter occurs when a module 4195 * calls rmvq with the stream frozen). If the stream is frozen 4196 * by the caller the caller will hold all qlocks in the stream. 4197 * Note that a frozen stream doesn't freeze a mated stream, 4198 * so we explicitly check for that. 4199 */ 4200 freezer = STREAM(q)->sd_freezer; 4201 if (freezer != curthread || STREAM(q) != STREAM(nq)) { 4202 mutex_enter(QLOCK(nq)); 4203 } 4204 #ifdef DEBUG 4205 else { 4206 ASSERT(frozenstr(q)); 4207 ASSERT(MUTEX_HELD(QLOCK(q))); 4208 ASSERT(MUTEX_HELD(QLOCK(nq))); 4209 } 4210 #endif 4211 setqback(nq, pri); 4212 qenable_locked(nq); 4213 if (freezer != curthread || STREAM(q) != STREAM(nq)) 4214 mutex_exit(QLOCK(nq)); 4215 } 4216 releasestr(q); 4217 } 4218 4219 /* 4220 * Return the appropriate errno when one of flags_to_check is set 4221 * in sd_flags. Uses the exported error routines if they are set. 4222 * Will return 0 if no error is set (or if the exported error routines 4223 * do not return an error). 4224 * 4225 * If there is both a read and write error to check, we prefer the read error. 4226 * Also, give preference to recorded errnos over the error functions. 4227 * The flags that are handled are: 4228 * STPLEX return EINVAL 4229 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4230 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4231 * STRHUP return sd_werror 4232 * 4233 * If the caller indicates that the operation is a peek, a nonpersistent error 4234 * is not cleared.
4235 */ 4236 int 4237 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4238 { 4239 int32_t sd_flag = stp->sd_flag & flags_to_check; 4240 int error = 0; 4241 4242 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4243 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4244 if (sd_flag & STPLEX) 4245 error = EINVAL; 4246 else if (sd_flag & STRDERR) { 4247 error = stp->sd_rerror; 4248 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4249 /* 4250 * Read errors are non-persistent i.e. discarded once 4251 * returned to a non-peeking caller, 4252 */ 4253 stp->sd_rerror = 0; 4254 stp->sd_flag &= ~STRDERR; 4255 } 4256 if (error == 0 && stp->sd_rderrfunc != NULL) { 4257 int clearerr = 0; 4258 4259 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4260 &clearerr); 4261 if (clearerr) { 4262 stp->sd_flag &= ~STRDERR; 4263 stp->sd_rderrfunc = NULL; 4264 } 4265 } 4266 } else if (sd_flag & STWRERR) { 4267 error = stp->sd_werror; 4268 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4269 /* 4270 * Write errors are non-persistent i.e. discarded once 4271 * returned to a non-peeking caller, 4272 */ 4273 stp->sd_werror = 0; 4274 stp->sd_flag &= ~STWRERR; 4275 } 4276 if (error == 0 && stp->sd_wrerrfunc != NULL) { 4277 int clearerr = 0; 4278 4279 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek, 4280 &clearerr); 4281 if (clearerr) { 4282 stp->sd_flag &= ~STWRERR; 4283 stp->sd_wrerrfunc = NULL; 4284 } 4285 } 4286 } else if (sd_flag & STRHUP) { 4287 /* sd_werror set when STRHUP */ 4288 error = stp->sd_werror; 4289 } 4290 return (error); 4291 } 4292 4293 4294 /* 4295 * Single-thread open/close/push/pop 4296 * for twisted streams also 4297 */ 4298 int 4299 strstartplumb(stdata_t *stp, int flag, int cmd) 4300 { 4301 int waited = 1; 4302 int error = 0; 4303 4304 if (STRMATED(stp)) { 4305 struct stdata *stmatep = stp->sd_mate; 4306 4307 STRLOCKMATES(stp); 4308 while (waited) { 4309 waited = 0; 4310 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4311 if ((cmd == I_POP) && 4312 (flag & (FNDELAY|FNONBLOCK))) { 4313 STRUNLOCKMATES(stp); 4314 return (EAGAIN); 4315 } 4316 waited = 1; 4317 mutex_exit(&stp->sd_lock); 4318 if (!cv_wait_sig(&stmatep->sd_monitor, 4319 &stmatep->sd_lock)) { 4320 mutex_exit(&stmatep->sd_lock); 4321 return (EINTR); 4322 } 4323 mutex_exit(&stmatep->sd_lock); 4324 STRLOCKMATES(stp); 4325 } 4326 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4327 if ((cmd == I_POP) && 4328 (flag & (FNDELAY|FNONBLOCK))) { 4329 STRUNLOCKMATES(stp); 4330 return (EAGAIN); 4331 } 4332 waited = 1; 4333 mutex_exit(&stmatep->sd_lock); 4334 if (!cv_wait_sig(&stp->sd_monitor, 4335 &stp->sd_lock)) { 4336 mutex_exit(&stp->sd_lock); 4337 return (EINTR); 4338 } 4339 mutex_exit(&stp->sd_lock); 4340 STRLOCKMATES(stp); 4341 } 4342 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4343 error = strgeterr(stp, 4344 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4345 if (error != 0) { 4346 STRUNLOCKMATES(stp); 4347 return (error); 4348 } 4349 } 4350 } 4351 stp->sd_flag |= STRPLUMB; 4352 STRUNLOCKMATES(stp); 4353 } else { 4354 mutex_enter(&stp->sd_lock); 4355 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4356 if (((cmd == I_POP) || (cmd == _I_REMOVE)) && 4357 (flag & (FNDELAY|FNONBLOCK))) { 4358 mutex_exit(&stp->sd_lock); 4359 return (EAGAIN); 4360 } 4361 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) { 4362 mutex_exit(&stp->sd_lock); 4363 return (EINTR); 4364 } 4365 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4366 error = strgeterr(stp, 4367 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4368 if 
(error != 0) { 4369 mutex_exit(&stp->sd_lock); 4370 return (error); 4371 } 4372 } 4373 } 4374 stp->sd_flag |= STRPLUMB; 4375 mutex_exit(&stp->sd_lock); 4376 } 4377 return (0); 4378 } 4379 4380 /* 4381 * Complete the plumbing operation associated with stream `stp'. 4382 */ 4383 void 4384 strendplumb(stdata_t *stp) 4385 { 4386 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4387 ASSERT(stp->sd_flag & STRPLUMB); 4388 stp->sd_flag &= ~STRPLUMB; 4389 cv_broadcast(&stp->sd_monitor); 4390 } 4391 4392 /* 4393 * This describes how the STREAMS framework handles synchronization 4394 * during open/push and close/pop. 4395 * The key interfaces for open and close are qprocson and qprocsoff, 4396 * respectively. While the close case is in general harder, open 4397 * and close have significant similarities. 4398 * 4399 * During close the STREAMS framework has to both ensure that there 4400 * are no stale references to the queue pair (and syncq) that 4401 * are being closed and also provide the guarantees that are documented 4402 * in qprocsoff(9F). 4403 * If there are stale references to the queue that is closing it can 4404 * result in kernel memory corruption or kernel panics. 4405 * 4406 * Note that it is up to the module/driver to ensure that it itself 4407 * does not have any stale references to the closing queues once its close 4408 * routine returns. This includes: 4409 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines 4410 * associated with the queues. For timeout and bufcall callbacks the 4411 * module/driver also has to wait for any callbacks that are already 4412 * in progress to complete. 4413 * - If the module/driver is using esballoc it has to ensure that any 4414 * esballoc free functions do not refer to a queue that has closed. 4415 * (Note that in general the close routine cannot wait for the esballoc'ed 4416 * messages to be freed since that can cause a deadlock.) 4417 * - Cancelling any interrupts that refer to the closing queues and 4418 * also ensuring that there are no interrupts in progress that will 4419 * refer to the closing queues once the close routine returns. 4420 * - For multiplexors removing any driver global state that refers to 4421 * the closing queue and also ensuring that there are no threads in 4422 * the multiplexor that have picked up a queue pointer but have not yet 4423 * finished using it. 4424 * 4425 * In addition, a driver/module can only reference the q_next pointer 4426 * in its open, close, put, or service procedures or in a 4427 * qtimeout/qbufcall callback procedure executing "on" the correct 4428 * stream. Thus it cannot reference the q_next pointer in an interrupt 4429 * routine or a timeout, bufcall or esballoc callback routine. Likewise 4430 * it cannot reference q_next of a different queue, e.g. in a mux that 4431 * passes messages from one queue's put/service procedure to another queue. 4432 * In all the cases where the driver/module cannot access the q_next 4433 * field it must use the *next* versions, e.g. canputnext instead of 4434 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...). 4435 * 4436 * 4437 * Assuming that the driver/module conforms to the above constraints 4438 * the STREAMS framework has to avoid stale references to q_next for all 4439 * the framework internal cases which include (but are not limited to): 4440 * - Threads in canput/canputnext/backenable and elsewhere that are 4441 * walking q_next. 4442 * - Messages on a syncq that have a reference to the queue through b_queue.
4443 * - Messages on an outer perimeter (syncq) that have a reference to the 4444 * queue through b_queue. 4445 * - Threads that use q_nfsrv (e.g. canput) to find a queue. 4446 * Note that only canput and bcanput use q_nfsrv without any locking. 4447 * 4448 * Providing the qprocsoff(9F) guarantees means that, after qprocsoff 4449 * returns, the framework has to ensure that no threads can 4450 * enter the put or service routines for the closing read or write-side queue. 4451 * In addition to preventing "direct" entry into the put procedures 4452 * the framework also has to prevent messages being drained from 4453 * the syncq or the outer perimeter. 4454 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only 4455 * mechanism to prevent qwriter(PERIM_OUTER) from running after 4456 * qprocsoff has returned. 4457 * Note that if a module/driver uses put(9F) on one of its own queues 4458 * it is up to the module/driver to ensure that the put() doesn't 4459 * get called when the queue is closing. 4460 * 4461 * 4462 * The framework aspects of the above "contract" are implemented by 4463 * qprocsoff, removeq, and strlock: 4464 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from 4465 * entering the service procedures. 4466 * - strlock acquires the sd_lock and sd_reflock to prevent putnext, 4467 * canputnext, backenable etc from dereferencing the q_next that will 4468 * soon change. 4469 * - strlock waits for sd_refcnt to drop to zero, i.e. for any canputnext 4470 * or other q_next walker that uses claimstr/releasestr to finish. 4471 * - optionally for every syncq in the stream strlock acquires all the 4472 * sq_locks and waits for all sq_counts to drop to a value that indicates 4473 * that no thread executes in the put or service procedures and that no 4474 * thread is draining into the module/driver. This ensures that no 4475 * open, close, put, service, or qtimeout/qbufcall callback procedure is 4476 * currently executing, hence no such thread can end up with the old stale 4477 * q_next value and no canput/backenable can have the old stale 4478 * q_nfsrv/q_next. 4479 * - qdetach (wait_svc) makes sure that any scheduled or running threads 4480 * have either finished or observed the QWCLOSE flag and gone away. 4481 */ 4482 4483 4484 /* 4485 * Get all the locks necessary to change q_next. 4486 * 4487 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the 4488 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that 4489 * the only threads inside the syncq are threads currently calling removeq(). 4490 * Since threads calling removeq() are in the process of removing their queues 4491 * from the stream, we do not need to worry about them accessing a stale q_next 4492 * pointer and thus we do not need to wait for them to exit (in fact, waiting 4493 * for them can cause deadlock). 4494 * 4495 * This routine is subject to starvation since it does not set any flag to 4496 * prevent threads from entering a module in the stream (i.e. sq_count can 4497 * increase on some syncq while it is waiting on some other syncq). 4498 * 4499 * Assumes that only one thread attempts to call strlock for a given 4500 * stream. If this is not the case, the two threads would deadlock. 4501 * This assumption is guaranteed since strlock is only called by insertq 4502 * and removeq and streams plumbing changes are single-threaded for 4503 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
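 *
 * The deadlock avoidance strlock relies on for mated (twisted) streams is
 * the classic "acquire two locks in a fixed, address-based order" pattern.
 * A minimal user-level sketch of that pattern (editorial; assumes POSIX
 * threads; lock_pair()/unlock_pair() are hypothetical names):
 *
 *	#include <pthread.h>
 *
 *	void
 *	lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
 *	{
 *		if (a < b) {		// same order for every thread
 *			pthread_mutex_lock(a);
 *			pthread_mutex_lock(b);
 *		} else {
 *			pthread_mutex_lock(b);
 *			pthread_mutex_lock(a);
 *		}
 *	}
 *
 *	void
 *	unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
 *	{
 *		pthread_mutex_unlock(a);
 *		pthread_mutex_unlock(b);
 *	}
 *
 * Since the two lock addresses never change for the life of the pair,
 * every thread acquires them in the same order; this is exactly what the
 * STRLOCKMATES comparison of &sd_lock pointers in strlock() below relies
 * on.
 *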
4504 * 4505 * For pipes, it is not difficult to atomically designate a pair of streams 4506 * to be mated. Once mated atomically by the framework the twisted pair remain 4507 * configured that way until dismantled atomically by the framework. 4508 * When plumbing takes place on a twisted stream it is necessary to ensure that 4509 * this operation is done exclusively on the twisted stream since two such 4510 * operations, each initiated on different ends of the pipe will deadlock 4511 * waiting for each other to complete. 4512 * 4513 * On entry, no locks should be held. 4514 * The locks acquired and held by strlock depends on a few factors. 4515 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired 4516 * and held on exit and all sq_count are at an acceptable level. 4517 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with 4518 * sd_refcnt being zero. 4519 */ 4520 4521 static void 4522 strlock(struct stdata *stp, sqlist_t *sqlist) 4523 { 4524 syncql_t *sql, *sql2; 4525 retry: 4526 /* 4527 * Wait for any claimstr to go away. 4528 */ 4529 if (STRMATED(stp)) { 4530 struct stdata *stp1, *stp2; 4531 4532 STRLOCKMATES(stp); 4533 /* 4534 * Note that the selection of locking order is not 4535 * important, just that they are always acquired in 4536 * the same order. To assure this, we choose this 4537 * order based on the value of the pointer, and since 4538 * the pointer will not change for the life of this 4539 * pair, we will always grab the locks in the same 4540 * order (and hence, prevent deadlocks). 4541 */ 4542 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) { 4543 stp1 = stp; 4544 stp2 = stp->sd_mate; 4545 } else { 4546 stp2 = stp; 4547 stp1 = stp->sd_mate; 4548 } 4549 mutex_enter(&stp1->sd_reflock); 4550 if (stp1->sd_refcnt > 0) { 4551 STRUNLOCKMATES(stp); 4552 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock); 4553 mutex_exit(&stp1->sd_reflock); 4554 goto retry; 4555 } 4556 mutex_enter(&stp2->sd_reflock); 4557 if (stp2->sd_refcnt > 0) { 4558 STRUNLOCKMATES(stp); 4559 mutex_exit(&stp1->sd_reflock); 4560 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock); 4561 mutex_exit(&stp2->sd_reflock); 4562 goto retry; 4563 } 4564 STREAM_PUTLOCKS_ENTER(stp1); 4565 STREAM_PUTLOCKS_ENTER(stp2); 4566 } else { 4567 mutex_enter(&stp->sd_lock); 4568 mutex_enter(&stp->sd_reflock); 4569 while (stp->sd_refcnt > 0) { 4570 mutex_exit(&stp->sd_lock); 4571 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock); 4572 if (mutex_tryenter(&stp->sd_lock) == 0) { 4573 mutex_exit(&stp->sd_reflock); 4574 mutex_enter(&stp->sd_lock); 4575 mutex_enter(&stp->sd_reflock); 4576 } 4577 } 4578 STREAM_PUTLOCKS_ENTER(stp); 4579 } 4580 4581 if (sqlist == NULL) 4582 return; 4583 4584 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4585 syncq_t *sq = sql->sql_sq; 4586 uint16_t count; 4587 4588 mutex_enter(SQLOCK(sq)); 4589 count = sq->sq_count; 4590 ASSERT(sq->sq_rmqcount <= count); 4591 SQ_PUTLOCKS_ENTER(sq); 4592 SUM_SQ_PUTCOUNTS(sq, count); 4593 if (count == sq->sq_rmqcount) 4594 continue; 4595 4596 /* Failed - drop all locks that we have acquired so far */ 4597 if (STRMATED(stp)) { 4598 STREAM_PUTLOCKS_EXIT(stp); 4599 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4600 STRUNLOCKMATES(stp); 4601 mutex_exit(&stp->sd_reflock); 4602 mutex_exit(&stp->sd_mate->sd_reflock); 4603 } else { 4604 STREAM_PUTLOCKS_EXIT(stp); 4605 mutex_exit(&stp->sd_lock); 4606 mutex_exit(&stp->sd_reflock); 4607 } 4608 for (sql2 = sqlist->sqlist_head; sql2 != sql; 4609 sql2 = sql2->sql_next) { 4610 
SQ_PUTLOCKS_EXIT(sql2->sql_sq); 4611 mutex_exit(SQLOCK(sql2->sql_sq)); 4612 } 4613 4614 /* 4615 * The wait loop below may starve when there are many threads 4616 * claiming the syncq. This is especially a problem with permod 4617 * syncqs (IP). To lessen the impact of the problem we increment 4618 * sq_needexcl and clear fastbits so that putnexts will slow 4619 * down and call sqenable instead of draining right away. 4620 */ 4621 sq->sq_needexcl++; 4622 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4623 while (count > sq->sq_rmqcount) { 4624 sq->sq_flags |= SQ_WANTWAKEUP; 4625 SQ_PUTLOCKS_EXIT(sq); 4626 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4627 count = sq->sq_count; 4628 SQ_PUTLOCKS_ENTER(sq); 4629 SUM_SQ_PUTCOUNTS(sq, count); 4630 } 4631 sq->sq_needexcl--; 4632 if (sq->sq_needexcl == 0) 4633 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4634 SQ_PUTLOCKS_EXIT(sq); 4635 ASSERT(count == sq->sq_rmqcount); 4636 mutex_exit(SQLOCK(sq)); 4637 goto retry; 4638 } 4639 } 4640 4641 /* 4642 * Drop all the locks that strlock acquired. 4643 */ 4644 static void 4645 strunlock(struct stdata *stp, sqlist_t *sqlist) 4646 { 4647 syncql_t *sql; 4648 4649 if (STRMATED(stp)) { 4650 STREAM_PUTLOCKS_EXIT(stp); 4651 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4652 STRUNLOCKMATES(stp); 4653 mutex_exit(&stp->sd_reflock); 4654 mutex_exit(&stp->sd_mate->sd_reflock); 4655 } else { 4656 STREAM_PUTLOCKS_EXIT(stp); 4657 mutex_exit(&stp->sd_lock); 4658 mutex_exit(&stp->sd_reflock); 4659 } 4660 4661 if (sqlist == NULL) 4662 return; 4663 4664 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4665 SQ_PUTLOCKS_EXIT(sql->sql_sq); 4666 mutex_exit(SQLOCK(sql->sql_sq)); 4667 } 4668 } 4669 4670 /* 4671 * When the inserted module has a service procedure, we need to check 4672 * whether the next module with a service procedure is in flow control, 4673 * and if so trigger the backenable. 4674 */ 4675 static void 4676 backenable_insertedq(queue_t *q) 4677 { 4678 qband_t *qbp; 4679 4680 claimstr(q); 4681 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) { 4682 if (q->q_next->q_nfsrv->q_flag & QWANTW) 4683 backenable(q, 0); 4684 4685 qbp = q->q_next->q_nfsrv->q_bandp; 4686 for (; qbp != NULL; qbp = qbp->qb_next) 4687 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL) 4688 backenable(q, qbp->qb_first->b_band); 4689 } 4690 releasestr(q); 4691 } 4692 4693 /* 4694 * Given two read queues, insert the new one after the existing one. 4695 * 4696 * This routine acquires all the necessary locks in order to change 4697 * q_next and related pointers using strlock(). 4698 * It depends on the stream head ensuring that there are no concurrent 4699 * insertq or removeq on the same stream. The stream head ensures this 4700 * using the flags STWOPEN, STRCLOSE, and STRPLUMB. 4701 * 4702 * Note that no syncq locks are held during the q_next change. This is 4703 * applied to all streams since, unlike removeq, there is no problem of stale 4704 * pointers when adding a module to the stream. Thus drivers/modules that do a 4705 * canput(rq->q_next) would never get a closed/freed queue pointer even if we 4706 * applied this optimization to all streams.
4707 */ 4708 void 4709 insertq(struct stdata *stp, queue_t *new) 4710 { 4711 queue_t *after; 4712 queue_t *wafter; 4713 queue_t *wnew = _WR(new); 4714 boolean_t have_fifo = B_FALSE; 4715 4716 if (new->q_flag & _QINSERTING) { 4717 ASSERT(stp->sd_vnode->v_type != VFIFO); 4718 after = new->q_next; 4719 wafter = _WR(new->q_next); 4720 } else { 4721 after = _RD(stp->sd_wrq); 4722 wafter = stp->sd_wrq; 4723 } 4724 4725 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ, 4726 "insertq:%p, %p", after, new); 4727 ASSERT(after->q_flag & QREADR); 4728 ASSERT(new->q_flag & QREADR); 4729 4730 strlock(stp, NULL); 4731 4732 /* Do we have a FIFO? */ 4733 if (wafter->q_next == after) { 4734 have_fifo = B_TRUE; 4735 wnew->q_next = new; 4736 } else { 4737 wnew->q_next = wafter->q_next; 4738 } 4739 new->q_next = after; 4740 4741 set_nfsrv_ptr(new, wnew, after, wafter); 4742 /* 4743 * set_nfsrv_ptr() needs to know if this is an insertion or not, 4744 * so only reset this flag after calling it. 4745 */ 4746 new->q_flag &= ~_QINSERTING; 4747 4748 if (have_fifo) { 4749 wafter->q_next = wnew; 4750 } else { 4751 if (wafter->q_next) 4752 _OTHERQ(wafter->q_next)->q_next = new; 4753 wafter->q_next = wnew; 4754 } 4755 4756 set_qend(new); 4757 /* The QEND flag might have to be updated for the upstream guy */ 4758 set_qend(after); 4759 4760 ASSERT(_SAMESTR(new) == O_SAMESTR(new)); 4761 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew)); 4762 ASSERT(_SAMESTR(after) == O_SAMESTR(after)); 4763 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter)); 4764 strsetuio(stp); 4765 4766 /* 4767 * If this was a module insertion, bump the push count. 4768 */ 4769 if (!(new->q_flag & QISDRV)) 4770 stp->sd_pushcnt++; 4771 4772 strunlock(stp, NULL); 4773 4774 /* check if the write Q needs backenable */ 4775 backenable_insertedq(wnew); 4776 4777 /* check if the read Q needs backenable */ 4778 backenable_insertedq(new); 4779 } 4780 4781 /* 4782 * Given a read queue, unlink it from any neighbors. 4783 * 4784 * This routine acquires all the necessary locks in order to 4785 * change q_next and related pointers and also guard against 4786 * stale references (e.g. through q_next) to the queue that 4787 * is being removed. It also plays a part in ensuring 4788 * that the module's/driver's put procedure doesn't get called 4789 * after qprocsoff returns. 4790 * 4791 * Removeq depends on the stream head ensuring that there are 4792 * no concurrent insertq or removeq on the same stream. The 4793 * stream head ensures this using the flags STWOPEN, STRCLOSE and 4794 * STRPLUMB. 4795 * 4796 * The set of locks needed to remove the queue is different in 4797 * different cases: 4798 * 4799 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after 4800 * waiting for the syncq reference count to drop to 0 indicating that no 4801 * non-close threads are present anywhere in the stream. This ensures that any 4802 * module/driver can reference q_next in its open, close, put, or service 4803 * procedures. 4804 * 4805 * The sq_rmqcount counter tracks the number of threads inside removeq(). 4806 * strlock() ensures that either there are no threads executing inside the 4807 * perimeter or that the only thread present is one calling qprocsoff(). 4808 * 4809 * strlock() compares the value of sq_count with the number of threads inside 4810 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake 4811 * up any threads waiting in strlock() when sq_rmqcount increases.
4812 */ 4813 4814 void 4815 removeq(queue_t *qp) 4816 { 4817 queue_t *wqp = _WR(qp); 4818 struct stdata *stp = STREAM(qp); 4819 sqlist_t *sqlist = NULL; 4820 boolean_t isdriver; 4821 int moved; 4822 syncq_t *sq = qp->q_syncq; 4823 syncq_t *wsq = wqp->q_syncq; 4824 4825 ASSERT(stp); 4826 4827 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4828 "removeq:%p %p", qp, wqp); 4829 ASSERT(qp->q_flag&QREADR); 4830 4831 /* 4832 * For queues using synchronous streams, we must wait for all threads 4833 * in rwnext() to drain out before proceeding. 4834 */ 4835 if (qp->q_flag & QSYNCSTR) { 4836 /* First, we need to wake up any threads blocked in rwnext() */ 4837 mutex_enter(SQLOCK(sq)); 4838 if (sq->sq_flags & SQ_WANTWAKEUP) { 4839 sq->sq_flags &= ~SQ_WANTWAKEUP; 4840 cv_broadcast(&sq->sq_wait); 4841 } 4842 mutex_exit(SQLOCK(sq)); 4843 4844 if (wsq != sq) { 4845 mutex_enter(SQLOCK(wsq)); 4846 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4847 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4848 cv_broadcast(&wsq->sq_wait); 4849 } 4850 mutex_exit(SQLOCK(wsq)); 4851 } 4852 4853 mutex_enter(QLOCK(qp)); 4854 while (qp->q_rwcnt > 0) { 4855 qp->q_flag |= QWANTRMQSYNC; 4856 cv_wait(&qp->q_wait, QLOCK(qp)); 4857 } 4858 mutex_exit(QLOCK(qp)); 4859 4860 mutex_enter(QLOCK(wqp)); 4861 while (wqp->q_rwcnt > 0) { 4862 wqp->q_flag |= QWANTRMQSYNC; 4863 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4864 } 4865 mutex_exit(QLOCK(wqp)); 4866 } 4867 4868 mutex_enter(SQLOCK(sq)); 4869 sq->sq_rmqcount++; 4870 if (sq->sq_flags & SQ_WANTWAKEUP) { 4871 sq->sq_flags &= ~SQ_WANTWAKEUP; 4872 cv_broadcast(&sq->sq_wait); 4873 } 4874 mutex_exit(SQLOCK(sq)); 4875 4876 isdriver = (qp->q_flag & QISDRV); 4877 4878 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4879 strlock(stp, sqlist); 4880 4881 reset_nfsrv_ptr(qp, wqp); 4882 4883 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4884 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4885 /* Do we have a FIFO? */ 4886 if (wqp->q_next == qp) { 4887 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4888 } else { 4889 if (wqp->q_next) 4890 backq(qp)->q_next = qp->q_next; 4891 if (qp->q_next) 4892 backq(wqp)->q_next = wqp->q_next; 4893 } 4894 4895 /* The QEND flag might have to be updated for the upstream guy */ 4896 if (qp->q_next) 4897 set_qend(qp->q_next); 4898 4899 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4900 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4901 4902 /* 4903 * Move any messages destined for the put procedures to the next 4904 * syncq in line. Otherwise free them. 4905 */ 4906 moved = 0; 4907 /* 4908 * Quick check to see whether there are any messages or events. 4909 */ 4910 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4911 moved += propagate_syncq(qp); 4912 if (wqp->q_syncqmsgs != 0 || 4913 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4914 moved += propagate_syncq(wqp); 4915 4916 strsetuio(stp); 4917 4918 /* 4919 * If this was a module removal, decrement the push count. 4920 */ 4921 if (!isdriver) 4922 stp->sd_pushcnt--; 4923 4924 strunlock(stp, sqlist); 4925 sqlist_free(sqlist); 4926 4927 /* 4928 * Make sure any messages that were propagated are drained. 4929 * Also clear any QFULL bit caused by messages that were propagated.
4930 */ 4931 4932 if (qp->q_next != NULL) { 4933 clr_qfull(qp); 4934 /* 4935 * For the driver calling qprocsoff, propagate_syncq 4936 * frees all the messages instead of putting them in 4937 * the stream head 4938 */ 4939 if (!isdriver && (moved > 0)) 4940 emptysq(qp->q_next->q_syncq); 4941 } 4942 if (wqp->q_next != NULL) { 4943 clr_qfull(wqp); 4944 /* 4945 * We come here for any pop of a module except for the 4946 * case of the driver being removed. We don't call emptysq 4947 * if we did not move any messages; this avoids holding 4948 * PERMOD syncq locks in emptysq 4949 */ 4950 if (moved > 0) 4951 emptysq(wqp->q_next->q_syncq); 4952 } 4953 4954 mutex_enter(SQLOCK(sq)); 4955 sq->sq_rmqcount--; 4956 mutex_exit(SQLOCK(sq)); 4957 } 4958 4959 /* 4960 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4961 * SQ_WRITER) on a syncq. 4962 * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the 4963 * sync queue and waits until sq_count reaches maxcnt. 4964 * 4965 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller 4966 * does not care about putnext threads that are in the middle of calling put 4967 * entry points. 4968 * 4969 * This routine is used for both inner and outer syncqs. 4970 */ 4971 static void 4972 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 4973 { 4974 uint16_t count = 0; 4975 4976 mutex_enter(SQLOCK(sq)); 4977 /* 4978 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 4979 * SQ_FROZEN will be set if there is a frozen stream that has a 4980 * queue which also refers to this "shared" syncq. 4981 * SQ_BLOCKED will be set if there is an "off" queue which also 4982 * refers to this "shared" syncq. 4983 */ 4984 if (maxcnt != -1) { 4985 count = sq->sq_count; 4986 SQ_PUTLOCKS_ENTER(sq); 4987 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4988 SUM_SQ_PUTCOUNTS(sq, count); 4989 } 4990 sq->sq_needexcl++; 4991 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 4992 4993 while ((sq->sq_flags & flag) || 4994 (maxcnt != -1 && count > (unsigned)maxcnt)) { 4995 sq->sq_flags |= SQ_WANTWAKEUP; 4996 if (maxcnt != -1) { 4997 SQ_PUTLOCKS_EXIT(sq); 4998 } 4999 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5000 if (maxcnt != -1) { 5001 count = sq->sq_count; 5002 SQ_PUTLOCKS_ENTER(sq); 5003 SUM_SQ_PUTCOUNTS(sq, count); 5004 } 5005 } 5006 sq->sq_needexcl--; 5007 sq->sq_flags |= flag; 5008 ASSERT(maxcnt == -1 || count == maxcnt); 5009 if (maxcnt != -1) { 5010 if (sq->sq_needexcl == 0) { 5011 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5012 } 5013 SQ_PUTLOCKS_EXIT(sq); 5014 } else if (sq->sq_needexcl == 0) { 5015 SQ_PUTCOUNT_SETFAST(sq); 5016 } 5017 5018 mutex_exit(SQLOCK(sq)); 5019 } 5020 5021 /* 5022 * Reset a flag that was set with blocksq. 5023 * 5024 * Cannot use this routine to reset SQ_WRITER. 5025 * 5026 * If "isouter" is set then the syncq is assumed to be an outer perimeter 5027 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 5028 * to handle the queued qwriter operations. 5029 * 5030 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5031 * sq_putlocks are used.
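 *
 * Together, blocksq/unblocksq form a flag-based gate: the blocker waits
 * for the flag to clear and for the threads already inside to drain, then
 * sets the flag; the unblocker clears the flag and broadcasts. A minimal
 * user-level sketch of that gate (editorial; assumes POSIX threads; all
 * names hypothetical):
 *
 *	#include <pthread.h>
 *
 *	#define	BLOCKED	0x01
 *
 *	static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t g_wait = PTHREAD_COND_INITIALIZER;
 *	static int g_flags;
 *	static int g_users;		// threads currently inside
 *
 *	void
 *	gate_block(void)		// cf. blocksq()
 *	{
 *		pthread_mutex_lock(&g_lock);
 *		while ((g_flags & BLOCKED) || g_users != 0)
 *			pthread_cond_wait(&g_wait, &g_lock);
 *		g_flags |= BLOCKED;
 *		pthread_mutex_unlock(&g_lock);
 *	}
 *
 *	void
 *	gate_unblock(void)		// cf. unblocksq()
 *	{
 *		pthread_mutex_lock(&g_lock);
 *		g_flags &= ~BLOCKED;
 *		pthread_cond_broadcast(&g_wait);
 *		pthread_mutex_unlock(&g_lock);
 *	}
 *
 * Entering threads would wait for BLOCKED to clear, bump g_users, and
 * broadcast on g_wait when g_users drops back to zero.
 *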
5032 */ 5033 static void 5034 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 5035 { 5036 uint16_t flags; 5037 5038 mutex_enter(SQLOCK(sq)); 5039 ASSERT(resetflag != SQ_WRITER); 5040 ASSERT(sq->sq_flags & resetflag); 5041 flags = sq->sq_flags & ~resetflag; 5042 sq->sq_flags = flags; 5043 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 5044 if (flags & SQ_WANTWAKEUP) { 5045 flags &= ~SQ_WANTWAKEUP; 5046 cv_broadcast(&sq->sq_wait); 5047 } 5048 sq->sq_flags = flags; 5049 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5050 if (!isouter) { 5051 /* drain_syncq drops SQLOCK */ 5052 drain_syncq(sq); 5053 return; 5054 } 5055 } 5056 } 5057 mutex_exit(SQLOCK(sq)); 5058 } 5059 5060 /* 5061 * Reset a flag that was set with blocksq. 5062 * Does not drain the syncq. Use emptysq() for that. 5063 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 5064 * 5065 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5066 * sq_putlocks are used. 5067 */ 5068 static int 5069 dropsq(syncq_t *sq, uint16_t resetflag) 5070 { 5071 uint16_t flags; 5072 5073 mutex_enter(SQLOCK(sq)); 5074 ASSERT(sq->sq_flags & resetflag); 5075 flags = sq->sq_flags & ~resetflag; 5076 if (flags & SQ_WANTWAKEUP) { 5077 flags &= ~SQ_WANTWAKEUP; 5078 cv_broadcast(&sq->sq_wait); 5079 } 5080 sq->sq_flags = flags; 5081 mutex_exit(SQLOCK(sq)); 5082 if (flags & SQ_QUEUED) 5083 return (1); 5084 return (0); 5085 } 5086 5087 /* 5088 * Empty all the messages on a syncq. 5089 * 5090 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5091 * sq_putlocks are used. 5092 */ 5093 static void 5094 emptysq(syncq_t *sq) 5095 { 5096 uint16_t flags; 5097 5098 mutex_enter(SQLOCK(sq)); 5099 flags = sq->sq_flags; 5100 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5101 /* 5102 * To prevent potential recursive invocation of drain_syncq we 5103 * do not call drain_syncq if count is non-zero. 5104 */ 5105 if (sq->sq_count == 0) { 5106 /* drain_syncq() drops SQLOCK */ 5107 drain_syncq(sq); 5108 return; 5109 } else 5110 sqenable(sq); 5111 } 5112 mutex_exit(SQLOCK(sq)); 5113 } 5114 5115 /* 5116 * Ordered insert while removing duplicates. 5117 */ 5118 static void 5119 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 5120 { 5121 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 5122 5123 prev_sqlpp = &sqlist->sqlist_head; 5124 while ((sqlp = *prev_sqlpp) != NULL) { 5125 if (sqlp->sql_sq >= sqp) { 5126 if (sqlp->sql_sq == sqp) /* duplicate */ 5127 return; 5128 break; 5129 } 5130 prev_sqlpp = &sqlp->sql_next; 5131 } 5132 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 5133 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 5134 new_sqlp->sql_next = sqlp; 5135 new_sqlp->sql_sq = sqp; 5136 *prev_sqlpp = new_sqlp; 5137 } 5138 5139 /* 5140 * Walk the write side queues until we hit either the driver 5141 * or a twist in the stream (_SAMESTR will return false in both 5142 * these cases) then turn around and walk the read side queues 5143 * back up to the stream head. 5144 */ 5145 static void 5146 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5147 { 5148 while (q != NULL) { 5149 sqlist_insert(sqlist, q->q_syncq); 5150 5151 if (_SAMESTR(q)) 5152 q = q->q_next; 5153 else if (!(q->q_flag & QREADR)) 5154 q = _RD(q); 5155 else 5156 q = NULL; 5157 } 5158 } 5159 5160 /* 5161 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5162 * associated with the "q" parameter. The resulting list is sorted in a 5163 * canonical order and is free of duplicates. 
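 *
 * The ordered, duplicate-free insertion performed by sqlist_insert()
 * above is a plain sorted insert keyed by pointer value. A minimal
 * user-level sketch of the same technique (editorial; hypothetical names,
 * not the STREAMS code itself):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	typedef struct node {
 *		struct node *next;
 *		void *key;		// list kept sorted by this value
 *	} node_t;
 *
 *	void
 *	sorted_insert(node_t **headp, node_t *np, void *key)
 *	{
 *		node_t **pp = headp;
 *
 *		while (*pp != NULL && (uintptr_t)(*pp)->key < (uintptr_t)key)
 *			pp = &(*pp)->next;	// advance to insertion point
 *		if (*pp != NULL && (*pp)->key == key)
 *			return;			// duplicate: drop it
 *		np->key = key;
 *		np->next = *pp;
 *		*pp = np;
 *	}
 *
 * Sorting by address yields the "canonical order" mentioned above, so two
 * independently built lists over the same syncqs always agree, which in
 * turn gives a consistent lock acquisition order.
 *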
5164 * Assumes the passed queue is a _RD(q). 5165 */ 5166 static sqlist_t * 5167 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5168 { 5169 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5170 5171 /* 5172 * start with the current queue/qpair 5173 */ 5174 ASSERT(q->q_flag & QREADR); 5175 5176 sqlist_insert(sqlist, q->q_syncq); 5177 sqlist_insert(sqlist, _WR(q)->q_syncq); 5178 5179 sqlist_insertall(sqlist, stp->sd_wrq); 5180 if (do_twist) 5181 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5182 5183 return (sqlist); 5184 } 5185 5186 static sqlist_t * 5187 sqlist_alloc(struct stdata *stp, int kmflag) 5188 { 5189 size_t sqlist_size; 5190 sqlist_t *sqlist; 5191 5192 /* 5193 * Allocate 2 syncql_t's for each pushed module. Note that 5194 * the sqlist_t structure already has 4 syncql_t's built in: 5195 * 2 for the stream head, and 2 for the driver/other stream head. 5196 */ 5197 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5198 sizeof (sqlist_t); 5199 if (STRMATED(stp)) 5200 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5201 sqlist = kmem_alloc(sqlist_size, kmflag); 5202 5203 sqlist->sqlist_head = NULL; 5204 sqlist->sqlist_size = sqlist_size; 5205 sqlist->sqlist_index = 0; 5206 5207 return (sqlist); 5208 } 5209 5210 /* 5211 * Free the list created by sqlist_alloc() 5212 */ 5213 static void 5214 sqlist_free(sqlist_t *sqlist) 5215 { 5216 kmem_free(sqlist, sqlist->sqlist_size); 5217 } 5218 5219 /* 5220 * Prevent any new entries into any syncq in this stream. 5221 * Used by freezestr. 5222 */ 5223 void 5224 strblock(queue_t *q) 5225 { 5226 struct stdata *stp; 5227 syncql_t *sql; 5228 sqlist_t *sqlist; 5229 5230 q = _RD(q); 5231 5232 stp = STREAM(q); 5233 ASSERT(stp != NULL); 5234 5235 /* 5236 * Get a sorted list with all the duplicates removed containing 5237 * all the syncqs referenced by this stream. 5238 */ 5239 sqlist = sqlist_build(q, stp, B_FALSE); 5240 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5241 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5242 sqlist_free(sqlist); 5243 } 5244 5245 /* 5246 * Release the block on new entries into this stream 5247 */ 5248 void 5249 strunblock(queue_t *q) 5250 { 5251 struct stdata *stp; 5252 syncql_t *sql; 5253 sqlist_t *sqlist; 5254 int drain_needed; 5255 5256 q = _RD(q); 5257 5258 /* 5259 * Get a sorted list with all the duplicates removed containing 5260 * all the syncqs referenced by this stream. 5261 * Have to drop the SQ_FROZEN flag on all the syncqs before 5262 * starting to drain them; otherwise the draining might 5263 * cause a freezestr in some module on the stream (which 5264 * would deadlock). 
5265 */ 5266 stp = STREAM(q); 5267 ASSERT(stp != NULL); 5268 sqlist = sqlist_build(q, stp, B_FALSE); 5269 drain_needed = 0; 5270 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5271 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5272 if (drain_needed) { 5273 for (sql = sqlist->sqlist_head; sql != NULL; 5274 sql = sql->sql_next) 5275 emptysq(sql->sql_sq); 5276 } 5277 sqlist_free(sqlist); 5278 } 5279 5280 #ifdef DEBUG 5281 static int 5282 qprocsareon(queue_t *rq) 5283 { 5284 if (rq->q_next == NULL) 5285 return (0); 5286 return (_WR(rq->q_next)->q_next == _WR(rq)); 5287 } 5288 5289 int 5290 qclaimed(queue_t *q) 5291 { 5292 uint_t count; 5293 5294 count = q->q_syncq->sq_count; 5295 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5296 return (count != 0); 5297 } 5298 5299 /* 5300 * Check if anyone has frozen this stream with freezestr 5301 */ 5302 int 5303 frozenstr(queue_t *q) 5304 { 5305 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5306 } 5307 #endif /* DEBUG */ 5308 5309 /* 5310 * Enter a queue. 5311 * Obsolete interface. Should not be used. 5312 */ 5313 void 5314 enterq(queue_t *q) 5315 { 5316 entersq(q->q_syncq, SQ_CALLBACK); 5317 } 5318 5319 void 5320 leaveq(queue_t *q) 5321 { 5322 leavesq(q->q_syncq, SQ_CALLBACK); 5323 } 5324 5325 /* 5326 * Enter a perimeter. c_inner and c_outer specify which concurrency bits 5327 * to check. 5328 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5329 * calls and the running of open, close and service procedures. 5330 * 5331 * If the c_inner bit is set there is no need to grab sq_putlocks since we 5332 * don't care whether other threads have entered or are entering the put 5333 * entry point. 5334 * 5335 * If the c_inner bit is set it might have been possible to use 5336 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize the 5337 * open/close path for IP), but since the count may need to be decremented in 5338 * qwait() we wouldn't know which counter to decrement. Currently the counter 5339 * is selected by the current cpu_seqid, and the current CPU can change at any 5340 * moment. XXX in the future we might use curthread id bits to select the 5341 * counter, and this would stay constant across routine calls. 5342 */ 5343 void 5344 entersq(syncq_t *sq, int entrypoint) 5345 { 5346 uint16_t count = 0; 5347 uint16_t flags; 5348 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5349 uint16_t type; 5350 uint_t c_inner = entrypoint & SQ_CI; 5351 uint_t c_outer = entrypoint & SQ_CO; 5352 5353 /* 5354 * Increment ref count to keep closes out of this queue. 5355 */ 5355 ASSERT(sq); 5356 ASSERT(c_inner && c_outer); 5357 mutex_enter(SQLOCK(sq)); 5358 flags = sq->sq_flags; 5359 type = sq->sq_type; 5360 if (!(type & c_inner)) { 5361 /* Make sure all putcounts now use slowlock. */ 5362 count = sq->sq_count; 5363 SQ_PUTLOCKS_ENTER(sq); 5364 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5365 SUM_SQ_PUTCOUNTS(sq, count); 5366 sq->sq_needexcl++; 5367 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5368 waitflags |= SQ_MESSAGES; 5369 } 5370 /* 5371 * Wait until we can enter the inner perimeter. 5372 * If we want exclusive access we wait until sq_count is 0. 5373 * We have to do this before entering the outer perimeter in order 5374 * to preserve put/close message ordering.
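 *
 * The sq_putcounts summed by SUM_SQ_PUTCOUNTS here are an instance of
 * sharded (per-CPU) counters: an increment touches only one shard, while
 * an exact total requires taking every shard lock. A minimal user-level
 * sketch of that idea (editorial; assumes POSIX threads; names and shard
 * count are hypothetical):
 *
 *	#include <pthread.h>
 *
 *	#define	NSHARDS	4
 *
 *	static pthread_mutex_t shard_lock[NSHARDS] = {
 *		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
 *		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
 *	};
 *	static int shard_cnt[NSHARDS];
 *
 *	void
 *	shard_incr(unsigned self)	// cheap, mostly uncontended
 *	{
 *		pthread_mutex_lock(&shard_lock[self % NSHARDS]);
 *		shard_cnt[self % NSHARDS]++;
 *		pthread_mutex_unlock(&shard_lock[self % NSHARDS]);
 *	}
 *
 *	int
 *	shard_total(void)	// cf. SQ_PUTLOCKS_ENTER + SUM_SQ_PUTCOUNTS
 *	{
 *		int i, sum = 0;
 *
 *		for (i = 0; i < NSHARDS; i++)
 *			pthread_mutex_lock(&shard_lock[i]);
 *		for (i = 0; i < NSHARDS; i++)
 *			sum += shard_cnt[i];
 *		for (i = 0; i < NSHARDS; i++)
 *			pthread_mutex_unlock(&shard_lock[i]);
 *		return (sum);
 *	}
 *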
5375 */ 5376 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5377 sq->sq_flags = flags | SQ_WANTWAKEUP; 5378 if (!(type & c_inner)) { 5379 SQ_PUTLOCKS_EXIT(sq); 5380 } 5381 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5382 if (!(type & c_inner)) { 5383 count = sq->sq_count; 5384 SQ_PUTLOCKS_ENTER(sq); 5385 SUM_SQ_PUTCOUNTS(sq, count); 5386 } 5387 flags = sq->sq_flags; 5388 } 5389 5390 if (!(type & c_inner)) { 5391 ASSERT(sq->sq_needexcl > 0); 5392 sq->sq_needexcl--; 5393 if (sq->sq_needexcl == 0) { 5394 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5395 } 5396 } 5397 5398 /* Check if we need to enter the outer perimeter */ 5399 if (!(type & c_outer)) { 5400 /* 5401 * We have to enter the outer perimeter exclusively before 5402 * we can increment sq_count to avoid deadlock. This implies 5403 * that we have to re-check sq_flags and sq_count. 5404 * 5405 * is it possible to have c_inner set when c_outer is not set? 5406 */ 5407 if (!(type & c_inner)) { 5408 SQ_PUTLOCKS_EXIT(sq); 5409 } 5410 mutex_exit(SQLOCK(sq)); 5411 outer_enter(sq->sq_outer, SQ_GOAWAY); 5412 mutex_enter(SQLOCK(sq)); 5413 flags = sq->sq_flags; 5414 /* 5415 * there should be no need to recheck sq_putcounts 5416 * because outer_enter() has already waited for them to clear 5417 * after setting SQ_WRITER. 5418 */ 5419 count = sq->sq_count; 5420 #ifdef DEBUG 5421 /* 5422 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5423 * of doing an ASSERT internally. Others should do 5424 * something like 5425 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5426 * without the need to #ifdef DEBUG it. 5427 */ 5428 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5429 #endif 5430 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5431 (!(type & c_inner) && count != 0)) { 5432 sq->sq_flags = flags | SQ_WANTWAKEUP; 5433 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5434 count = sq->sq_count; 5435 flags = sq->sq_flags; 5436 } 5437 } 5438 5439 sq->sq_count++; 5440 ASSERT(sq->sq_count != 0); /* Wraparound */ 5441 if (!(type & c_inner)) { 5442 /* Exclusive entry */ 5443 ASSERT(sq->sq_count == 1); 5444 sq->sq_flags |= SQ_EXCL; 5445 if (type & c_outer) { 5446 SQ_PUTLOCKS_EXIT(sq); 5447 } 5448 } 5449 mutex_exit(SQLOCK(sq)); 5450 } 5451 5452 /* 5453 * Leave a syncq. Announce to framework that closes may proceed. 5454 * c_inner and c_outer specify which concurrency bits to check. 5455 * 5456 * Must never be called from driver or module put entry point. 5457 * 5458 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5459 * sq_putlocks are used. 5460 */ 5461 void 5462 leavesq(syncq_t *sq, int entrypoint) 5463 { 5464 uint16_t flags; 5465 uint16_t type; 5466 uint_t c_outer = entrypoint & SQ_CO; 5467 #ifdef DEBUG 5468 uint_t c_inner = entrypoint & SQ_CI; 5469 #endif 5470 5471 /* 5472 * Decrement ref count, drain the syncq if possible, and wake up 5473 * any waiting close. 5474 */ 5475 ASSERT(sq); 5476 ASSERT(c_inner && c_outer); 5477 mutex_enter(SQLOCK(sq)); 5478 flags = sq->sq_flags; 5479 type = sq->sq_type; 5480 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5481 5482 if (flags & SQ_WANTWAKEUP) { 5483 flags &= ~SQ_WANTWAKEUP; 5484 cv_broadcast(&sq->sq_wait); 5485 } 5486 if (flags & SQ_WANTEXWAKEUP) { 5487 flags &= ~SQ_WANTEXWAKEUP; 5488 cv_broadcast(&sq->sq_exitwait); 5489 } 5490 5491 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5492 /* 5493 * The syncq needs to be drained. "Exit" the syncq 5494 * before calling drain_syncq. 
5495 */ 5496 ASSERT(sq->sq_count != 0); 5497 sq->sq_count--; 5498 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5499 sq->sq_flags = flags & ~SQ_EXCL; 5500 drain_syncq(sq); 5501 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5502 /* Check if we need to exit the outer perimeter */ 5503 /* XXX will this ever be true? */ 5504 if (!(type & c_outer)) 5505 outer_exit(sq->sq_outer); 5506 return; 5507 } 5508 } 5509 ASSERT(sq->sq_count != 0); 5510 sq->sq_count--; 5511 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5512 sq->sq_flags = flags & ~SQ_EXCL; 5513 mutex_exit(SQLOCK(sq)); 5514 5515 /* Check if we need to exit the outer perimeter */ 5516 if (!(sq->sq_type & c_outer)) 5517 outer_exit(sq->sq_outer); 5518 } 5519 5520 /* 5521 * Prevent q_next from changing in this stream by incrementing sq_count. 5522 * 5523 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5524 * sq_putlocks are used. 5525 */ 5526 void 5527 claimq(queue_t *qp) 5528 { 5529 syncq_t *sq = qp->q_syncq; 5530 5531 mutex_enter(SQLOCK(sq)); 5532 sq->sq_count++; 5533 ASSERT(sq->sq_count != 0); /* Wraparound */ 5534 mutex_exit(SQLOCK(sq)); 5535 } 5536 5537 /* 5538 * Undo claimq. 5539 * 5540 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5541 * sq_putlocks are used. 5542 */ 5543 void 5544 releaseq(queue_t *qp) 5545 { 5546 syncq_t *sq = qp->q_syncq; 5547 uint16_t flags; 5548 5549 mutex_enter(SQLOCK(sq)); 5550 ASSERT(sq->sq_count > 0); 5551 sq->sq_count--; 5552 5553 flags = sq->sq_flags; 5554 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5555 if (flags & SQ_WANTWAKEUP) { 5556 flags &= ~SQ_WANTWAKEUP; 5557 cv_broadcast(&sq->sq_wait); 5558 } 5559 sq->sq_flags = flags; 5560 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5561 /* 5562 * To prevent potential recursive invocation of 5563 * drain_syncq we do not call drain_syncq if count is 5564 * non-zero. 5565 */ 5566 if (sq->sq_count == 0) { 5567 drain_syncq(sq); 5568 return; 5569 } else 5570 sqenable(sq); 5571 } 5572 } 5573 mutex_exit(SQLOCK(sq)); 5574 } 5575 5576 /* 5577 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5578 */ 5579 void 5580 claimstr(queue_t *qp) 5581 { 5582 struct stdata *stp = STREAM(qp); 5583 5584 mutex_enter(&stp->sd_reflock); 5585 stp->sd_refcnt++; 5586 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5587 mutex_exit(&stp->sd_reflock); 5588 } 5589 5590 /* 5591 * Undo claimstr. 
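 *
 * claimstr() above and releasestr() below form a reference count with a
 * wakeup when the count hits zero, which is what lets strlock() wait out
 * all q_next walkers. A minimal user-level sketch of that pattern
 * (editorial; assumes POSIX threads; hold/rele/wait_for_zero are
 * hypothetical names):
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t reflock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t refzero = PTHREAD_COND_INITIALIZER;
 *	static unsigned refcnt;
 *
 *	void
 *	hold(void)			// cf. claimstr()
 *	{
 *		pthread_mutex_lock(&reflock);
 *		refcnt++;
 *		pthread_mutex_unlock(&reflock);
 *	}
 *
 *	void
 *	rele(void)			// cf. releasestr()
 *	{
 *		pthread_mutex_lock(&reflock);
 *		if (--refcnt == 0)
 *			pthread_cond_broadcast(&refzero);
 *		pthread_mutex_unlock(&reflock);
 *	}
 *
 *	void
 *	wait_for_zero(void)		// cf. the sd_refcnt wait in strlock()
 *	{
 *		pthread_mutex_lock(&reflock);
 *		while (refcnt != 0)
 *			pthread_cond_wait(&refzero, &reflock);
 *		pthread_mutex_unlock(&reflock);
 *	}
 *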
5592 */ 5593 void 5594 releasestr(queue_t *qp) 5595 { 5596 struct stdata *stp = STREAM(qp); 5597 5598 mutex_enter(&stp->sd_reflock); 5599 ASSERT(stp->sd_refcnt != 0); 5600 if (--stp->sd_refcnt == 0) 5601 cv_broadcast(&stp->sd_refmonitor); 5602 mutex_exit(&stp->sd_reflock); 5603 } 5604 5605 static syncq_t * 5606 new_syncq(void) 5607 { 5608 return (kmem_cache_alloc(syncq_cache, KM_SLEEP)); 5609 } 5610 5611 static void 5612 free_syncq(syncq_t *sq) 5613 { 5614 ASSERT(sq->sq_head == NULL); 5615 ASSERT(sq->sq_outer == NULL); 5616 ASSERT(sq->sq_callbpend == NULL); 5617 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) || 5618 (sq->sq_onext == sq && sq->sq_oprev == sq)); 5619 5620 if (sq->sq_ciputctrl != NULL) { 5621 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 5622 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 5623 sq->sq_nciputctrl, 0); 5624 ASSERT(ciputctrl_cache != NULL); 5625 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 5626 } 5627 5628 sq->sq_tail = NULL; 5629 sq->sq_evhead = NULL; 5630 sq->sq_evtail = NULL; 5631 sq->sq_ciputctrl = NULL; 5632 sq->sq_nciputctrl = 0; 5633 sq->sq_count = 0; 5634 sq->sq_rmqcount = 0; 5635 sq->sq_callbflags = 0; 5636 sq->sq_cancelid = 0; 5637 sq->sq_next = NULL; 5638 sq->sq_needexcl = 0; 5639 sq->sq_svcflags = 0; 5640 sq->sq_nqueues = 0; 5641 sq->sq_pri = 0; 5642 sq->sq_onext = NULL; 5643 sq->sq_oprev = NULL; 5644 sq->sq_flags = 0; 5645 sq->sq_type = 0; 5646 sq->sq_servcount = 0; 5647 5648 kmem_cache_free(syncq_cache, sq); 5649 } 5650 5651 /* Outer perimeter code */ 5652 5653 /* 5654 * The outer syncq uses the fields and flags in the syncq slightly 5655 * differently from the inner syncqs. 5656 * sq_count Incremented when there are pending or running 5657 * writers at the outer perimeter to prevent the set of 5658 * inner syncqs that belong to the outer perimeter from 5659 * changing. 5660 * sq_head/tail List of deferred qwriter(OUTER) operations. 5661 * 5662 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while 5663 * inner syncqs are added to or removed from the 5664 * outer perimeter. 5665 * SQ_QUEUED sq_head/tail has messages or events queued. 5666 * 5667 * SQ_WRITER A thread is currently traversing all the inner syncqs 5668 * setting the SQ_WRITER flag. 5669 */ 5670 5671 /* 5672 * Get write access at the outer perimeter. 5673 * Note that read access is done by entersq, putnext, and put by simply 5674 * incrementing sq_count in the inner syncq. 5675 * 5676 * Waits until "flags" is no longer set in the outer to prevent multiple 5677 * threads from having write access at the same time. SQ_WRITER has to be part 5678 * of "flags". 5679 * 5680 * Increases sq_count on the outer syncq to keep away outer_insert/remove 5681 * until the outer_exit is finished. 5682 * 5683 * outer_enter is vulnerable to starvation since it does not prevent new 5684 * threads from entering the inner syncqs while it is waiting for sq_count to 5685 * go to zero. 
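 *
 * The two-phase scheme used below -- first mark every inner syncq with
 * SQ_WRITER, then wait for each one's count to drain -- can be sketched
 * at user level as follows (editorial; assumes POSIX threads and a fixed
 * two-member "perimeter"; all names hypothetical):
 *
 *	#include <pthread.h>
 *
 *	#define	NMEMB	2
 *
 *	static pthread_mutex_t mlock[NMEMB] = {
 *		PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
 *	};
 *	static pthread_cond_t mwait[NMEMB] = {
 *		PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER
 *	};
 *	static int mcount[NMEMB];	// readers inside each member
 *	static int mwriter[NMEMB];	// writer flag per member
 *
 *	void
 *	writer_enter(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < NMEMB; i++) {	// phase 1: shut the doors
 *			pthread_mutex_lock(&mlock[i]);
 *			mwriter[i] = 1;
 *			pthread_mutex_unlock(&mlock[i]);
 *		}
 *		for (i = 0; i < NMEMB; i++) {	// phase 2: drain readers
 *			pthread_mutex_lock(&mlock[i]);
 *			while (mcount[i] != 0)
 *				pthread_cond_wait(&mwait[i], &mlock[i]);
 *			pthread_mutex_unlock(&mlock[i]);
 *		}
 *	}
 *
 * Readers would check mwriter[] before bumping mcount[] and signal
 * mwait[] when their count drops to zero.
 *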
5686 */ 5687 void 5688 outer_enter(syncq_t *outer, uint16_t flags) 5689 { 5690 syncq_t *sq; 5691 int wait_needed; 5692 uint16_t count; 5693 5694 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5695 outer->sq_oprev != NULL); 5696 ASSERT(flags & SQ_WRITER); 5697 5698 retry: 5699 mutex_enter(SQLOCK(outer)); 5700 while (outer->sq_flags & flags) { 5701 outer->sq_flags |= SQ_WANTWAKEUP; 5702 cv_wait(&outer->sq_wait, SQLOCK(outer)); 5703 } 5704 5705 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5706 outer->sq_flags |= SQ_WRITER; 5707 outer->sq_count++; 5708 ASSERT(outer->sq_count != 0); /* wraparound */ 5709 wait_needed = 0; 5710 /* 5711 * Set SQ_WRITER on all the inner syncqs while holding 5712 * the SQLOCK on the outer syncq. This ensures that the changing 5713 * of SQ_WRITER is atomic under the outer SQLOCK. 5714 */ 5715 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5716 mutex_enter(SQLOCK(sq)); 5717 count = sq->sq_count; 5718 SQ_PUTLOCKS_ENTER(sq); 5719 sq->sq_flags |= SQ_WRITER; 5720 SUM_SQ_PUTCOUNTS(sq, count); 5721 if (count != 0) 5722 wait_needed = 1; 5723 SQ_PUTLOCKS_EXIT(sq); 5724 mutex_exit(SQLOCK(sq)); 5725 } 5726 mutex_exit(SQLOCK(outer)); 5727 5728 /* 5729 * Get everybody out of the syncqs sequentially. 5730 * Note that we don't actually need to acquire the PUTLOCKS, since 5731 * we have already cleared the fastbit, and set QWRITER. By 5732 * definition, the count can not increase since putnext will 5733 * take the slowlock path (and the purpose of acquiring the 5734 * putlocks was to make sure it didn't increase while we were 5735 * waiting). 5736 * 5737 * Note that we still acquire the PUTLOCKS to be safe. 5738 */ 5739 if (wait_needed) { 5740 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5741 mutex_enter(SQLOCK(sq)); 5742 count = sq->sq_count; 5743 SQ_PUTLOCKS_ENTER(sq); 5744 SUM_SQ_PUTCOUNTS(sq, count); 5745 while (count != 0) { 5746 sq->sq_flags |= SQ_WANTWAKEUP; 5747 SQ_PUTLOCKS_EXIT(sq); 5748 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5749 count = sq->sq_count; 5750 SQ_PUTLOCKS_ENTER(sq); 5751 SUM_SQ_PUTCOUNTS(sq, count); 5752 } 5753 SQ_PUTLOCKS_EXIT(sq); 5754 mutex_exit(SQLOCK(sq)); 5755 } 5756 /* 5757 * Verify that none of the flags got set while we 5758 * were waiting for the sq_counts to drop. 5759 * If this happens we exit and retry entering the 5760 * outer perimeter. 5761 */ 5762 mutex_enter(SQLOCK(outer)); 5763 if (outer->sq_flags & (flags & ~SQ_WRITER)) { 5764 mutex_exit(SQLOCK(outer)); 5765 outer_exit(outer); 5766 goto retry; 5767 } 5768 mutex_exit(SQLOCK(outer)); 5769 } 5770 } 5771 5772 /* 5773 * Drop the write access at the outer perimeter. 5774 * Read access is dropped implicitly (by putnext, put, and leavesq) by 5775 * decrementing sq_count. 5776 */ 5777 void 5778 outer_exit(syncq_t *outer) 5779 { 5780 syncq_t *sq; 5781 int drain_needed; 5782 uint16_t flags; 5783 5784 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5785 outer->sq_oprev != NULL); 5786 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer))); 5787 5788 /* 5789 * Atomically (from the perspective of threads calling become_writer) 5790 * drop the write access at the outer perimeter by holding 5791 * SQLOCK(outer) across all the dropsq calls and the resetting of 5792 * SQ_WRITER. 5793 * This defines a locking order between the outer perimeter 5794 * SQLOCK and the inner perimeter SQLOCKs. 
5795 */ 5796 mutex_enter(SQLOCK(outer)); 5797 flags = outer->sq_flags; 5798 ASSERT(outer->sq_flags & SQ_WRITER); 5799 if (flags & SQ_QUEUED) { 5800 write_now(outer); 5801 flags = outer->sq_flags; 5802 } 5803 5804 /* 5805 * sq_onext is stable since sq_count has not yet been decreased. 5806 * Reset the SQ_WRITER flags in all syncqs. 5807 * After dropping SQ_WRITER on the outer syncq we empty all the 5808 * inner syncqs. 5809 */ 5810 drain_needed = 0; 5811 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5812 drain_needed += dropsq(sq, SQ_WRITER); 5813 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5814 flags &= ~SQ_WRITER; 5815 if (drain_needed) { 5816 outer->sq_flags = flags; 5817 mutex_exit(SQLOCK(outer)); 5818 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5819 emptysq(sq); 5820 mutex_enter(SQLOCK(outer)); 5821 flags = outer->sq_flags; 5822 } 5823 if (flags & SQ_WANTWAKEUP) { 5824 flags &= ~SQ_WANTWAKEUP; 5825 cv_broadcast(&outer->sq_wait); 5826 } 5827 outer->sq_flags = flags; 5828 ASSERT(outer->sq_count > 0); 5829 outer->sq_count--; 5830 mutex_exit(SQLOCK(outer)); 5831 } 5832 5833 /* 5834 * Add another syncq to an outer perimeter. 5835 * Block out all other access to the outer perimeter while it is being 5836 * changed using blocksq. 5837 * Assumes that the caller has *not* done an outer_enter. 5838 * 5839 * Vulnerable to starvation in blocksq. 5840 */ 5841 static void 5842 outer_insert(syncq_t *outer, syncq_t *sq) 5843 { 5844 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5845 outer->sq_oprev != NULL); 5846 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5847 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5848 5849 /* Get exclusive access to the outer perimeter list */ 5850 blocksq(outer, SQ_BLOCKED, 0); 5851 ASSERT(outer->sq_flags & SQ_BLOCKED); 5852 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5853 5854 mutex_enter(SQLOCK(sq)); 5855 sq->sq_outer = outer; 5856 outer->sq_onext->sq_oprev = sq; 5857 sq->sq_onext = outer->sq_onext; 5858 outer->sq_onext = sq; 5859 sq->sq_oprev = outer; 5860 mutex_exit(SQLOCK(sq)); 5861 unblocksq(outer, SQ_BLOCKED, 1); 5862 } 5863 5864 /* 5865 * Remove a syncq from an outer perimeter. 5866 * Block out all other access to the outer perimeter while it is being 5867 * changed using blocksq. 5868 * Assumes that the caller has *not* done an outer_enter. 5869 * 5870 * Vulnerable to starvation in blocksq. 5871 */ 5872 static void 5873 outer_remove(syncq_t *outer, syncq_t *sq) 5874 { 5875 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5876 outer->sq_oprev != NULL); 5877 ASSERT(sq->sq_outer == outer); 5878 5879 /* Get exclusive access to the outer perimeter list */ 5880 blocksq(outer, SQ_BLOCKED, 0); 5881 ASSERT(outer->sq_flags & SQ_BLOCKED); 5882 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5883 5884 mutex_enter(SQLOCK(sq)); 5885 sq->sq_outer = NULL; 5886 sq->sq_onext->sq_oprev = sq->sq_oprev; 5887 sq->sq_oprev->sq_onext = sq->sq_onext; 5888 sq->sq_oprev = sq->sq_onext = NULL; 5889 mutex_exit(SQLOCK(sq)); 5890 unblocksq(outer, SQ_BLOCKED, 1); 5891 } 5892 5893 /* 5894 * Queue a deferred qwriter(OUTER) callback for this outer perimeter. 5895 * If this is the first callback for this outer perimeter then add 5896 * this outer perimeter to the list of outer perimeters that 5897 * the qwriter_outer_thread will process. 5898 * 5899 * Increments sq_count in the outer syncq to prevent the membership 5900 * of the outer perimeter (in terms of inner syncqs) from changing while 5901 * the callback is pending.
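 *
 * queue_writer() below and write_now() further down implement a deferred
 * callback list: enqueue under the lock, then later dequeue and run each
 * callback with the lock dropped. A minimal user-level sketch of that
 * pattern (editorial; assumes POSIX threads; all names hypothetical):
 *
 *	#include <pthread.h>
 *	#include <stddef.h>
 *
 *	typedef struct cb {
 *		struct cb *next;
 *		void (*func)(void *);
 *		void *arg;
 *	} cb_t;
 *
 *	static pthread_mutex_t olock = PTHREAD_MUTEX_INITIALIZER;
 *	static cb_t *evhead, *evtail;
 *
 *	void
 *	defer(cb_t *c)			// cf. queue_writer()
 *	{
 *		pthread_mutex_lock(&olock);
 *		c->next = NULL;
 *		if (evhead == NULL) {
 *			evhead = evtail = c;
 *		} else {
 *			evtail->next = c;
 *			evtail = c;
 *		}
 *		pthread_mutex_unlock(&olock);
 *	}
 *
 *	void
 *	run_deferred(void)		// cf. write_now()
 *	{
 *		cb_t *c;
 *
 *		pthread_mutex_lock(&olock);
 *		while ((c = evhead) != NULL) {
 *			if ((evhead = c->next) == NULL)
 *				evtail = NULL;
 *			pthread_mutex_unlock(&olock);	// drop across callback
 *			c->func(c->arg);
 *			pthread_mutex_lock(&olock);
 *		}
 *		pthread_mutex_unlock(&olock);
 *	}
 *
 * Dropping the lock around each callback is what lets a callback itself
 * call defer() (or take other locks) without deadlocking, mirroring how
 * write_now() drops SQLOCK(outer) around each qwriter callback.
 *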
5902  */
5903 static void
5904 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5905 {
5906 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
5907 
5908 	mp->b_prev = (mblk_t *)func;
5909 	mp->b_queue = q;
5910 	mp->b_next = NULL;
5911 	outer->sq_count++;	/* Decremented when dequeued */
5912 	ASSERT(outer->sq_count != 0);	/* Wraparound */
5913 	if (outer->sq_evhead == NULL) {
5914 		/* First message. */
5915 		outer->sq_evhead = outer->sq_evtail = mp;
5916 		outer->sq_flags |= SQ_EVENTS;
5917 		mutex_exit(SQLOCK(outer));
5918 		STRSTAT(qwr_outer);
5919 		(void) taskq_dispatch(streams_taskq,
5920 		    (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5921 	} else {
5922 		ASSERT(outer->sq_flags & SQ_EVENTS);
5923 		outer->sq_evtail->b_next = mp;
5924 		outer->sq_evtail = mp;
5925 		mutex_exit(SQLOCK(outer));
5926 	}
5927 }
5928 
5929 /*
5930  * Try and upgrade to write access at the outer perimeter. If this can
5931  * not be done without blocking then queue the callback to be done
5932  * by the qwriter_outer_thread.
5933  *
5934  * This routine can only be called from put or service procedures plus
5935  * asynchronous callback routines that have properly entered the queue (with
5936  * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
5937  * associated with q.
5938  */
5939 void
5940 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5941 {
5942 	syncq_t *osq, *sq, *outer;
5943 	int failed;
5944 	uint16_t flags;
5945 
5946 	osq = q->q_syncq;
5947 	outer = osq->sq_outer;
5948 	if (outer == NULL)
5949 		panic("qwriter(PERIM_OUTER): no outer perimeter");
5950 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5951 	    outer->sq_oprev != NULL);
5952 
5953 	mutex_enter(SQLOCK(outer));
5954 	flags = outer->sq_flags;
5955 	/*
5956 	 * If some thread is traversing sq_next, or if we are blocked by
5957 	 * outer_insert or outer_remove, or if we already have queued
5958 	 * callbacks, then queue this callback for later processing.
5959 	 *
5960 	 * Also queue the qwriter when the caller is running at high
5961 	 * dispatch priority (e.g. an interrupt thread), in order to
5962 	 * reduce the time spent running at high IPL.
5963 	 */
5964 	if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
5965 		/*
5966 		 * Queue the become_writer request.
5967 		 * The queueing is atomic under SQLOCK(outer) in order
5968 		 * to synchronize with outer_exit.
5969 		 * queue_writer will drop the outer SQLOCK
5970 		 */
5971 		if (flags & SQ_BLOCKED) {
5972 			/* Must set SQ_WRITER on inner perimeter */
5973 			mutex_enter(SQLOCK(osq));
5974 			osq->sq_flags |= SQ_WRITER;
5975 			mutex_exit(SQLOCK(osq));
5976 		} else {
5977 			if (!(flags & SQ_WRITER)) {
5978 				/*
5979 				 * The outer could have been SQ_BLOCKED thus
5980 				 * SQ_WRITER might not be set on the inner.
5981 				 */
5982 				mutex_enter(SQLOCK(osq));
5983 				osq->sq_flags |= SQ_WRITER;
5984 				mutex_exit(SQLOCK(osq));
5985 			}
5986 			ASSERT(osq->sq_flags & SQ_WRITER);
5987 		}
5988 		queue_writer(outer, func, q, mp);
5989 		return;
5990 	}
5991 	/*
5992 	 * We are half-way to exclusive access to the outer perimeter.
5993 	 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
5994 	 * while the inner syncqs are traversed.
5995 	 */
5996 	outer->sq_count++;
5997 	ASSERT(outer->sq_count != 0);	/* wraparound */
5998 	flags |= SQ_WRITER;
5999 	/*
6000 	 * Check if we can run the function immediately. Mark all
6001 	 * syncqs with the writer flag to prevent new entries into
6002 	 * put and service procedures.
6003 	 *
6004 	 * Set SQ_WRITER on all the inner syncqs while holding
6005 	 * the SQLOCK on the outer syncq. This ensures that the changing
6006 	 * of SQ_WRITER is atomic under the outer SQLOCK.
6007 	 */
6008 	failed = 0;
6009 	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6010 		uint16_t count;
6011 		uint_t maxcnt = (sq == osq) ? 1 : 0;
6012 
6013 		mutex_enter(SQLOCK(sq));
6014 		count = sq->sq_count;
6015 		SQ_PUTLOCKS_ENTER(sq);
6016 		SUM_SQ_PUTCOUNTS(sq, count);
6017 		if (count > maxcnt)
6018 			failed = 1;
6019 		sq->sq_flags |= SQ_WRITER;
6020 		SQ_PUTLOCKS_EXIT(sq);
6021 		mutex_exit(SQLOCK(sq));
6022 	}
6023 	if (failed) {
6024 		/*
6025 		 * Some other thread has a read claim on the outer perimeter.
6026 		 * Queue the callback for deferred processing.
6027 		 *
6028 		 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6029 		 * so that other qwriter(OUTER) calls will queue their
6030 		 * callbacks as well. queue_writer increments sq_count so we
6031 		 * decrement to compensate for our increment.
6032 		 *
6033 		 * Dropping SQ_WRITER enables the writer thread to work
6034 		 * on this outer perimeter.
6035 		 */
6036 		outer->sq_flags = flags;
6037 		queue_writer(outer, func, q, mp);
6038 		/* queue_writer dropped the lock */
6039 		mutex_enter(SQLOCK(outer));
6040 		ASSERT(outer->sq_count > 0);
6041 		outer->sq_count--;
6042 		ASSERT(outer->sq_flags & SQ_WRITER);
6043 		flags = outer->sq_flags;
6044 		flags &= ~SQ_WRITER;
6045 		if (flags & SQ_WANTWAKEUP) {
6046 			flags &= ~SQ_WANTWAKEUP;
6047 			cv_broadcast(&outer->sq_wait);
6048 		}
6049 		outer->sq_flags = flags;
6050 		mutex_exit(SQLOCK(outer));
6051 		return;
6052 	} else {
6053 		outer->sq_flags = flags;
6054 		mutex_exit(SQLOCK(outer));
6055 	}
6056 
6057 	/* Can run it immediately */
6058 	(*func)(q, mp);
6059 
6060 	outer_exit(outer);
6061 }
6062 
6063 /*
6064  * Dequeue all writer callbacks from the outer perimeter and run them.
6065  */
6066 static void
6067 write_now(syncq_t *outer)
6068 {
6069 	mblk_t *mp;
6070 	queue_t *q;
6071 	void (*func)();
6072 
6073 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
6074 	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6075 	    outer->sq_oprev != NULL);
6076 	while ((mp = outer->sq_evhead) != NULL) {
6077 		/*
6078 		 * queues cannot be placed on the queuelist on the outer
6079 		 * perimeter.
6080 		 */
6081 		ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6082 		ASSERT((outer->sq_flags & SQ_EVENTS));
6083 
6084 		outer->sq_evhead = mp->b_next;
6085 		if (outer->sq_evhead == NULL) {
6086 			outer->sq_evtail = NULL;
6087 			outer->sq_flags &= ~SQ_EVENTS;
6088 		}
6089 		ASSERT(outer->sq_count != 0);
6090 		outer->sq_count--;	/* Incremented when enqueued. */
6091 		mutex_exit(SQLOCK(outer));
6092 		/*
6093 		 * Drop the message if the queue is closing.
6094 		 * Make sure that the queue is "claimed" when the callback
6095 		 * is run in order to satisfy various ASSERTs.
6096 		 */
6097 		q = mp->b_queue;
6098 		func = (void (*)())mp->b_prev;
6099 		ASSERT(func != NULL);
6100 		mp->b_next = mp->b_prev = NULL;
6101 		if (q->q_flag & QWCLOSE) {
6102 			freemsg(mp);
6103 		} else {
6104 			claimq(q);
6105 			(*func)(q, mp);
6106 			releaseq(q);
6107 		}
6108 		mutex_enter(SQLOCK(outer));
6109 	}
6110 	ASSERT(MUTEX_HELD(SQLOCK(outer)));
6111 }
6112 
6113 /*
6114  * The list of messages on the inner syncq is effectively hashed
6115  * by destination queue. These destination queues are doubly
6116  * linked lists (hopefully) in priority order. Messages are then
6117  * put on the queue referenced by the q_sqhead/q_sqtail elements.
6118  * Additional messages are linked together by the b_next/b_prev
6119  * elements in the mblk, with (similar to putq()) the first message
6120  * having a NULL b_prev and the last message having a NULL b_next.
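 *
 * An illustrative sketch of the layout described above (queues are
 * linked by q_sqnext/q_sqprev, messages by b_next/b_prev):
 *
 *	sq_head -> qA <-> qB <- sq_tail		(priority order)
 *	           |      |
 *	       q_sqhead  q_sqhead
 *	           |      |
 *	          mp     mp -> mp		(b_next chains)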
6121 * 6122 * Events, such as qwriter callbacks, are put onto a list in FIFO 6123 * order referenced by sq_evhead, and sq_evtail. This is a singly 6124 * linked list, and messages here MUST be processed in the order queued. 6125 */ 6126 6127 /* 6128 * Run the events on the syncq event list (sq_evhead). 6129 * Assumes there is only one claim on the syncq, it is 6130 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6131 * Messages here are processed in order, with the SQ_EXCL bit 6132 * held all the way through till the last message is processed. 6133 */ 6134 void 6135 sq_run_events(syncq_t *sq) 6136 { 6137 mblk_t *bp; 6138 queue_t *qp; 6139 uint16_t flags = sq->sq_flags; 6140 void (*func)(); 6141 6142 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6143 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6144 sq->sq_oprev == NULL) || 6145 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6146 sq->sq_oprev != NULL)); 6147 6148 ASSERT(flags & SQ_EXCL); 6149 ASSERT(sq->sq_count == 1); 6150 6151 /* 6152 * We need to process all of the events on this list. It 6153 * is possible that new events will be added while we are 6154 * away processing a callback, so on every loop, we start 6155 * back at the beginning of the list. 6156 */ 6157 /* 6158 * We have to reaccess sq_evhead since there is a 6159 * possibility of a new entry while we were running 6160 * the callback. 6161 */ 6162 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6163 ASSERT(bp->b_queue->q_syncq == sq); 6164 ASSERT(sq->sq_flags & SQ_EVENTS); 6165 6166 qp = bp->b_queue; 6167 func = (void (*)())bp->b_prev; 6168 ASSERT(func != NULL); 6169 6170 /* 6171 * Messages from the event queue must be taken off in 6172 * FIFO order. 6173 */ 6174 ASSERT(sq->sq_evhead == bp); 6175 sq->sq_evhead = bp->b_next; 6176 6177 if (bp->b_next == NULL) { 6178 /* Deleting last */ 6179 ASSERT(sq->sq_evtail == bp); 6180 sq->sq_evtail = NULL; 6181 sq->sq_flags &= ~SQ_EVENTS; 6182 } 6183 bp->b_prev = bp->b_next = NULL; 6184 ASSERT(bp->b_datap->db_ref != 0); 6185 6186 mutex_exit(SQLOCK(sq)); 6187 6188 (*func)(qp, bp); 6189 6190 mutex_enter(SQLOCK(sq)); 6191 /* 6192 * re-read the flags, since they could have changed. 6193 */ 6194 flags = sq->sq_flags; 6195 ASSERT(flags & SQ_EXCL); 6196 } 6197 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6198 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6199 6200 if (flags & SQ_WANTWAKEUP) { 6201 flags &= ~SQ_WANTWAKEUP; 6202 cv_broadcast(&sq->sq_wait); 6203 } 6204 if (flags & SQ_WANTEXWAKEUP) { 6205 flags &= ~SQ_WANTEXWAKEUP; 6206 cv_broadcast(&sq->sq_exitwait); 6207 } 6208 sq->sq_flags = flags; 6209 } 6210 6211 /* 6212 * Put messages on the event list. 6213 * If we can go exclusive now, do so and process the event list, otherwise 6214 * let the last claim service this list (or wake the sqthread). 6215 * This procedure assumes SQLOCK is held. To run the event list, it 6216 * must be called with no claims. 6217 */ 6218 static void 6219 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6220 { 6221 uint16_t count; 6222 6223 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6224 ASSERT(func != NULL); 6225 6226 /* 6227 * This is a callback. Add it to the list of callbacks 6228 * and see about upgrading. 
6229  */
6230 	mp->b_prev = (mblk_t *)func;
6231 	mp->b_queue = q;
6232 	mp->b_next = NULL;
6233 	if (sq->sq_evhead == NULL) {
6234 		sq->sq_evhead = sq->sq_evtail = mp;
6235 		sq->sq_flags |= SQ_EVENTS;
6236 	} else {
6237 		ASSERT(sq->sq_evtail != NULL);
6238 		ASSERT(sq->sq_evtail->b_next == NULL);
6239 		ASSERT(sq->sq_flags & SQ_EVENTS);
6240 		sq->sq_evtail->b_next = mp;
6241 		sq->sq_evtail = mp;
6242 	}
6243 	/*
6244 	 * We have set SQ_EVENTS, so threads will have to
6245 	 * unwind out of the perimeter, and new entries will
6246 	 * not grab a putlock. But we still need to know
6247 	 * how many threads have already made a claim to the
6248 	 * syncq, so grab the putlocks, and sum the counts.
6249 	 * If there are no claims on the syncq, we can upgrade
6250 	 * to exclusive, and run the event list.
6251 	 * NOTE: We hold the SQLOCK, so we can just grab the
6252 	 * putlocks.
6253 	 */
6254 	count = sq->sq_count;
6255 	SQ_PUTLOCKS_ENTER(sq);
6256 	SUM_SQ_PUTCOUNTS(sq, count);
6257 	/*
6258 	 * This thread holds no claim on the syncq (at least not for this
6259 	 * entry), so we can only upgrade to exclusive access if no other
6260 	 * thread holds a claim.
6261 	 *
6262 	 * If other threads do hold claims, there is nothing more to do
6263 	 * here: whichever thread drops the last claim is responsible for
6264 	 * draining the syncq.
6265 	 */
6266 	if (count > 0) {
6267 		/*
6268 		 * Can't upgrade - other threads inside.
6269 		 */
6270 		SQ_PUTLOCKS_EXIT(sq);
6271 		mutex_exit(SQLOCK(sq));
6272 		return;
6273 	}
6274 	/*
6275 	 * Need to set SQ_EXCL and make a claim on the syncq.
6276 	 */
6277 	ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6278 	sq->sq_flags |= SQ_EXCL;
6279 	ASSERT(sq->sq_count == 0);
6280 	sq->sq_count++;
6281 	SQ_PUTLOCKS_EXIT(sq);
6282 
6283 	/* Process the events list */
6284 	sq_run_events(sq);
6285 
6286 	/*
6287 	 * Release our claim...
6288 	 */
6289 	sq->sq_count--;
6290 
6291 	/*
6292 	 * And release SQ_EXCL.
6293 	 * We don't need to acquire the putlocks to release
6294 	 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6295 	 */
6296 	sq->sq_flags &= ~SQ_EXCL;
6297 
6298 	/*
6299 	 * SQ_EXCL must be clear at this point.
6300 	 */
6301 	ASSERT(!(sq->sq_flags & SQ_EXCL));
6302 
6303 	/*
6304 	 * If anything happened while we were running the
6305 	 * events (or was already there), we need to process
6306 	 * it now. We shouldn't be exclusive, since we
6307 	 * released the perimeter above (plus, we asserted
6308 	 * as much).
6309 	 */
6310 	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6311 		drain_syncq(sq);
6312 	else
6313 		mutex_exit(SQLOCK(sq));
6314 }
6315 
6316 /*
6317  * Perform delayed processing. The caller has to make sure that it is safe
6318  * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6319  * set).
6320  *
6321  * Assume that the caller has NO claims on the syncq. However, a claim
6322  * on the syncq does not indicate that a thread is draining the syncq.
6323  * There may be more claims on the syncq than there are threads draining
6324  * (i.e. #_threads_draining <= sq_count)
6325  *
6326  * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6327  * in order to preserve qwriter(OUTER) ordering constraints.
6328  *
6329  * sq_putcount only needs to be checked when dispatching the queued
6330  * writer call for CIPUT sync queue, but this is handled in sq_run_events.
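 *
 * A minimal calling sketch (this mirrors the tail of sqfill_events()
 * above): the caller hands SQLOCK over to drain_syncq, which drops it
 * before returning.
 *
 *	mutex_enter(SQLOCK(sq));
 *	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
 *		drain_syncq(sq);
 *	else
 *		mutex_exit(SQLOCK(sq));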
6331  */
6332 void
6333 drain_syncq(syncq_t *sq)
6334 {
6335 	queue_t *qp;
6336 	uint16_t count;
6337 	uint16_t type = sq->sq_type;
6338 	uint16_t flags = sq->sq_flags;
6339 	boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6340 
6341 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6342 	    "drain_syncq start:%p", sq);
6343 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6344 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6345 	    sq->sq_oprev == NULL) ||
6346 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6347 	    sq->sq_oprev != NULL));
6348 
6349 	/*
6350 	 * Drop SQ_SERVICE flag.
6351 	 */
6352 	if (bg_service)
6353 		sq->sq_svcflags &= ~SQ_SERVICE;
6354 
6355 	/*
6356 	 * If SQ_EXCL is set, someone else is processing this syncq - let it
6357 	 * finish the job.
6358 	 */
6359 	if (flags & SQ_EXCL) {
6360 		if (bg_service) {
6361 			ASSERT(sq->sq_servcount != 0);
6362 			sq->sq_servcount--;
6363 		}
6364 		mutex_exit(SQLOCK(sq));
6365 		return;
6366 	}
6367 
6368 	/*
6369 	 * This routine can be called by a background thread if
6370 	 * it was scheduled by a hi-priority thread. So, if there are
6371 	 * no messages queued, wake up any waiters and return (remember,
6372 	 * we hold the SQLOCK, so that state cannot change until we release it).
6373 	 */
6374 	if (!(flags & SQ_QUEUED)) {
6375 		if (flags & SQ_WANTWAKEUP) {
6376 			flags &= ~SQ_WANTWAKEUP;
6377 			cv_broadcast(&sq->sq_wait);
6378 		}
6379 		if (flags & SQ_WANTEXWAKEUP) {
6380 			flags &= ~SQ_WANTEXWAKEUP;
6381 			cv_broadcast(&sq->sq_exitwait);
6382 		}
6383 		sq->sq_flags = flags;
6384 		if (bg_service) {
6385 			ASSERT(sq->sq_servcount != 0);
6386 			sq->sq_servcount--;
6387 		}
6388 		mutex_exit(SQLOCK(sq));
6389 		return;
6390 	}
6391 
6392 	/*
6393 	 * If this is not a concurrent put perimeter, we need to
6394 	 * become exclusive to drain. Also, if not CIPUT, we would
6395 	 * not have acquired a putlock, so we don't need to check
6396 	 * the putcounts. If not entering with a claim, we test
6397 	 * for sq_count == 0.
6398 	 */
6399 	type = sq->sq_type;
6400 	if (!(type & SQ_CIPUT)) {
6401 		if (sq->sq_count > 1) {
6402 			if (bg_service) {
6403 				ASSERT(sq->sq_servcount != 0);
6404 				sq->sq_servcount--;
6405 			}
6406 			mutex_exit(SQLOCK(sq));
6407 			return;
6408 		}
6409 		sq->sq_flags |= SQ_EXCL;
6410 	}
6411 
6412 	/*
6413 	 * This is where we make a claim to the syncq.
6414 	 * This can either be done by incrementing a putlock, or
6415 	 * the sq_count. But since we already have the SQLOCK
6416 	 * here, we just bump the sq_count.
6417 	 *
6418 	 * Note that after we make a claim, we need to let the code
6419 	 * fall through to the end of this routine to clean itself
6420 	 * up. A return in the while loop will put the syncq in a
6421 	 * very bad state.
6422 	 */
6423 	sq->sq_count++;
6424 	ASSERT(sq->sq_count != 0);	/* wraparound */
6425 
6426 	while ((flags = sq->sq_flags) & SQ_QUEUED) {
6427 		/*
6428 		 * If we are told to stayaway or went exclusive,
6429 		 * we are done.
6430 		 */
6431 		if (flags & (SQ_STAYAWAY)) {
6432 			break;
6433 		}
6434 
6435 		/*
6436 		 * If there are events to run, do so.
6437 		 * We have one claim to the syncq, so if there are
6438 		 * more than one, other threads are running.
6439 		 */
6440 		if (sq->sq_evhead != NULL) {
6441 			ASSERT(sq->sq_flags & SQ_EVENTS);
6442 
6443 			count = sq->sq_count;
6444 			SQ_PUTLOCKS_ENTER(sq);
6445 			SUM_SQ_PUTCOUNTS(sq, count);
6446 			if (count > 1) {
6447 				SQ_PUTLOCKS_EXIT(sq);
6448 				/* Can't upgrade - other threads inside */
6449 				break;
6450 			}
6451 			ASSERT((flags & SQ_EXCL) == 0);
6452 			sq->sq_flags = flags | SQ_EXCL;
6453 			SQ_PUTLOCKS_EXIT(sq);
6454 			/*
6455 			 * We have the only claim; run the events. SQ_EXCL
6456 			 * stays set across the call (for CIPUT it is cleared below).
6457 			 */
6458 			sq_run_events(sq);
6459 
6460 			/*
6461 			 * If this is a CIPUT perimeter, we need
6462 			 * to drop the SQ_EXCL flag so we can properly
6463 			 * continue draining the syncq.
6464 			 */
6465 			if (type & SQ_CIPUT) {
6466 				ASSERT(sq->sq_flags & SQ_EXCL);
6467 				sq->sq_flags &= ~SQ_EXCL;
6468 			}
6469 
6470 			/*
6471 			 * And go back to the beginning just in case
6472 			 * anything changed while we were away.
6473 			 */
6474 			ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6475 			continue;
6476 		}
6477 
6478 		ASSERT(sq->sq_evhead == NULL);
6479 		ASSERT(!(sq->sq_flags & SQ_EVENTS));
6480 
6481 		/*
6482 		 * Find the queue that is not draining.
6483 		 *
6484 		 * q_draining is protected by QLOCK which we do not hold.
6485 		 * But if it was set, then a thread was draining, and if it gets
6486 		 * cleared, then it was because the thread has successfully
6487 		 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6488 		 * state to happen, a thread needs the SQLOCK which we hold, and
6489 		 * if there was such a flag, we would have already seen it.
6490 		 */
6491 
6492 		for (qp = sq->sq_head;
6493 		    qp != NULL && (qp->q_draining ||
6494 		    (qp->q_sqflags & Q_SQDRAINING));
6495 		    qp = qp->q_sqnext)
6496 			;
6497 
6498 		if (qp == NULL)
6499 			break;
6500 
6501 		/*
6502 		 * We have a queue to work on, and we hold the
6503 		 * SQLOCK and one claim, call qdrain_syncq.
6504 		 * This means we need to release the SQLOCK and
6505 		 * acquire the QLOCK (OK since we have a claim).
6506 		 * Note that qdrain_syncq will actually dequeue
6507 		 * this queue from the sq_head list when it is
6508 		 * convinced all the work is done and release
6509 		 * the QLOCK before returning.
6510 		 */
6511 		qp->q_sqflags |= Q_SQDRAINING;
6512 		mutex_exit(SQLOCK(sq));
6513 		mutex_enter(QLOCK(qp));
6514 		qdrain_syncq(sq, qp);
6515 		mutex_enter(SQLOCK(sq));
6516 
6517 		/* The queue is drained */
6518 		ASSERT(qp->q_sqflags & Q_SQDRAINING);
6519 		qp->q_sqflags &= ~Q_SQDRAINING;
6520 		/*
6521 		 * NOTE: After this point qp should not be used since it may be
6522 		 * closed.
6523 		 */
6524 	}
6525 
6526 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6527 	flags = sq->sq_flags;
6528 
6529 	/*
6530 	 * sq->sq_head cannot change because we hold the
6531 	 * SQLOCK. However, a thread CAN decide that it is no longer
6532 	 * going to drain that queue. If so, it should be due to
6533 	 * a GOAWAY state, and we should see that here.
6534 	 *
6535 	 * This loop is not very efficient. One solution may be adding a second
6536 	 * pointer to the "draining" queue, but it is difficult to do when
6537 	 * queues are inserted in the middle due to priority ordering. Another
6538 	 * possibility is to yank the queue out of the sq list and put it onto
6539 	 * the "draining list" and then put it back if it can't be drained.
6540 	 */
6541 
6542 	ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6543 	    (type & SQ_CI) || sq->sq_head->q_draining);
6544 
6545 	/* Drop SQ_EXCL for non-CIPUT perimeters */
6546 	if (!(type & SQ_CIPUT))
6547 		flags &= ~SQ_EXCL;
6548 	ASSERT((flags & SQ_EXCL) == 0);
6549 
6550 	/* Wake up any waiters. */
6551 	if (flags & SQ_WANTWAKEUP) {
6552 		flags &= ~SQ_WANTWAKEUP;
6553 		cv_broadcast(&sq->sq_wait);
6554 	}
6555 	if (flags & SQ_WANTEXWAKEUP) {
6556 		flags &= ~SQ_WANTEXWAKEUP;
6557 		cv_broadcast(&sq->sq_exitwait);
6558 	}
6559 	sq->sq_flags = flags;
6560 
6561 	ASSERT(sq->sq_count != 0);
6562 	/* Release our claim.
*/ 6563 sq->sq_count--; 6564 6565 if (bg_service) { 6566 ASSERT(sq->sq_servcount != 0); 6567 sq->sq_servcount--; 6568 } 6569 6570 mutex_exit(SQLOCK(sq)); 6571 6572 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6573 "drain_syncq end:%p", sq); 6574 } 6575 6576 6577 /* 6578 * 6579 * qdrain_syncq can be called (currently) from only one of two places: 6580 * drain_syncq 6581 * putnext (or some variation of it). 6582 * and eventually 6583 * qwait(_sig) 6584 * 6585 * If called from drain_syncq, we found it in the list of queues needing 6586 * service, so there is work to be done (or it wouldn't be in the list). 6587 * 6588 * If called from some putnext variation, it was because the 6589 * perimeter is open, but messages are blocking a putnext and 6590 * there is not a thread working on it. Now a thread could start 6591 * working on it while we are getting ready to do so ourself, but 6592 * the thread would set the q_draining flag, and we can spin out. 6593 * 6594 * As for qwait(_sig), I think I shall let it continue to call 6595 * drain_syncq directly (after all, it will get here eventually). 6596 * 6597 * qdrain_syncq has to terminate when: 6598 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering 6599 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering 6600 * 6601 * ASSUMES: 6602 * One claim 6603 * QLOCK held 6604 * SQLOCK not held 6605 * Will release QLOCK before returning 6606 */ 6607 void 6608 qdrain_syncq(syncq_t *sq, queue_t *q) 6609 { 6610 mblk_t *bp; 6611 #ifdef DEBUG 6612 uint16_t count; 6613 #endif 6614 6615 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6616 "drain_syncq start:%p", sq); 6617 ASSERT(q->q_syncq == sq); 6618 ASSERT(MUTEX_HELD(QLOCK(q))); 6619 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6620 /* 6621 * For non-CIPUT perimeters, we should be called with the exclusive bit 6622 * set already. For CIPUT perimeters, we will be doing a concurrent 6623 * drain, so it better not be set. 6624 */ 6625 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT))); 6626 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL))); 6627 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL)); 6628 /* 6629 * All outer pointers are set, or none of them are 6630 */ 6631 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6632 sq->sq_oprev == NULL) || 6633 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6634 sq->sq_oprev != NULL)); 6635 #ifdef DEBUG 6636 count = sq->sq_count; 6637 /* 6638 * This is OK without the putlocks, because we have one 6639 * claim either from the sq_count, or a putcount. We could 6640 * get an erroneous value from other counts, but ours won't 6641 * change, so one way or another, we will have at least a 6642 * value of one. 6643 */ 6644 SUM_SQ_PUTCOUNTS(sq, count); 6645 ASSERT(count >= 1); 6646 #endif /* DEBUG */ 6647 6648 /* 6649 * The first thing to do is find out if a thread is already draining 6650 * this queue. If so, we are done, just return. 6651 */ 6652 if (q->q_draining) { 6653 mutex_exit(QLOCK(q)); 6654 return; 6655 } 6656 6657 /* 6658 * If the perimeter is exclusive, there is nothing we can do right now, 6659 * go away. Note that there is nothing to prevent this case from 6660 * changing right after this check, but the spin-out will catch it. 6661 */ 6662 6663 /* Tell other threads that we are draining this queue */ 6664 q->q_draining = 1; /* Protected by QLOCK */ 6665 6666 /* 6667 * If there is nothing to do, clear QFULL as necessary. This caters for 6668 * the case where an empty queue was enqueued onto the syncq. 
6669 */ 6670 if (q->q_sqhead == NULL) { 6671 ASSERT(q->q_syncqmsgs == 0); 6672 mutex_exit(QLOCK(q)); 6673 clr_qfull(q); 6674 mutex_enter(QLOCK(q)); 6675 } 6676 6677 /* 6678 * Note that q_sqhead must be re-checked here in case another message 6679 * was enqueued whilst QLOCK was dropped during the call to clr_qfull. 6680 */ 6681 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) { 6682 /* 6683 * Because we can enter this routine just because a putnext is 6684 * blocked, we need to spin out if the perimeter wants to go 6685 * exclusive as well as just blocked. We need to spin out also 6686 * if events are queued on the syncq. 6687 * Don't check for SQ_EXCL, because non-CIPUT perimeters would 6688 * set it, and it can't become exclusive while we hold a claim. 6689 */ 6690 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) { 6691 break; 6692 } 6693 6694 #ifdef DEBUG 6695 /* 6696 * Since we are in qdrain_syncq, we already know the queue, 6697 * but for sanity, we want to check this against the qp that 6698 * was passed in by bp->b_queue. 6699 */ 6700 6701 ASSERT(bp->b_queue == q); 6702 ASSERT(bp->b_queue->q_syncq == sq); 6703 bp->b_queue = NULL; 6704 6705 /* 6706 * We would have the following check in the DEBUG code: 6707 * 6708 * if (bp->b_prev != NULL) { 6709 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp); 6710 * } 6711 * 6712 * This can't be done, however, since IP modifies qinfo 6713 * structure at run-time (switching between IPv4 qinfo and IPv6 6714 * qinfo), invalidating the check. 6715 * So the assignment to func is left here, but the ASSERT itself 6716 * is removed until the whole issue is resolved. 6717 */ 6718 #endif 6719 ASSERT(q->q_sqhead == bp); 6720 q->q_sqhead = bp->b_next; 6721 bp->b_prev = bp->b_next = NULL; 6722 ASSERT(q->q_syncqmsgs > 0); 6723 mutex_exit(QLOCK(q)); 6724 6725 ASSERT(bp->b_datap->db_ref != 0); 6726 6727 (void) (*q->q_qinfo->qi_putp)(q, bp); 6728 6729 mutex_enter(QLOCK(q)); 6730 6731 /* 6732 * q_syncqmsgs should only be decremented after executing the 6733 * put procedure to avoid message re-ordering. This is due to an 6734 * optimisation in putnext() which can call the put procedure 6735 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED 6736 * being set). 6737 * 6738 * We also need to clear QFULL in the next service procedure 6739 * queue if this is the last message destined for that queue. 6740 * 6741 * It would make better sense to have some sort of tunable for 6742 * the low water mark, but these semantics are not yet defined. 6743 * So, alas, we use a constant. 6744 */ 6745 if (--q->q_syncqmsgs == 0) { 6746 mutex_exit(QLOCK(q)); 6747 clr_qfull(q); 6748 mutex_enter(QLOCK(q)); 6749 } 6750 6751 /* 6752 * Always clear SQ_EXCL when CIPUT in order to handle 6753 * qwriter(INNER). The putp() can call qwriter and get exclusive 6754 * access IFF this is the only claim. So, we need to test for 6755 * this possibility, acquire the mutex and clear the bit. 6756 */ 6757 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) { 6758 mutex_enter(SQLOCK(sq)); 6759 sq->sq_flags &= ~SQ_EXCL; 6760 mutex_exit(SQLOCK(sq)); 6761 } 6762 } 6763 6764 /* 6765 * We should either have no messages on this queue, or we were told to 6766 * goaway by a waiter (which we will wake up at the end of this 6767 * function). 6768 */ 6769 ASSERT((q->q_sqhead == NULL) || 6770 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS))); 6771 6772 ASSERT(MUTEX_HELD(QLOCK(q))); 6773 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6774 6775 /* Remove the q from the syncq list if all the messages are drained. 
 */
6776 	if (q->q_sqhead == NULL) {
6777 		ASSERT(q->q_syncqmsgs == 0);
6778 		mutex_enter(SQLOCK(sq));
6779 		if (q->q_sqflags & Q_SQQUEUED)
6780 			SQRM_Q(sq, q);
6781 		mutex_exit(SQLOCK(sq));
6782 		/*
6783 		 * Since the queue is removed from the list, reset its priority.
6784 		 */
6785 		q->q_spri = 0;
6786 	}
6787 
6788 	/*
6789 	 * Remember, the q_draining flag is used to let another thread know
6790 	 * that there is a thread currently draining the messages for a queue.
6791 	 * Since we are now done with this queue (even if there may be messages
6792 	 * still there), we need to clear this flag so some thread will work on
6793 	 * it if needed.
6794 	 */
6795 	ASSERT(q->q_draining);
6796 	q->q_draining = 0;
6797 
6798 	/* Called with a claim, so OK to drop all locks. */
6799 	mutex_exit(QLOCK(q));
6800 
6801 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6802 	    "drain_syncq end:%p", sq);
6803 }
6804 /* END OF QDRAIN_SYNCQ */
6805 
6806 
6807 /*
6808  * This is the mate to qdrain_syncq, except that it is putting the message onto
6809  * the queue instead of draining. Since the message is destined for the selected
6810  * queue, there is no need to record the target function: the message will simply
6811  * be handed to that queue's put routine. For debug kernels, this routine records
6812  * it anyway, just in case.
6813  *
6814  * After the message is enqueued on the syncq, it calls putnext_tail()
6815  * which will schedule a background thread to actually process the message.
6816  *
6817  * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6818  * SQLOCK(sq) and QLOCK(q) are not held.
6819  */
6820 void
6821 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6822 {
6823 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6824 	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6825 	ASSERT(sq->sq_count > 0);
6826 	ASSERT(q->q_syncq == sq);
6827 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6828 	    sq->sq_oprev == NULL) ||
6829 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6830 	    sq->sq_oprev != NULL));
6831 
6832 	mutex_enter(QLOCK(q));
6833 
6834 #ifdef DEBUG
6835 	/*
6836 	 * This is used for debug in the qfill_syncq/qdrain_syncq case
6837 	 * to trace the queue that the message is intended for. Note
6838 	 * that the original use was to identify the queue and function
6839 	 * to call on the drain. In the new syncq, we have the context
6840 	 * of the queue that we are draining, so call its putproc and
6841 	 * don't rely on the saved values. But for debug this is still
6842 	 * useful information.
6843 	 */
6844 	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6845 	mp->b_queue = q;
6846 	mp->b_next = NULL;
6847 #endif
6848 	ASSERT(q->q_syncq == sq);
6849 	/*
6850 	 * Enqueue the message on the list.
6851 	 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6852 	 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
6853 	 */
6854 	SQPUT_MP(q, mp);
6855 	mutex_enter(SQLOCK(sq));
6856 
6857 	/*
6858 	 * And queue on syncq for scheduling, if not already queued.
6859 	 * Note that we need the SQLOCK for this, and for testing flags
6860 	 * at the end to see if we will drain. So grab it now, and
6861 	 * release it before we call qdrain_syncq or return.
6862 	 */
6863 	if (!(q->q_sqflags & Q_SQQUEUED)) {
6864 		q->q_spri = curthread->t_pri;
6865 		SQPUT_Q(sq, q);
6866 	}
6867 #ifdef DEBUG
6868 	else {
6869 		/*
6870 		 * All of these conditions MUST be true!
6871 		 */
6872 		ASSERT(sq->sq_tail != NULL);
6873 		if (sq->sq_tail == sq->sq_head) {
6874 			ASSERT((q->q_sqprev == NULL) &&
6875 			    (q->q_sqnext == NULL));
6876 		} else {
6877 			ASSERT((q->q_sqprev != NULL) ||
6878 			    (q->q_sqnext != NULL));
6879 		}
6880 		ASSERT(sq->sq_flags & SQ_QUEUED);
6881 		ASSERT(q->q_syncqmsgs != 0);
6882 		ASSERT(q->q_sqflags & Q_SQQUEUED);
6883 	}
6884 #endif
6885 	mutex_exit(QLOCK(q));
6886 	/*
6887 	 * SQLOCK is still held, so sq_count can be safely decremented.
6888 	 */
6889 	sq->sq_count--;
6890 
6891 	putnext_tail(sq, q, 0);
6892 	/* Should not reference sq or q after this point. */
6893 }
6894 
6895 /* End of qfill_syncq */
6896 
6897 /*
6898  * Remove all messages from a syncq (if qp is NULL) or remove all messages
6899  * that would be put into qp by drain_syncq.
6900  * Used when deleting the syncq (qp == NULL) or when detaching
6901  * a queue (qp != NULL).
6902  * Return non-zero if one or more messages were freed.
6903  *
6904  * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
6905  * sq_putlocks are used.
6906  *
6907  * NOTE: This function assumes that it is called from the close() context and
6908  * that all the queues in the syncq are going away. For this reason it doesn't
6909  * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6910  * currently valid, but it would be worth rethinking this function so that it
6911  * behaves properly in other cases as well.
6912  */
6913 int
6914 flush_syncq(syncq_t *sq, queue_t *qp)
6915 {
6916 	mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6917 	queue_t *q;
6918 	int ret = 0;
6919 
6920 	mutex_enter(SQLOCK(sq));
6921 
6922 	/*
6923 	 * Before we leave, we need to make sure there are no
6924 	 * events listed for this queue. All events for this queue
6925 	 * will just be freed.
6926 	 */
6927 	if (qp != NULL && sq->sq_evhead != NULL) {
6928 		ASSERT(sq->sq_flags & SQ_EVENTS);
6929 
6930 		mp_prev = NULL;
6931 		for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6932 			mp_next = bp->b_next;
6933 			if (bp->b_queue == qp) {
6934 				/* Delete this message */
6935 				if (mp_prev != NULL) {
6936 					mp_prev->b_next = mp_next;
6937 					/*
6938 					 * Update sq_evtail if the last element
6939 					 * is removed.
6940 					 */
6941 					if (bp == sq->sq_evtail) {
6942 						ASSERT(mp_next == NULL);
6943 						sq->sq_evtail = mp_prev;
6944 					}
6945 				} else
6946 					sq->sq_evhead = mp_next;
6947 				if (sq->sq_evhead == NULL)
6948 					sq->sq_flags &= ~SQ_EVENTS;
6949 				bp->b_prev = bp->b_next = NULL;
6950 				freemsg(bp);
6951 				ret++;
6952 			} else {
6953 				mp_prev = bp;
6954 			}
6955 		}
6956 	}
6957 
6958 	/*
6959 	 * Walk sq_head and:
6960 	 *	- match qp if qp is set, remove its messages
6961 	 *	- all if qp is not set
6962 	 */
6963 	q = sq->sq_head;
6964 	while (q != NULL) {
6965 		ASSERT(q->q_syncq == sq);
6966 		if ((qp == NULL) || (qp == q)) {
6967 			/*
6968 			 * Yank the messages as a list off the queue
6969 			 */
6970 			mp_head = q->q_sqhead;
6971 			/*
6972 			 * We do not have QLOCK(q) here (which is safe due to
6973 			 * assumptions mentioned above). To obtain the lock we
6974 			 * need to release SQLOCK which may allow lots of things
6975 			 * to change upon us. This place requires more analysis.
6976 			 */
6977 			q->q_sqhead = q->q_sqtail = NULL;
6978 			ASSERT(mp_head->b_queue &&
6979 			    mp_head->b_queue->q_syncq == sq);
6980 
6981 			/*
6982 			 * Free each of the messages.
6983 			 */
6984 			for (bp = mp_head; bp != NULL; bp = mp_next) {
6985 				mp_next = bp->b_next;
6986 				bp->b_prev = bp->b_next = NULL;
6987 				freemsg(bp);
6988 				ret++;
6989 			}
6990 			/*
6991 			 * Now remove the queue from the syncq.
6992 			 */
6993 			ASSERT(q->q_sqflags & Q_SQQUEUED);
6994 			SQRM_Q(sq, q);
6995 			q->q_spri = 0;
6996 			q->q_syncqmsgs = 0;
6997 
6998 			/*
6999 			 * If qp was specified, we are done with it and are
7000 			 * going to drop SQLOCK(sq) and return. We wake up syncq
7001 			 * waiters while we still have the SQLOCK.
7002 			 */
7003 			if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7004 				sq->sq_flags &= ~SQ_WANTWAKEUP;
7005 				cv_broadcast(&sq->sq_wait);
7006 			}
7007 			/* Drop SQLOCK across clr_qfull */
7008 			mutex_exit(SQLOCK(sq));
7009 
7010 			/*
7011 			 * We avoid doing the test that drain_syncq does and
7012 			 * unconditionally clear qfull for every flushed
7013 			 * message. Since flush_syncq is only called during
7014 			 * close this should not be a problem.
7015 			 */
7016 			clr_qfull(q);
7017 			if (qp != NULL) {
7018 				return (ret);
7019 			} else {
7020 				mutex_enter(SQLOCK(sq));
7021 				/*
7022 				 * The head was removed by SQRM_Q above.
7023 				 * Reread the new head and flush it.
7024 				 */
7025 				q = sq->sq_head;
7026 			}
7027 		} else {
7028 			q = q->q_sqnext;
7029 		}
7030 		ASSERT(MUTEX_HELD(SQLOCK(sq)));
7031 	}
7032 
7033 	if (sq->sq_flags & SQ_WANTWAKEUP) {
7034 		sq->sq_flags &= ~SQ_WANTWAKEUP;
7035 		cv_broadcast(&sq->sq_wait);
7036 	}
7037 
7038 	mutex_exit(SQLOCK(sq));
7039 	return (ret);
7040 }
7041 
7042 /*
7043  * Propagate all messages associated with the specified queue from its
7044  * syncq to the next syncq. If the queue is attached to a driver or if the
7045  * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7046  *
7047  * Assumes that the stream is strlock()'ed. We don't come here if there
7048  * are no messages to propagate.
7049  *
7050  * NOTE : If the queue is attached to a driver, all the messages are freed
7051  * as there is no point in propagating the messages from the driver syncq
7052  * to the closing stream head which will in turn get freed later.
7053  */
7054 static int
7055 propagate_syncq(queue_t *qp)
7056 {
7057 	mblk_t *bp, *head, *tail, *prev, *next;
7058 	syncq_t *sq;
7059 	queue_t *nqp;
7060 	syncq_t *nsq;
7061 	boolean_t isdriver;
7062 	int moved = 0;
7063 	uint16_t flags;
7064 	pri_t priority = curthread->t_pri;
7065 #ifdef DEBUG
7066 	void (*func)();
7067 #endif
7068 
7069 	sq = qp->q_syncq;
7070 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
7071 	/* debug macro */
7072 	SQ_PUTLOCKS_HELD(sq);
7073 	/*
7074 	 * As entersq() does not increment the sq_count for
7075 	 * the write side, check sq_count for non-QPERQ
7076 	 * perimeters alone.
7077 	 */
7078 	ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7079 
7080 	/*
7081 	 * propagate_syncq() can be called because of messages on the
7082 	 * queue syncq or because of events on the queue syncq. Do actual
7083 	 * message propagation if there are any messages.
7084 	 */
7085 	if (qp->q_syncqmsgs) {
7086 		isdriver = (qp->q_flag & QISDRV);
7087 
7088 		if (!isdriver) {
7089 			nqp = qp->q_next;
7090 			nsq = nqp->q_syncq;
7091 			ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7092 			/* debug macro */
7093 			SQ_PUTLOCKS_HELD(nsq);
7094 #ifdef DEBUG
7095 			func = (void (*)())nqp->q_qinfo->qi_putp;
7096 #endif
7097 		}
7098 
7099 		SQRM_Q(sq, qp);
7100 		priority = MAX(qp->q_spri, priority);
7101 		qp->q_spri = 0;
7102 		head = qp->q_sqhead;
7103 		tail = qp->q_sqtail;
7104 		qp->q_sqhead = qp->q_sqtail = NULL;
7105 		qp->q_syncqmsgs = 0;
7106 
7107 		/*
7108 		 * Walk the list of messages, and free them if this is a driver,
7109 		 * otherwise reset the b_prev and b_queue value to the new putp.
7110 		 * Afterward, we will just add the head to the end of the next
7111 		 * syncq, and point the tail to the end of this one.
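 *
 * Sketch of the splice performed below (illustrative, non-driver case);
 * the whole head..tail chain moves in one step:
 *
 *	before:	qp:  q_sqhead -> head -> ... -> tail
 *		nqp: q_sqhead -> ... -> q_sqtail
 *	after:	qp:  q_sqhead == q_sqtail == NULL
 *		nqp: q_sqhead -> ... -> old q_sqtail -> head -> ... -> tail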
7112 */ 7113 7114 for (bp = head; bp != NULL; bp = next) { 7115 next = bp->b_next; 7116 if (isdriver) { 7117 bp->b_prev = bp->b_next = NULL; 7118 freemsg(bp); 7119 continue; 7120 } 7121 /* Change the q values for this message */ 7122 bp->b_queue = nqp; 7123 #ifdef DEBUG 7124 bp->b_prev = (mblk_t *)func; 7125 #endif 7126 moved++; 7127 } 7128 /* 7129 * Attach list of messages to the end of the new queue (if there 7130 * is a list of messages). 7131 */ 7132 7133 if (!isdriver && head != NULL) { 7134 ASSERT(tail != NULL); 7135 if (nqp->q_sqhead == NULL) { 7136 nqp->q_sqhead = head; 7137 } else { 7138 ASSERT(nqp->q_sqtail != NULL); 7139 nqp->q_sqtail->b_next = head; 7140 } 7141 nqp->q_sqtail = tail; 7142 /* 7143 * When messages are moved from high priority queue to 7144 * another queue, the destination queue priority is 7145 * upgraded. 7146 */ 7147 7148 if (priority > nqp->q_spri) 7149 nqp->q_spri = priority; 7150 7151 SQPUT_Q(nsq, nqp); 7152 7153 nqp->q_syncqmsgs += moved; 7154 ASSERT(nqp->q_syncqmsgs != 0); 7155 } 7156 } 7157 7158 /* 7159 * Before we leave, we need to make sure there are no 7160 * events listed for this queue. All events for this queue 7161 * will just be freed. 7162 */ 7163 if (sq->sq_evhead != NULL) { 7164 ASSERT(sq->sq_flags & SQ_EVENTS); 7165 prev = NULL; 7166 for (bp = sq->sq_evhead; bp != NULL; bp = next) { 7167 next = bp->b_next; 7168 if (bp->b_queue == qp) { 7169 /* Delete this message */ 7170 if (prev != NULL) { 7171 prev->b_next = next; 7172 /* 7173 * Update sq_evtail if the last element 7174 * is removed. 7175 */ 7176 if (bp == sq->sq_evtail) { 7177 ASSERT(next == NULL); 7178 sq->sq_evtail = prev; 7179 } 7180 } else 7181 sq->sq_evhead = next; 7182 if (sq->sq_evhead == NULL) 7183 sq->sq_flags &= ~SQ_EVENTS; 7184 bp->b_prev = bp->b_next = NULL; 7185 freemsg(bp); 7186 } else { 7187 prev = bp; 7188 } 7189 } 7190 } 7191 7192 flags = sq->sq_flags; 7193 7194 /* Wake up any waiter before leaving. */ 7195 if (flags & SQ_WANTWAKEUP) { 7196 flags &= ~SQ_WANTWAKEUP; 7197 cv_broadcast(&sq->sq_wait); 7198 } 7199 sq->sq_flags = flags; 7200 7201 return (moved); 7202 } 7203 7204 /* 7205 * Try and upgrade to exclusive access at the inner perimeter. If this can 7206 * not be done without blocking then request will be queued on the syncq 7207 * and drain_syncq will run it later. 7208 * 7209 * This routine can only be called from put or service procedures plus 7210 * asynchronous callback routines that have properly entered the queue (with 7211 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq 7212 * associated with q. 7213 */ 7214 void 7215 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7216 { 7217 syncq_t *sq = q->q_syncq; 7218 uint16_t count; 7219 7220 mutex_enter(SQLOCK(sq)); 7221 count = sq->sq_count; 7222 SQ_PUTLOCKS_ENTER(sq); 7223 SUM_SQ_PUTCOUNTS(sq, count); 7224 ASSERT(count >= 1); 7225 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7226 7227 if (count == 1) { 7228 /* 7229 * Can upgrade. This case also handles nested qwriter calls 7230 * (when the qwriter callback function calls qwriter). In that 7231 * case SQ_EXCL is already set. 7232 */ 7233 sq->sq_flags |= SQ_EXCL; 7234 SQ_PUTLOCKS_EXIT(sq); 7235 mutex_exit(SQLOCK(sq)); 7236 (*func)(q, mp); 7237 /* 7238 * Assumes that leavesq, putnext, and drain_syncq will reset 7239 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7240 * until putnext, leavesq, or drain_syncq drops it. 
7241 * That way we handle nested qwriter(INNER) without dropping 7242 * SQ_EXCL until the outermost qwriter callback routine is 7243 * done. 7244 */ 7245 return; 7246 } 7247 SQ_PUTLOCKS_EXIT(sq); 7248 sqfill_events(sq, q, mp, func); 7249 } 7250 7251 /* 7252 * Synchronous callback support functions 7253 */ 7254 7255 /* 7256 * Allocate a callback parameter structure. 7257 * Assumes that caller initializes the flags and the id. 7258 * Acquires SQLOCK(sq) if non-NULL is returned. 7259 */ 7260 callbparams_t * 7261 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7262 { 7263 callbparams_t *cbp; 7264 size_t size = sizeof (callbparams_t); 7265 7266 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7267 7268 /* 7269 * Only try tryhard allocation if the caller is ready to panic. 7270 * Otherwise just fail. 7271 */ 7272 if (cbp == NULL) { 7273 if (kmflags & KM_PANIC) 7274 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7275 &size, kmflags); 7276 else 7277 return (NULL); 7278 } 7279 7280 ASSERT(size >= sizeof (callbparams_t)); 7281 cbp->cbp_size = size; 7282 cbp->cbp_sq = sq; 7283 cbp->cbp_func = func; 7284 cbp->cbp_arg = arg; 7285 mutex_enter(SQLOCK(sq)); 7286 cbp->cbp_next = sq->sq_callbpend; 7287 sq->sq_callbpend = cbp; 7288 return (cbp); 7289 } 7290 7291 void 7292 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7293 { 7294 callbparams_t **pp, *p; 7295 7296 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7297 7298 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7299 if (p == cbp) { 7300 *pp = p->cbp_next; 7301 kmem_free(p, p->cbp_size); 7302 return; 7303 } 7304 } 7305 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7306 "callbparams_free: not found\n")); 7307 } 7308 7309 void 7310 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7311 { 7312 callbparams_t **pp, *p; 7313 7314 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7315 7316 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7317 if (p->cbp_id == id && p->cbp_flags == flag) { 7318 *pp = p->cbp_next; 7319 kmem_free(p, p->cbp_size); 7320 return; 7321 } 7322 } 7323 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7324 "callbparams_free_id: not found\n")); 7325 } 7326 7327 /* 7328 * Callback wrapper function used by once-only callbacks that can be 7329 * cancelled (qtimeout and qbufcall) 7330 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7331 * cancelled by the qun* functions. 
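 *
 * Illustrative usage sketch (hedged; the real qtimeout()/qbufcall()
 * callers live elsewhere in STREAMS): the caller allocates the
 * parameter block, fills in the cancellation id and flags, and then
 * arranges for qcallbwrapper(cbp) to run asynchronously, e.g.
 *
 *	cbp = callbparams_alloc(sq, func, arg, KM_SLEEP);
 *	...initialize cbp->cbp_flags and cbp->cbp_id...
 *	mutex_exit(SQLOCK(sq));		(callbparams_alloc returns
 *					with SQLOCK(sq) held)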
7332  */
7333 void
7334 qcallbwrapper(void *arg)
7335 {
7336 	callbparams_t *cbp = arg;
7337 	syncq_t *sq;
7338 	uint16_t count = 0;
7339 	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7340 	uint16_t type;
7341 
7342 	sq = cbp->cbp_sq;
7343 	mutex_enter(SQLOCK(sq));
7344 	type = sq->sq_type;
7345 	if (!(type & SQ_CICB)) {
7346 		count = sq->sq_count;
7347 		SQ_PUTLOCKS_ENTER(sq);
7348 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7349 		SUM_SQ_PUTCOUNTS(sq, count);
7350 		sq->sq_needexcl++;
7351 		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
7352 		waitflags |= SQ_MESSAGES;
7353 	}
7354 	/* Cannot handle exclusive entry at the outer perimeter */
7355 	ASSERT(type & SQ_COCB);
7356 
7357 	while ((sq->sq_flags & waitflags) ||
7358 	    (!(type & SQ_CICB) && count != 0)) {
7359 		if ((sq->sq_callbflags & cbp->cbp_flags) &&
7360 		    (sq->sq_cancelid == cbp->cbp_id)) {
7361 			/* timeout has been cancelled */
7362 			sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7363 			callbparams_free(sq, cbp);
7364 			if (!(type & SQ_CICB)) {
7365 				ASSERT(sq->sq_needexcl > 0);
7366 				sq->sq_needexcl--;
7367 				if (sq->sq_needexcl == 0) {
7368 					SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7369 				}
7370 				SQ_PUTLOCKS_EXIT(sq);
7371 			}
7372 			mutex_exit(SQLOCK(sq));
7373 			return;
7374 		}
7375 		sq->sq_flags |= SQ_WANTWAKEUP;
7376 		if (!(type & SQ_CICB)) {
7377 			SQ_PUTLOCKS_EXIT(sq);
7378 		}
7379 		cv_wait(&sq->sq_wait, SQLOCK(sq));
7380 		if (!(type & SQ_CICB)) {
7381 			count = sq->sq_count;
7382 			SQ_PUTLOCKS_ENTER(sq);
7383 			SUM_SQ_PUTCOUNTS(sq, count);
7384 		}
7385 	}
7386 
7387 	sq->sq_count++;
7388 	ASSERT(sq->sq_count != 0);	/* Wraparound */
7389 	if (!(type & SQ_CICB)) {
7390 		ASSERT(count == 0);
7391 		sq->sq_flags |= SQ_EXCL;
7392 		ASSERT(sq->sq_needexcl > 0);
7393 		sq->sq_needexcl--;
7394 		if (sq->sq_needexcl == 0) {
7395 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7396 		}
7397 		SQ_PUTLOCKS_EXIT(sq);
7398 	}
7399 
7400 	mutex_exit(SQLOCK(sq));
7401 
7402 	cbp->cbp_func(cbp->cbp_arg);
7403 
7404 	/*
7405 	 * We drop the lock only because leavesq will re-acquire it.
7406 	 * A possible optimization is to inline leavesq here.
7407 	 */
7408 	mutex_enter(SQLOCK(sq));
7409 	callbparams_free(sq, cbp);
7410 	mutex_exit(SQLOCK(sq));
7411 	leavesq(sq, SQ_CALLBACK);
7412 }
7413 
7414 /*
7415  * No need to grab sq_putlocks here. See comment in strsubr.h that
7416  * explains when sq_putlocks are used.
7417  *
7418  * sq_count (or one of the sq_putcounts) has already been
7419  * decremented by the caller, and if SQ_QUEUED, we need to call
7420  * drain_syncq (the global syncq drain).
7421  * If putnext_tail is called with the SQ_EXCL bit set, we are in
7422  * one of two states: either this is a non-CIPUT perimeter (and we
7423  * need to clear the bit), or we went exclusive in the put procedure.
7424  * In any case, we want to clear the bit now, and it is probably
7425  * easier to do this at the beginning of this function (remember, we hold
7426  * the SQLOCK). Lastly, if there are other messages queued
7427  * on the syncq (and not for our destination), enable the syncq
7428  * for background work.
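 *
 * A minimal caller sketch (this mirrors qfill_syncq() above): the
 * claim is dropped under SQLOCK, and putnext_tail drops SQLOCK itself.
 *
 *	mutex_enter(SQLOCK(sq));
 *	sq->sq_count--;
 *	putnext_tail(sq, qp, 0);
 *	...do not reference sq or qp after this point...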
7428 */ 7429 7430 /* ARGSUSED */ 7431 void 7432 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7433 { 7434 uint16_t flags = sq->sq_flags; 7435 7436 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7437 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7438 7439 /* Clear SQ_EXCL if set in passflags */ 7440 if (passflags & SQ_EXCL) { 7441 flags &= ~SQ_EXCL; 7442 } 7443 if (flags & SQ_WANTWAKEUP) { 7444 flags &= ~SQ_WANTWAKEUP; 7445 cv_broadcast(&sq->sq_wait); 7446 } 7447 if (flags & SQ_WANTEXWAKEUP) { 7448 flags &= ~SQ_WANTEXWAKEUP; 7449 cv_broadcast(&sq->sq_exitwait); 7450 } 7451 sq->sq_flags = flags; 7452 7453 /* 7454 * We have cleared SQ_EXCL if we were asked to, and started 7455 * the wakeup process for waiters. If there are no writers 7456 * then we need to drain the syncq if we were told to, or 7457 * enable the background thread to do it. 7458 */ 7459 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7460 if ((passflags & SQ_QUEUED) || 7461 (sq->sq_svcflags & SQ_DISABLED)) { 7462 /* drain_syncq will take care of events in the list */ 7463 drain_syncq(sq); 7464 return; 7465 } else if (flags & SQ_QUEUED) { 7466 sqenable(sq); 7467 } 7468 } 7469 /* Drop the SQLOCK on exit */ 7470 mutex_exit(SQLOCK(sq)); 7471 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7472 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7473 } 7474 7475 void 7476 set_qend(queue_t *q) 7477 { 7478 mutex_enter(QLOCK(q)); 7479 if (!O_SAMESTR(q)) 7480 q->q_flag |= QEND; 7481 else 7482 q->q_flag &= ~QEND; 7483 mutex_exit(QLOCK(q)); 7484 q = _OTHERQ(q); 7485 mutex_enter(QLOCK(q)); 7486 if (!O_SAMESTR(q)) 7487 q->q_flag |= QEND; 7488 else 7489 q->q_flag &= ~QEND; 7490 mutex_exit(QLOCK(q)); 7491 } 7492 7493 /* 7494 * Set QFULL in next service procedure queue (that cares) if not already 7495 * set and if there are already more messages on the syncq than 7496 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7497 * any syncq. 7498 * 7499 * The fq here is the next queue with a service procedure. This is where 7500 * we would fail canputnext, so this is where we need to set QFULL. 7501 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7502 * 7503 * We already have QLOCK at this point. To avoid cross-locks with 7504 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7505 * SQLOCK and sd_reflock, we need to drop respective locks first. 7506 */ 7507 void 7508 set_qfull(queue_t *q) 7509 { 7510 queue_t *fq = NULL; 7511 7512 ASSERT(MUTEX_HELD(QLOCK(q))); 7513 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7514 (q->q_syncqmsgs > sq_max_size)) { 7515 if ((fq = q->q_nfsrv) == q) { 7516 fq->q_flag |= QFULL; 7517 } else { 7518 mutex_exit(QLOCK(q)); 7519 mutex_enter(QLOCK(fq)); 7520 fq->q_flag |= QFULL; 7521 mutex_exit(QLOCK(fq)); 7522 mutex_enter(QLOCK(q)); 7523 } 7524 } 7525 } 7526 7527 void 7528 clr_qfull(queue_t *q) 7529 { 7530 queue_t *oq = q; 7531 7532 q = q->q_nfsrv; 7533 /* Fast check if there is any work to do before getting the lock. */ 7534 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7535 return; 7536 } 7537 7538 /* 7539 * Do not reset QFULL (and backenable) if the q_count is the reason 7540 * for QFULL being set. 7541 */ 7542 mutex_enter(QLOCK(q)); 7543 /* 7544 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7545 * Hence clear the QFULL. 7546 * If both q_count and q_mblkcnt are less than the hiwat mark, 7547 * clear the QFULL. 
7548  */
7549 	if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7550 	    (q->q_mblkcnt < q->q_hiwat))) {
7551 		q->q_flag &= ~QFULL;
7552 		/*
7553 		 * A little more confusing, how about this way:
7554 		 * if someone wants to write,
7555 		 * AND
7556 		 *	both counts are less than the lowat mark
7557 		 *	OR
7558 		 *	the lowat mark is zero
7559 		 * THEN
7560 		 * backenable
7561 		 */
7562 		if ((q->q_flag & QWANTW) &&
7563 		    (((q->q_count < q->q_lowat) &&
7564 		    (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7565 			q->q_flag &= ~QWANTW;
7566 			mutex_exit(QLOCK(q));
7567 			backenable(oq, 0);
7568 		} else
7569 			mutex_exit(QLOCK(q));
7570 	} else
7571 		mutex_exit(QLOCK(q));
7572 }
7573 
7574 /*
7575  * Set the forward service procedure pointer.
7576  *
7577  * Called at insert-time to cache a queue's next forward service procedure in
7578  * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7579  * has a service procedure then q_nfsrv points to itself. If the queue to be
7580  * inserted does not have a service procedure, then q_nfsrv points to the next
7581  * queue forward that has a service procedure. If the queue is at the logical
7582  * end of the stream (driver for write side, stream head for the read side)
7583  * and does not have a service procedure, then q_nfsrv also points to itself.
7584  */
7585 void
7586 set_nfsrv_ptr(
7587 	queue_t *rnew,		/* read queue pointer to new module */
7588 	queue_t *wnew,		/* write queue pointer to new module */
7589 	queue_t *prev_rq,	/* read queue pointer to the module above */
7590 	queue_t *prev_wq)	/* write queue pointer to the module above */
7591 {
7592 	queue_t *qp;
7593 
7594 	if (prev_wq->q_next == NULL) {
7595 		/*
7596 		 * Insert the driver, initialize the driver and stream head.
7597 		 * In this case, prev_rq/prev_wq should be the stream head.
7598 		 * _I_INSERT does not allow inserting a driver. Make sure
7599 		 * that it is not an insertion.
7600 		 */
7601 		ASSERT(!(rnew->q_flag & _QINSERTING));
7602 		wnew->q_nfsrv = wnew;
7603 		if (rnew->q_qinfo->qi_srvp)
7604 			rnew->q_nfsrv = rnew;
7605 		else
7606 			rnew->q_nfsrv = prev_rq;
7607 		prev_rq->q_nfsrv = prev_rq;
7608 		prev_wq->q_nfsrv = prev_wq;
7609 	} else {
7610 		/*
7611 		 * set up read side q_nfsrv pointer. This MUST be done
7612 		 * before setting the write side, because the setting of
7613 		 * the write side for a fifo may depend on it.
7614 		 *
7615 		 * Suppose we have a fifo that only has pipemod pushed.
7616 		 * pipemod has no read or write service procedures, so
7617 		 * nfsrv for both pipemod queues points to prev_rq (the
7618 		 * stream read head). Now push bufmod (which has only a
7619 		 * read service procedure). Doing the write side first,
7620 		 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7621 		 * is WRONG; the next queue forward from wnew with a
7622 		 * service procedure will be rnew, not the stream read head.
7623 		 * Since the downstream queue (which in the case of a fifo
7624 		 * is the read queue rnew) can affect upstream queues, it
7625 		 * needs to be done first. Setting up the read side first
7626 		 * sets nfsrv for both pipemod queues to rnew and then
7627 		 * when the write side is set up, wnew->q_nfsrv will also
7628 		 * point to rnew.
7629 		 */
7630 		if (rnew->q_qinfo->qi_srvp) {
7631 			/*
7632 			 * use _OTHERQ() because, if this is a pipe, next
7633 			 * module may have been pushed from other end and
7634 			 * q_next could be a read queue.
7635 */ 7636 qp = _OTHERQ(prev_wq->q_next); 7637 while (qp && qp->q_nfsrv != qp) { 7638 qp->q_nfsrv = rnew; 7639 qp = backq(qp); 7640 } 7641 rnew->q_nfsrv = rnew; 7642 } else 7643 rnew->q_nfsrv = prev_rq->q_nfsrv; 7644 7645 /* set up write side q_nfsrv pointer */ 7646 if (wnew->q_qinfo->qi_srvp) { 7647 wnew->q_nfsrv = wnew; 7648 7649 /* 7650 * For insertion, need to update nfsrv of the modules 7651 * above which do not have a service routine. 7652 */ 7653 if (rnew->q_flag & _QINSERTING) { 7654 for (qp = prev_wq; 7655 qp != NULL && qp->q_nfsrv != qp; 7656 qp = backq(qp)) { 7657 qp->q_nfsrv = wnew->q_nfsrv; 7658 } 7659 } 7660 } else { 7661 if (prev_wq->q_next == prev_rq) 7662 /* 7663 * Since prev_wq/prev_rq are the middle of a 7664 * fifo, wnew/rnew will also be the middle of 7665 * a fifo and wnew's nfsrv is same as rnew's. 7666 */ 7667 wnew->q_nfsrv = rnew->q_nfsrv; 7668 else 7669 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv; 7670 } 7671 } 7672 } 7673 7674 /* 7675 * Reset the forward service procedure pointer; called at remove-time. 7676 */ 7677 void 7678 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp) 7679 { 7680 queue_t *tmp_qp; 7681 7682 /* Reset the write side q_nfsrv pointer for _I_REMOVE */ 7683 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) { 7684 for (tmp_qp = backq(wqp); 7685 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp; 7686 tmp_qp = backq(tmp_qp)) { 7687 tmp_qp->q_nfsrv = wqp->q_nfsrv; 7688 } 7689 } 7690 7691 /* reset the read side q_nfsrv pointer */ 7692 if (rqp->q_qinfo->qi_srvp) { 7693 if (wqp->q_next) { /* non-driver case */ 7694 tmp_qp = _OTHERQ(wqp->q_next); 7695 while (tmp_qp && tmp_qp->q_nfsrv == rqp) { 7696 /* Note that rqp->q_next cannot be NULL */ 7697 ASSERT(rqp->q_next != NULL); 7698 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv; 7699 tmp_qp = backq(tmp_qp); 7700 } 7701 } 7702 } 7703 } 7704 7705 /* 7706 * This routine should be called after all stream geometry changes to update 7707 * the stream head cached struio() rd/wr queue pointers. Note must be called 7708 * with the streamlock()ed. 7709 * 7710 * Note: only enables Synchronous STREAMS for a side of a Stream which has 7711 * an explicit synchronous barrier module queue. That is, a queue that 7712 * has specified a struio() type. 7713 */ 7714 static void 7715 strsetuio(stdata_t *stp) 7716 { 7717 queue_t *wrq; 7718 7719 if (stp->sd_flag & STPLEX) { 7720 /* 7721 * Not streamhead, but a mux, so no Synchronous STREAMS. 7722 */ 7723 stp->sd_struiowrq = NULL; 7724 stp->sd_struiordq = NULL; 7725 return; 7726 } 7727 /* 7728 * Scan the write queue(s) while synchronous 7729 * until we find a qinfo uio type specified. 7730 */ 7731 wrq = stp->sd_wrq->q_next; 7732 while (wrq) { 7733 if (wrq->q_struiot == STRUIOT_NONE) { 7734 wrq = 0; 7735 break; 7736 } 7737 if (wrq->q_struiot != STRUIOT_DONTCARE) 7738 break; 7739 if (! _SAMESTR(wrq)) { 7740 wrq = 0; 7741 break; 7742 } 7743 wrq = wrq->q_next; 7744 } 7745 stp->sd_struiowrq = wrq; 7746 /* 7747 * Scan the read queue(s) while synchronous 7748 * until we find a qinfo uio type specified. 7749 */ 7750 wrq = stp->sd_wrq->q_next; 7751 while (wrq) { 7752 if (_RD(wrq)->q_struiot == STRUIOT_NONE) { 7753 wrq = 0; 7754 break; 7755 } 7756 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE) 7757 break; 7758 if (! _SAMESTR(wrq)) { 7759 wrq = 0; 7760 break; 7761 } 7762 wrq = wrq->q_next; 7763 } 7764 stp->sd_struiordq = wrq ? 
_RD(wrq) : 0;
7765 }
7766 
7767 /*
7768  * pass_wput unblocks the passthru queues, so that
7769  * messages can arrive at the mux's lower read queue before
7770  * the I_LINK/I_UNLINK is acked/nacked.
7771  */
7772 static void
7773 pass_wput(queue_t *q, mblk_t *mp)
7774 {
7775 	syncq_t *sq;
7776 
7777 	sq = _RD(q)->q_syncq;
7778 	if (sq->sq_flags & SQ_BLOCKED)
7779 		unblocksq(sq, SQ_BLOCKED, 0);
7780 	putnext(q, mp);
7781 }
7782 
7783 /*
7784  * Set up queues for the link/unlink.
7785  * Create a new queue and block it and then insert it
7786  * below the stream head on the lower stream.
7787  * This prevents any messages from arriving during the setq
7788  * as well as while the mux is processing the I_LINK/I_UNLINK.
7789  * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7790  * been acked or nacked, or if a message is generated and sent
7791  * down the mux's write put procedure.
7792  * See pass_wput().
7793  *
7794  * After the new queue is inserted, all messages coming from below are
7795  * blocked. The call to strlock will ensure that all activity in the stream head
7796  * read queue syncq is stopped (sq_count drops to zero).
7797  */
7798 static queue_t *
7799 link_addpassthru(stdata_t *stpdown)
7800 {
7801 	queue_t *passq;
7802 	sqlist_t sqlist;
7803 
7804 	passq = allocq();
7805 	STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7806 	/* setq might sleep in allocator - avoid holding locks. */
7807 	setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7808 	    SQ_CI|SQ_CO, B_FALSE);
7809 	claimq(passq);
7810 	blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7811 	insertq(STREAM(passq), passq);
7812 
7813 	/*
7814 	 * Use strlock() to wait for the stream head sq_count to drop to zero
7815 	 * since we are going to change q_ptr in the stream head. Note that
7816 	 * insertq() doesn't wait for any syncq counts to drop to zero.
7817 	 */
7818 	sqlist.sqlist_head = NULL;
7819 	sqlist.sqlist_index = 0;
7820 	sqlist.sqlist_size = sizeof (sqlist_t);
7821 	sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7822 	strlock(stpdown, &sqlist);
7823 	strunlock(stpdown, &sqlist);
7824 
7825 	releaseq(passq);
7826 	return (passq);
7827 }
7828 
7829 /*
7830  * Let messages flow up into the mux by removing
7831  * the passq.
7832  */
7833 static void
7834 link_rempassthru(queue_t *passq)
7835 {
7836 	claimq(passq);
7837 	removeq(passq);
7838 	releaseq(passq);
7839 	freeq(passq);
7840 }
7841 
7842 /*
7843  * Wait for the condition variable pointed to by `cvp' to be signaled,
7844  * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7845  * is negative, then there is no time limit. If `nosigs' is non-zero,
7846  * then the wait will be non-interruptible.
7847  *
7848  * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
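 *
 * Illustrative usage (strwaitmark() below waits this way: a bounded,
 * 100 millisecond, non-interruptible wait for a state change):
 *
 *	mutex_enter(&stp->sd_lock);
 *	if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1)
 *		...timed out without being signaled...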
/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
 * is negative, then there is no time limit. If `nosigs' is non-zero,
 * then the wait will be non-interruptible.
 *
 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
 */
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
	clock_t ret;

	if (tim < 0) {
		if (nosigs) {
			cv_wait(cvp, mp);
			ret = 1;
		} else {
			ret = cv_wait_sig(cvp, mp);
		}
	} else if (tim > 0) {
		/*
		 * Convert milliseconds to clock ticks.
		 */
		if (nosigs) {
			ret = cv_reltimedwait(cvp, mp,
			    MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
		} else {
			ret = cv_reltimedwait_sig(cvp, mp,
			    MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
		}
	} else {
		ret = -1;
	}
	return (ret);
}

/*
 * Wait until the stream head can determine if it is at the mark, but
 * don't wait forever, to prevent a race condition between the "mark" state
 * in the stream head and any mark state in the caller/user of this routine.
 *
 * This is used by sockets, and for a socket it would be incorrect
 * to return a failure for SIOCATMARK when there is no data in the receive
 * queue and the marked urgent data is traveling up the stream.
 *
 * This routine waits until the mark is known by waiting for one of these
 * three events:
 *	The stream head read queue becoming non-empty (including an EOF).
 *	The STRATMARK flag being set (due to a MSGMARKNEXT message).
 *	The STRNOTATMARK flag being set (which indicates that the transport
 *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
 *	the mark).
 *
 * The routine returns 1 if the stream is at the mark, and 0 if it can
 * be determined that the stream is not at the mark. If the wait times out
 * and it can't be determined whether or not the stream might be at the
 * mark, the routine returns -1.
 *
 * Note: This routine should only be used when a mark is pending, i.e.,
 * in the socket case when the SIGURG has been posted.
 * Note2: This cannot wake up just because synchronous streams indicate
 * that data is available, since it is not possible to use the synchronous
 * streams interfaces to determine the b_flag value for the data queued below
 * the stream head.
 */
int
strwaitmark(vnode_t *vp)
{
	struct stdata *stp = vp->v_stream;
	queue_t *rq = _RD(stp->sd_wrq);
	int mark;

	mutex_enter(&stp->sd_lock);
	while (rq->q_first == NULL &&
	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
		stp->sd_flag |= RSLEEP;

		/* Wait for 100 milliseconds for any state change. */
		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
			mutex_exit(&stp->sd_lock);
			return (-1);
		}
	}
	if (stp->sd_flag & STRATMARK)
		mark = 1;
	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
		mark = 1;
	else
		mark = 0;

	mutex_exit(&stp->sd_lock);
	return (mark);
}
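/*
 * Illustrative sketch of how a SIOCATMARK-style caller might consume the
 * three-way result (hypothetical; the real socket code has additional
 * cases):
 *
 *	switch (strwaitmark(vp)) {
 *	case 1:		... report "at the mark" ...
 *	case 0:		... report "not at the mark" ...
 *	case -1:	... state unknown after the timeout; retry or
 *			    report "not at the mark" per protocol ...
 *	}
 */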
/*
 * Set a read side error. If persist is set, change the socket error
 * to persistent. If errfunc is set, install the function as the exported
 * error handler.
 */
void
strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_rerror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STRDERR;
	else
		stp->sd_flag |= STRDERR;
	if (persist) {
		stp->sd_flag &= ~STRDERRNONPERSIST;
	} else {
		stp->sd_flag |= STRDERRNONPERSIST;
	}
	stp->sd_rderrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Set a write side error. If persist is set, change the socket error
 * to persistent. If errfunc is set, install the function as the exported
 * error handler.
 */
void
strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_werror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STWRERR;
	else
		stp->sd_flag |= STWRERR;
	if (persist) {
		stp->sd_flag &= ~STWRERRNONPERSIST;
	} else {
		stp->sd_flag |= STWRERRNONPERSIST;
	}
	stp->sd_wrerrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}
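/*
 * Illustrative sketch (hypothetical caller; the errno choice is
 * protocol-specific): a transport that has seen a connection reset could
 * mark both sides of the stream with a persistent error:
 *
 *	strsetrerror(vp, ECONNRESET, 1, NULL);
 *	strsetwerror(vp, ECONNRESET, 1, NULL);
 *
 * Conversely, calling either routine with error == 0 and errfunc == NULL
 * clears the corresponding STRDERR/STWRERR flag.
 */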
/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on the write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}

void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	stp->sd_rputdatafunc = rdatafunc;
	stp->sd_wputdatafunc = wdatafunc;

	mutex_exit(&stp->sd_lock);
}
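/*
 * Illustrative sketch (hypothetical flag combination): a stream-head
 * consumer such as a socket module might install its hooks at open time:
 *
 *	strsetrputhooks(vp, SH_CONSOL_DATA | SH_IGN_ZEROLEN, NULL, NULL);
 *	strsetwputhooks(vp, SH_SIGPIPE, closetime);
 *
 * Passing NULL proto/misc functions selects the default strrput_proto()/
 * strrput_misc() handlers; closetime here stands for whatever close
 * timeout the caller wants cached in sd_closetime.
 */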
/* Used within the framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on the run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is
	 * not already being serviced. If it is being serviced, the
	 * runservice() function will detect that QENAB is set and call
	 * the service procedure before clearing the QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = ddi_get_lbolt();

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing if it is not already scheduled, or if the stream head
	 * does not intend to process it in the foreground later by setting
	 * the STRS_WILLSERVICE flag.
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags should
	 * show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream, we are the first producer and
	 * need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * The task queue dispatch failed, so fail over to
			 * the backup servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag
			 * because it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue-processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}
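/*
 * For reference, ENQUEUE() above and DQ() in stream_runservice() below are
 * simple tail-insert and head-remove operations on a singly-linked list
 * threaded through the named link field. A rough sketch of their semantics
 * (not the actual macro text):
 *
 *	ENQUEUE(q, head, tail, link):
 *		if (head == NULL) head = q; else tail->link = q;
 *		tail = q;
 *
 *	DQ(q, head, tail, link):
 *		q = head;
 *		if (q != NULL) {
 *			head = q->link;
 *			if (head == NULL) tail = NULL;
 *			q->link = NULL;
 *		}
 */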
static void
queue_service(queue_t *q)
{
	/*
	 * The queue in the list should have the QENAB flag set and should
	 * not have the QINSERVICE flag set. QINSERVICE is set when the
	 * queue is dequeued, and qenable_locked() doesn't enqueue a queue
	 * with QINSERVICE set.
	 */

	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* If we came here from the background thread, clear the flag. */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* Let drain_syncq() know that it's being called in the background. */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}

static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter
	 * to signal that a qwriter(OUTER) is either preparing to run
	 * or is actually running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set
	 * to block entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now since
	 * outer_exit does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked()
	 * will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread to drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}
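/*
 * Illustrative sketch of the foreground servicing protocol (hypothetical
 * caller): a stream-head routine announces its intent before doing work
 * that may enable queues, so qenable_locked() defers to it rather than
 * dispatching a background task, and then drains the list itself:
 *
 *	stream_willservice(stp);
 *	... putnext()/qenable() work that enables stream queues ...
 *	stream_runservice(stp);
 */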
/*
 * Replace the cred currently in the mblk with a different one.
 * Also update db_cpid.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *ocr = dbp->db_credp;

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}

/*
 * If the src message has a cred, then replace the cred currently in the
 * mblk with it.
 * Also update db_cpid.
 */
void
mblk_copycred(mblk_t *mp, const mblk_t *src)
{
	dblk_t *dbp = mp->b_datap;
	cred_t *cr, *ocr;
	pid_t cpid;

	cr = msg_getcred(src, &cpid);
	if (cr == NULL)
		return;

	ocr = dbp->db_credp;
	if (cr != ocr) {
		crhold(dbp->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
	/* Don't overwrite with NOPID */
	if (cpid != NOPID)
		dbp->db_cpid = cpid;
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate values for M_DATA type */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;

	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
			if ((*flags & (HCK_PARTIALCKSUM |
			    HCK_FULLCKSUM)) != 0) {
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
				if ((*flags & HCK_PARTIALCKSUM) != 0) {
					if (start != NULL)
						*start =
						    (uint32_t)DB_CKSUMSTART(mp);
					if (stuff != NULL)
						*stuff =
						    (uint32_t)DB_CKSUMSTUFF(mp);
					if (end != NULL)
						*end =
						    (uint32_t)DB_CKSUMEND(mp);
				}
			}
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}
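/*
 * Illustrative sketch (hypothetical offsets): a protocol module requesting
 * partial checksum offload on an M_DATA message associates the offsets
 * before sending the message down; the mmd/pd arguments are only used for
 * M_MULTIDATA:
 *
 *	(void) hcksum_assoc(mp, NULL, NULL, start_off, stuff_off, end_off,
 *	    0, HCK_PARTIALCKSUM, KM_NOSLEEP);
 *
 * A driver would later call hcksum_retrieve() on the same message to pull
 * the offsets and flags back out.
 */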
void
lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);
	ASSERT((flags & ~HW_LSO_FLAGS) == 0);

	/* Set the flags */
	DB_LSOFLAGS(mp) |= flags;
	DB_LSOMSS(mp) = mss;
}

void
lso_info_cleanup(mblk_t *mp)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	/* Clear the flags */
	DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
	DB_LSOMSS(mp) = 0;
}

void
lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	if (flags != NULL) {
		*flags = DB_CKSUMFLAGS(mp) & HW_LSO_FLAGS;
		if ((*flags != 0) && (mss != NULL))
			*mss = (uint32_t)DB_LSOMSS(mp);
	}
}

/*
 * Checksum buffer *bp for len bytes with psum partial checksum,
 * or 0 if none, and return the 16 bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * bp is 16-bit aligned and len is a multiple of the
		 * 16-bit word size.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * bp isn't 16-bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * bp is 16-bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum. The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}
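/*
 * Illustrative sketch: accumulating a partial checksum over the data blocks
 * of a message (MBLKL() is from <sys/strsun.h>). Note the caveat that an
 * odd-length block is treated by bcksum() as ending the data, so this only
 * works when every block except possibly the last has an even length:
 *
 *	unsigned sum = 0;
 *	mblk_t *bp;
 *
 *	for (bp = mp; bp != NULL; bp = bp->b_cont)
 *		sum = bcksum(bp->b_rptr, MBLKL(bp), sum);
 */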
boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}

/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
	str_stack_t *ss;
	int i;

	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
	ss->ss_netstack = ns;

	/*
	 * Set up autopush.
	 */
	sad_initspace(ss);

	/*
	 * Set up the mux_node structures.
	 */
	ss->ss_devcnt = devcnt;	/* In case it should change before free */
	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
	    ss->ss_devcnt), KM_SLEEP);
	for (i = 0; i < ss->ss_devcnt; i++)
		ss->ss_mux_nodes[i].mn_imaj = i;
	return (ss);
}

/*
 * Note: run at zone shutdown and not destroy so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;
	int i;
	cred_t *cr;

	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
	ASSERT(cr != NULL);

	/* Undo all the I_PLINKs for this zone */
	for (i = 0; i < ss->ss_devcnt; i++) {
		struct mux_edge *ep;
		ldi_handle_t lh;
		ldi_ident_t li;
		int ret;
		int rval;
		dev_t rdev;

		ep = ss->ss_mux_nodes[i].mn_outp;
		if (ep == NULL)
			continue;
		ret = ldi_ident_from_major((major_t)i, &li);
		if (ret != 0) {
			continue;
		}
		rdev = ep->me_dev;
		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
		    cr, &lh, li);
		if (ret != 0) {
			ldi_ident_release(li);
			continue;
		}

		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
		    cr, &rval);
		if (ret) {
			(void) ldi_close(lh, FREAD|FWRITE, cr);
			ldi_ident_release(li);
			continue;
		}
		(void) ldi_close(lh, FREAD|FWRITE, cr);

		/* Release the layered-driver identity. */
		ldi_ident_release(li);
	}
	crfree(cr);

	sad_freespace(ss);

	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
	ss->ss_mux_nodes = NULL;
}

/*
 * Free the structure; str_stack_shutdown() did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;

	kmem_free(ss, sizeof (*ss));
}