/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define O_SAMESTR(q) (((q)->q_next) && \
        (((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define STREAMS_LOPRI MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define STRSTAT(x) (str_statistics.x.value.ui64++)
typedef struct str_stat {
        kstat_named_t sqenables;
        kstat_named_t stenables;
        kstat_named_t syncqservice;
        kstat_named_t freebs;
        kstat_named_t qwr_outer;
        kstat_named_t rservice;
        kstat_named_t strwaits;
        kstat_named_t taskqfails;
        kstat_named_t bufcalls;
        kstat_named_t qhelps;
        kstat_named_t qremoved;
        kstat_named_t sqremoved;
        kstat_named_t bcwaits;
        kstat_named_t sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
        { "sqenables",          KSTAT_DATA_UINT64 },
        { "stenables",          KSTAT_DATA_UINT64 },
        { "syncqservice",       KSTAT_DATA_UINT64 },
        { "freebs",             KSTAT_DATA_UINT64 },
        { "qwr_outer",          KSTAT_DATA_UINT64 },
        { "rservice",           KSTAT_DATA_UINT64 },
        { "strwaits",           KSTAT_DATA_UINT64 },
        { "taskqfails",         KSTAT_DATA_UINT64 },
        { "bufcalls",           KSTAT_DATA_UINT64 },
        { "qhelps",             KSTAT_DATA_UINT64 },
        { "qremoved",           KSTAT_DATA_UINT64 },
        { "sqremoved",          KSTAT_DATA_UINT64 },
        { "bcwaits",            KSTAT_DATA_UINT64 },
        { "sqtoomany",          KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via qready() and setqsched macros.
 */
char qrunflag;                  /* Unused */

/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * freebs dispatch requests. Queues are put in the list specified by `qhead'
 * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
 * requests are put into `freebs_list' which has no tail pointer. All three
 * lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under heavy
 * loads when task queues start to fail and at that time it may be a good idea
 * to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */

static taskq_t *streams_taskq;          /* Used for most STREAMS scheduling */

static kmutex_t service_queue;          /* protects all of servicing vars */
static kcondvar_t services_to_run;      /* wake up background service thread */
static kcondvar_t syncqs_to_run;        /* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;     /* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
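/*
 * A sketch of how the dispatch sites later in this file fall back to
 * these lists when a non-sleeping taskq dispatch fails (illustrative
 * shape only, not the verbatim code):
 *
 *	if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
 *	    q, TQ_NOSLEEP) == NULL) {
 *		mutex_enter(&service_queue);
 *		STRSTAT(taskqfails);
 *		ENQUEUE(q, qhead, qtail, q_link);
 *		cv_signal(&services_to_run);
 *		mutex_exit(&service_queue);
 *	}
 */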
/*
 * Bufcalls related variables.
 */
struct bclist strbcalls;        /* list of waiting bufcalls */
kmutex_t strbcall_lock;         /* protects bufcall list (strbcalls) */
kcondvar_t strbcall_cv;         /* Signaling when a bufcall is added */
kmutex_t bcall_monitor;         /* sleep/wakeup style monitor */
kcondvar_t bcall_cv;            /* wait 'till executing bufcall completes */
kthread_t *bc_bkgrnd_thread;    /* Thread to service bufcall requests */

kmutex_t strresources;          /* protects global resources */
kmutex_t muxifier;              /* single-threads multiplexor creation */

static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
static void str_stack_shutdown(netstackid_t stackid, void *arg);
static void str_stack_fini(netstackid_t stackid, void *arg);

extern void time_to_wait(clock_t *, clock_t);

/*
 * run_queues is no longer used, but is kept in case some 3rd party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer to choose a large number
 * as the default value. For potential performance gain, this value is
 * tunable in /etc/system.
 */
int sq_max_size = 10000;
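/*
 * For example, an administrator could raise the limit with an /etc/system
 * line such as the following (the value is chosen for illustration only):
 *
 *	set sq_max_size = 25000
 */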
/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_sq == syncq_1
 *                dm_ref
 *                dm_next --> dm_str == streamtab_2
 *                            dm_sq == syncq_2
 *                            dm_ref
 *                            dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                    ^                ^  ^              ^
 *                    |  ______________/  |              |
 *                    | /                 |              |
 * dev_impl:     ...|x|y|...           module A       module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;
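/*
 * The reference pattern, roughly (hold_dm() and rele_dm() are defined
 * later in this file; a sketch, not verbatim code):
 *
 *	perdm_t *dmp = hold_dm(str, qflag, sqtype);	dm_ref++, syncq shared
 *	...
 *	rele_dm(dmp);		dm_ref--, perdm and syncq freed at zero
 */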
extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* Global esballoc throttling queue */
static esb_queue_t system_esbq;

/*
 * esballoc tunable parameters.
 */
int esbq_max_qlen = 0x16;       /* throttled queue length */
clock_t esbq_timeout = 0x8;     /* timeout to process esb queue */

/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
        0,
        "passthru",
        0,
        INFPSZ,
        STRHIGH,
        STRLOW
};

struct qinit passthru_rinit = {
        (int (*)())putnext,
        NULL,
        NULL,
        NULL,
        NULL,
        &passthru_info,
        NULL
};

struct qinit passthru_winit = {
        (int (*)())pass_wput,
        NULL,
        NULL,
        NULL,
        NULL,
        &passthru_info,
        NULL
};

/*
 * Special form of assertion: verify that X implies Y, i.e. when X is true Y
 * should also be true.
 */
#define IMPLY(X, Y) ASSERT(!(X) || (Y))

/*
 * Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
 */
#define EQUIV(X, Y) { IMPLY(X, Y); IMPLY(Y, X); }

/*
 * Verify correctness of list head/tail pointers.
 */
#define LISTCHECK(head, tail, link) { \
        EQUIV(head, tail); \
        IMPLY(tail != NULL, tail->link == NULL); \
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define ENQUEUE(el, head, tail, link) { \
        ASSERT(el->link == NULL); \
        LISTCHECK(head, tail, link); \
        if (head == NULL) \
                head = el; \
        else \
                tail->link = el; \
        tail = el; \
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put result into `el'.
 */
#define DQ(el, head, tail, link) { \
        LISTCHECK(head, tail, link); \
        el = head; \
        if (head != NULL) { \
                head = head->link; \
                if (head == NULL) \
                        tail = NULL; \
                el->link = NULL; \
        } \
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return
 * result in `succeed'.
 */
#define RMQ(el, head, tail, link, chase, curr, succeed) { \
        LISTCHECK(head, tail, link); \
        chase = NULL; \
        succeed = 0; \
        for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
                chase = curr; \
        if (curr != NULL) { \
                succeed = 1; \
                ASSERT(curr == el); \
                if (chase != NULL) \
                        chase->link = curr->link; \
                else \
                        head = curr->link; \
                curr->link = NULL; \
                if (curr == tail) \
                        tail = chase; \
        } \
        LISTCHECK(head, tail, link); \
}
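/*
 * Example usage of the list macros above, here against the background
 * queue run list (a sketch; `removed' reports whether qp was found,
 * exactly as remove_runlist() below uses RMQ):
 *
 *	queue_t *q, *chase, *curr;
 *	int removed;
 *
 *	mutex_enter(&service_queue);
 *	ENQUEUE(qp, qhead, qtail, q_link);	append qp at the tail
 *	DQ(q, qhead, qtail, q_link);		q = head entry, or NULL
 *	RMQ(qp, qhead, qtail, q_link, chase, curr, removed);
 *	mutex_exit(&service_queue);
 */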
/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define SQPUT_Q(sq, qp) \
{ \
        ASSERT(MUTEX_HELD(SQLOCK(sq))); \
        if (!(qp->q_sqflags & Q_SQQUEUED)) { \
                /* The queue should not be linked anywhere */ \
                ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
                /* Head and tail may only be NULL simultaneously */ \
                EQUIV(sq->sq_head, sq->sq_tail); \
                /* Queue may be only enqueued on its syncq */ \
                ASSERT(sq == qp->q_syncq); \
                /* Check the correctness of SQ_MESSAGES flag */ \
                EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
                /* Sanity check first/last elements of the list */ \
                IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
                IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
                /* \
                 * Sanity check of priority field: empty queue should \
                 * have zero priority and nqueues equal to zero. \
                 */ \
                IMPLY(sq->sq_head == NULL, sq->sq_pri == 0); \
                /* Sanity check of sq_nqueues field */ \
                EQUIV(sq->sq_head, sq->sq_nqueues); \
                if (sq->sq_head == NULL) { \
                        sq->sq_head = sq->sq_tail = qp; \
                        sq->sq_flags |= SQ_MESSAGES; \
                } else if (qp->q_spri == 0) { \
                        qp->q_sqprev = sq->sq_tail; \
                        sq->sq_tail->q_sqnext = qp; \
                        sq->sq_tail = qp; \
                } else { \
                        /* \
                         * Put this queue in priority order: higher \
                         * priority gets closer to the head. \
                         */ \
                        queue_t **qpp = &sq->sq_tail; \
                        queue_t *qnext = NULL; \
 \
                        while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
                                qnext = *qpp; \
                                qpp = &(*qpp)->q_sqprev; \
                        } \
                        qp->q_sqnext = qnext; \
                        qp->q_sqprev = *qpp; \
                        if (*qpp != NULL) { \
                                (*qpp)->q_sqnext = qp; \
                        } else { \
                                sq->sq_head = qp; \
                                sq->sq_pri = sq->sq_head->q_spri; \
                        } \
                        *qpp = qp; \
                } \
                qp->q_sqflags |= Q_SQQUEUED; \
                qp->q_sqtstamp = lbolt; \
                sq->sq_nqueues++; \
        } \
}

/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define SQRM_Q(sq, qp) \
{ \
        ASSERT(MUTEX_HELD(SQLOCK(sq))); \
        ASSERT(qp->q_sqflags & Q_SQQUEUED); \
        ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL); \
        ASSERT((sq->sq_flags & SQ_MESSAGES) != 0); \
        /* Check that the queue is actually in the list */ \
        ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp); \
        ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp); \
        ASSERT(sq->sq_nqueues != 0); \
        if (qp->q_sqprev == NULL) { \
                /* First queue on list, make head q_sqnext */ \
                sq->sq_head = qp->q_sqnext; \
        } else { \
                /* Make prev->next == next */ \
                qp->q_sqprev->q_sqnext = qp->q_sqnext; \
        } \
        if (qp->q_sqnext == NULL) { \
                /* Last queue on list, make tail sqprev */ \
                sq->sq_tail = qp->q_sqprev; \
        } else { \
                /* Make next->prev == prev */ \
                qp->q_sqnext->q_sqprev = qp->q_sqprev; \
        } \
        /* clear out references on this queue */ \
        qp->q_sqprev = qp->q_sqnext = NULL; \
        qp->q_sqflags &= ~Q_SQQUEUED; \
        /* If there is nothing queued, clear SQ_MESSAGES */ \
        if (sq->sq_head != NULL) { \
                sq->sq_pri = sq->sq_head->q_spri; \
        } else { \
                sq->sq_flags &= ~SQ_MESSAGES; \
                sq->sq_pri = 0; \
        } \
        sq->sq_nqueues--; \
        ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL || \
            (sq->sq_flags & SQ_QUEUED) == 0); \
}
/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define SQPUT_MP(qp, mp) \
{ \
        ASSERT(MUTEX_HELD(QLOCK(qp))); \
        ASSERT(qp->q_sqhead == NULL || \
            (qp->q_sqtail != NULL && \
            qp->q_sqtail->b_next == NULL)); \
        qp->q_syncqmsgs++; \
        ASSERT(qp->q_syncqmsgs != 0);   /* Wraparound */ \
        if (qp->q_sqhead == NULL) { \
                qp->q_sqhead = qp->q_sqtail = mp; \
        } else { \
                qp->q_sqtail->b_next = mp; \
                qp->q_sqtail = mp; \
        } \
        ASSERT(qp->q_syncqmsgs > 0); \
        set_qfull(qp); \
}

#define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \
        ASSERT(MUTEX_HELD(SQLOCK(sq))); \
        if ((sq)->sq_ciputctrl != NULL) { \
                int i; \
                int nlocks = (sq)->sq_nciputctrl; \
                ciputctrl_t *cip = (sq)->sq_ciputctrl; \
                ASSERT((sq)->sq_type & SQ_CIPUT); \
                for (i = 0; i <= nlocks; i++) { \
                        ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
                        cip[i].ciputctrl_count |= SQ_FASTPUT; \
                } \
        } \
}

#define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \
        ASSERT(MUTEX_HELD(SQLOCK(sq))); \
        if ((sq)->sq_ciputctrl != NULL) { \
                int i; \
                int nlocks = (sq)->sq_nciputctrl; \
                ciputctrl_t *cip = (sq)->sq_ciputctrl; \
                ASSERT((sq)->sq_type & SQ_CIPUT); \
                for (i = 0; i <= nlocks; i++) { \
                        ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
                        cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
                } \
        } \
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define STR_SERVICE(stp, q) { \
        ASSERT(MUTEX_HELD(&stp->sd_qlock)); \
        while (stp->sd_qhead != NULL) { \
                DQ(q, stp->sd_qhead, stp->sd_qtail, q_link); \
                ASSERT(stp->sd_nqueues > 0); \
                stp->sd_nqueues--; \
                ASSERT(!(q->q_flag & QINSERVICE)); \
                mutex_exit(&stp->sd_qlock); \
                queue_service(q); \
                mutex_enter(&stp->sd_qlock); \
        } \
        ASSERT(stp->sd_nqueues == 0); \
        ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \
}
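/*
 * stream_service(), later in this file, drains the stream head run list
 * roughly as follows (a sketch of the shape, not the verbatim code):
 *
 *	mutex_enter(&stp->sd_qlock);
 *	STR_SERVICE(stp, q);	(runs queue_service() on each queued queue)
 *	mutex_exit(&stp->sd_qlock);
 */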
/*
 * Constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
        stdata_t *stp = buf;

        mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
        cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
        cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
        cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
        cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
        stp->sd_wrq = NULL;

        return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
        stdata_t *stp = buf;

        mutex_destroy(&stp->sd_lock);
        mutex_destroy(&stp->sd_reflock);
        mutex_destroy(&stp->sd_qlock);
        cv_destroy(&stp->sd_monitor);
        cv_destroy(&stp->sd_iocmonitor);
        cv_destroy(&stp->sd_refmonitor);
        cv_destroy(&stp->sd_qcv);
        cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * Constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
        queinfo_t *qip = buf;
        queue_t *qp = &qip->qu_rqueue;
        queue_t *wqp = &qip->qu_wqueue;
        syncq_t *sq = &qip->qu_syncq;

        qp->q_first = NULL;
        qp->q_link = NULL;
        qp->q_count = 0;
        qp->q_mblkcnt = 0;
        qp->q_sqhead = NULL;
        qp->q_sqtail = NULL;
        qp->q_sqnext = NULL;
        qp->q_sqprev = NULL;
        qp->q_sqflags = 0;
        qp->q_rwcnt = 0;
        qp->q_spri = 0;

        mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
        cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

        wqp->q_first = NULL;
        wqp->q_link = NULL;
        wqp->q_count = 0;
        wqp->q_mblkcnt = 0;
        wqp->q_sqhead = NULL;
        wqp->q_sqtail = NULL;
        wqp->q_sqnext = NULL;
        wqp->q_sqprev = NULL;
        wqp->q_sqflags = 0;
        wqp->q_rwcnt = 0;
        wqp->q_spri = 0;

        mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
        cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

        sq->sq_head = NULL;
        sq->sq_tail = NULL;
        sq->sq_evhead = NULL;
        sq->sq_evtail = NULL;
        sq->sq_callbpend = NULL;
        sq->sq_outer = NULL;
        sq->sq_onext = NULL;
        sq->sq_oprev = NULL;
        sq->sq_next = NULL;
        sq->sq_svcflags = 0;
        sq->sq_servcount = 0;
        sq->sq_needexcl = 0;
        sq->sq_nqueues = 0;
        sq->sq_pri = 0;

        mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
        cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

        return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
        queinfo_t *qip = buf;
        queue_t *qp = &qip->qu_rqueue;
        queue_t *wqp = &qip->qu_wqueue;
        syncq_t *sq = &qip->qu_syncq;

        ASSERT(qp->q_sqhead == NULL);
        ASSERT(wqp->q_sqhead == NULL);
        ASSERT(qp->q_sqnext == NULL);
        ASSERT(wqp->q_sqnext == NULL);
        ASSERT(qp->q_rwcnt == 0);
        ASSERT(wqp->q_rwcnt == 0);

        mutex_destroy(&qp->q_lock);
        cv_destroy(&qp->q_wait);

        mutex_destroy(&wqp->q_lock);
        cv_destroy(&wqp->q_wait);

        mutex_destroy(&sq->sq_lock);
        cv_destroy(&sq->sq_wait);
        cv_destroy(&sq->sq_exitwait);
}

/*
 * Constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
        syncq_t *sq = buf;

        bzero(buf, sizeof (syncq_t));

        mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
        cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

        return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
        syncq_t *sq = buf;

        ASSERT(sq->sq_head == NULL);
        ASSERT(sq->sq_tail == NULL);
        ASSERT(sq->sq_evhead == NULL);
        ASSERT(sq->sq_evtail == NULL);
        ASSERT(sq->sq_callbpend == NULL);
        ASSERT(sq->sq_callbflags == 0);
        ASSERT(sq->sq_outer == NULL);
        ASSERT(sq->sq_onext == NULL);
        ASSERT(sq->sq_oprev == NULL);
        ASSERT(sq->sq_next == NULL);
        ASSERT(sq->sq_needexcl == 0);
        ASSERT(sq->sq_svcflags == 0);
        ASSERT(sq->sq_servcount == 0);
        ASSERT(sq->sq_nqueues == 0);
        ASSERT(sq->sq_pri == 0);
        ASSERT(sq->sq_count == 0);
        ASSERT(sq->sq_rmqcount == 0);
        ASSERT(sq->sq_cancelid == 0);
        ASSERT(sq->sq_ciputctrl == NULL);
        ASSERT(sq->sq_nciputctrl == 0);
        ASSERT(sq->sq_type == 0);
        ASSERT(sq->sq_flags == 0);

        mutex_destroy(&sq->sq_lock);
        cv_destroy(&sq->sq_wait);
        cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
        ciputctrl_t *cip = buf;
        int i;

        for (i = 0; i < n_ciputctrl; i++) {
                cip[i].ciputctrl_count = SQ_FASTPUT;
                mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
        }

        return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
        ciputctrl_t *cip = buf;
        int i;

        for (i = 0; i < n_ciputctrl; i++) {
                ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
                mutex_destroy(&cip[i].ciputctrl_lock);
        }
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
        int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

        stream_head_cache = kmem_cache_create("stream_head_cache",
            sizeof (stdata_t), 0,
            stream_head_constructor, stream_head_destructor, NULL,
            NULL, NULL, 0);

        queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
            queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

        syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
            syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

        qband_cache = kmem_cache_create("qband_cache",
            sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

        linkinfo_cache = kmem_cache_create("linkinfo_cache",
            sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

        n_ciputctrl = ncpus;
        n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
        ASSERT(n_ciputctrl >= 1);
        n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
        if (n_ciputctrl >= min_n_ciputctrl) {
                ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
                    sizeof (ciputctrl_t) * n_ciputctrl,
                    sizeof (ciputctrl_t), ciputctrl_constructor,
                    ciputctrl_destructor, NULL, NULL, NULL, 0);
        }

        streams_taskq = system_taskq;

        if (streams_taskq == NULL)
                panic("strinit: no memory for streams taskq!");

        bc_bkgrnd_thread = thread_create(NULL, 0,
            streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

        streams_qbkgrnd_thread = thread_create(NULL, 0,
            streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

        streams_sqbkgrnd_thread = thread_create(NULL, 0,
            streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

        /*
         * Create STREAMS kstats.
         */
        str_kstat = kstat_create("streams", 0, "strstat",
            "net", KSTAT_TYPE_NAMED,
            sizeof (str_statistics) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);

        if (str_kstat != NULL) {
                str_kstat->ks_data = &str_statistics;
                kstat_install(str_kstat);
        }

        /*
         * TPI support routine initialisation.
         */
        tpi_init();

        /*
         * Handle to have autopush and persistent link information per
         * zone.
         * Note: uses shutdown hook instead of destroy hook so that the
         * persistent links can be torn down before the destroy hooks
         * in the TCP/IP stack are called.
         */
        netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
            str_stack_fini);
}
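/*
 * The statistics installed above can be inspected from userland with
 * kstat(1M), e.g. (illustrative invocation):
 *
 *	# kstat -m streams -n strstat
 */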
void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
        struct stdata *stp;

        ASSERT(vp->v_stream);
        stp = vp->v_stream;
        /* Have to hold sd_lock to prevent siglist from changing */
        mutex_enter(&stp->sd_lock);
        if (stp->sd_sigflags & event)
                strsendsig(stp->sd_siglist, event, band, error);
        mutex_exit(&stp->sd_lock);
}

/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
        ASSERT(MUTEX_HELD(&proc->p_lock));

        info->si_band = 0;
        info->si_errno = 0;

        if (sevent & S_ERROR) {
                sevent &= ~S_ERROR;
                info->si_code = POLL_ERR;
                info->si_errno = error;
                TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
                    "strsendsig:proc %p info %p", proc, info);
                sigaddq(proc, NULL, info, KM_NOSLEEP);
                info->si_errno = 0;
        }
        if (sevent & S_HANGUP) {
                sevent &= ~S_HANGUP;
                info->si_code = POLL_HUP;
                TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
                    "strsendsig:proc %p info %p", proc, info);
                sigaddq(proc, NULL, info, KM_NOSLEEP);
        }
        if (sevent & S_HIPRI) {
                sevent &= ~S_HIPRI;
                info->si_code = POLL_PRI;
                TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
                    "strsendsig:proc %p info %p", proc, info);
                sigaddq(proc, NULL, info, KM_NOSLEEP);
        }
        if (sevent & S_RDBAND) {
                sevent &= ~S_RDBAND;
                if (events & S_BANDURG)
                        sigtoproc(proc, NULL, SIGURG);
                else
                        sigtoproc(proc, NULL, SIGPOLL);
        }
        if (sevent & S_WRBAND) {
                sevent &= ~S_WRBAND;
                sigtoproc(proc, NULL, SIGPOLL);
        }
        if (sevent & S_INPUT) {
                sevent &= ~S_INPUT;
                info->si_code = POLL_IN;
                info->si_band = band;
                TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
                    "strsendsig:proc %p info %p", proc, info);
                sigaddq(proc, NULL, info, KM_NOSLEEP);
                info->si_band = 0;
        }
        if (sevent & S_OUTPUT) {
                sevent &= ~S_OUTPUT;
                info->si_code = POLL_OUT;
                info->si_band = band;
                TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
                    "strsendsig:proc %p info %p", proc, info);
                sigaddq(proc, NULL, info, KM_NOSLEEP);
                info->si_band = 0;
        }
        if (sevent & S_MSG) {
                sevent &= ~S_MSG;
                info->si_code = POLL_MSG;
                info->si_band = band;
                TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
                    "strsendsig:proc %p info %p", proc, info);
                sigaddq(proc, NULL, info, KM_NOSLEEP);
                info->si_band = 0;
        }
        if (sevent & S_RDNORM) {
                sevent &= ~S_RDNORM;
                sigtoproc(proc, NULL, SIGPOLL);
        }
        if (sevent != 0) {
                panic("strsendsig: unknown event(s) %x", sevent);
        }
}
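/*
 * For reference, a user-level consumer arranges to receive these signals
 * with the I_SETSIG STREAMS ioctl; a minimal, illustrative example:
 *
 *	#include <stropts.h>
 *	#include <signal.h>
 *
 *	void handler(int sig) { ... }
 *	...
 *	(void) sigset(SIGPOLL, handler);
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_RDBAND) < 0)
 *		... error handling ...
 */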
/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
        strsig_t *ssp;
        k_siginfo_t info;
        struct pid *pidp;
        proc_t *proc;

        info.si_signo = SIGPOLL;
        info.si_errno = 0;
        for (ssp = siglist; ssp; ssp = ssp->ss_next) {
                int sevent;

                sevent = ssp->ss_events & event;
                if (sevent == 0)
                        continue;

                if ((pidp = ssp->ss_pidp) == NULL) {
                        /* pid was released but still on event list */
                        continue;
                }

                if (ssp->ss_pid > 0) {
                        /*
                         * XXX This unfortunately still generates
                         * a signal when an fd is closed but
                         * the proc is active.
                         */
                        ASSERT(ssp->ss_pid == pidp->pid_id);

                        mutex_enter(&pidlock);
                        proc = prfind_zone(pidp->pid_id, ALL_ZONES);
                        if (proc == NULL) {
                                mutex_exit(&pidlock);
                                continue;
                        }
                        mutex_enter(&proc->p_lock);
                        mutex_exit(&pidlock);
                        dosendsig(proc, ssp->ss_events, sevent, &info,
                            band, error);
                        mutex_exit(&proc->p_lock);
                } else {
                        /*
                         * Send to process group. Hold pidlock across
                         * calls to dosendsig().
                         */
                        pid_t pgrp = -ssp->ss_pid;

                        mutex_enter(&pidlock);
                        proc = pgfind_zone(pgrp, ALL_ZONES);
                        while (proc != NULL) {
                                mutex_enter(&proc->p_lock);
                                dosendsig(proc, ssp->ss_events, sevent,
                                    &info, band, error);
                                mutex_exit(&proc->p_lock);
                                proc = proc->p_pglink;
                        }
                        mutex_exit(&pidlock);
                }
        }
}
/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
        major_t major;
        cdevsw_impl_t *dp;
        struct streamtab *str;
        queue_t *rq;
        queue_t *wrq;
        uint32_t qflag;
        uint32_t sqtype;
        perdm_t *dmp;
        int error;
        int sflag;

        rq = allocq();
        wrq = _WR(rq);
        STREAM(rq) = STREAM(wrq) = STREAM(qp);

        if (fp != NULL) {
                str = fp->f_str;
                qflag = fp->f_qflag;
                sqtype = fp->f_sqtype;
                dmp = fp->f_dmp;
                IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
                sflag = MODOPEN;

                /*
                 * stash away a pointer to the module structure so we can
                 * unref it in qdetach.
                 */
                rq->q_fp = fp;
        } else {
                ASSERT(!is_insert);

                major = getmajor(*devp);
                dp = &devimpl[major];

                str = dp->d_str;
                ASSERT(str == STREAMSTAB(major));

                qflag = dp->d_qflag;
                ASSERT(qflag & QISDRV);
                sqtype = dp->d_sqtype;

                /* create perdm_t if needed */
                if (NEED_DM(dp->d_dmp, qflag))
                        dp->d_dmp = hold_dm(str, qflag, sqtype);

                dmp = dp->d_dmp;
                sflag = 0;
        }

        TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
            "qattach:qflag == %X(%X)", qflag, *devp);

        /* setq might sleep in allocator - avoid holding locks. */
        setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

        /*
         * Before calling the module's open routine, set up the q_next
         * pointer for inserting a module in the middle of a stream.
         *
         * Note that we can always set _QINSERTING and set up q_next
         * pointer for both inserting and pushing a module. Then there
         * is no need for the is_insert parameter. In insertq(), called
         * by qprocson(), assume that q_next of the new module always points
         * to the correct queue and use it for insertion. Everything should
         * work out fine. But in the first release of _I_INSERT, we
         * distinguish between inserting and pushing to make sure that
         * pushing a module follows the same code path as before.
         */
        if (is_insert) {
                rq->q_flag |= _QINSERTING;
                rq->q_next = qp;
        }

        /*
         * If there is an outer perimeter get exclusive access during
         * the open procedure. Bump up the reference count on the queue.
         */
        entersq(rq->q_syncq, SQ_OPENCLOSE);
        error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
        if (error != 0)
                goto failed;
        leavesq(rq->q_syncq, SQ_OPENCLOSE);
        ASSERT(qprocsareon(rq));
        return (0);

failed:
        rq->q_flag &= ~_QINSERTING;
        if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
                qprocsoff(rq);
        leavesq(rq->q_syncq, SQ_OPENCLOSE);
        rq->q_next = wrq->q_next = NULL;
        qdetach(rq, 0, 0, crp, B_FALSE);
        return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
        int error;
        dev_t dummydev;
        queue_t *wqp = _WR(qp);

        ASSERT(qp->q_flag & QREADR);
        entersq(qp->q_syncq, SQ_OPENCLOSE);

        dummydev = *devp;
        if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
            (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
                leavesq(qp->q_syncq, SQ_OPENCLOSE);
                mutex_enter(&STREAM(qp)->sd_lock);
                qp->q_stream->sd_flag |= STREOPENFAIL;
                mutex_exit(&STREAM(qp)->sd_lock);
                return (error);
        }
        leavesq(qp->q_syncq, SQ_OPENCLOSE);

        /*
         * successful open should have done qprocson()
         */
        ASSERT(qprocsareon(_RD(qp)));
        return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
        queue_t *wqp = _WR(qp);
        ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

        if (STREAM_NEEDSERVICE(STREAM(qp)))
                stream_runservice(STREAM(qp));

        if (clmode) {
                /*
                 * Make sure that all the messages on the write side syncq are
                 * processed and nothing is left. Since we are closing, no new
                 * messages may appear there.
                 */
                wait_q_syncq(wqp);

                entersq(qp->q_syncq, SQ_OPENCLOSE);
                if (is_remove) {
                        mutex_enter(QLOCK(qp));
                        qp->q_flag |= _QREMOVING;
                        mutex_exit(QLOCK(qp));
                }
                (*qp->q_qinfo->qi_qclose)(qp, flag, crp);
                /*
                 * Check that qprocsoff() was actually called.
                 */
                ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

                leavesq(qp->q_syncq, SQ_OPENCLOSE);
        } else {
                disable_svc(qp);
        }

        /*
         * Allow any threads blocked in entersq to proceed and discover
         * the QWCLOSE is set.
         * Note: This assumes that all users of entersq check QWCLOSE.
         * Currently runservice is the only entersq that can happen
         * after removeq has finished.
         * Removeq will have discarded all messages destined to the closing
         * pair of queues from the syncq.
         * NOTE: Calling a function inside an assert is unconventional.
         * However, it does not cause any problem since flush_syncq() does
         * not change any state except when it returns non-zero, i.e.
         * when the assert will trigger.
         */
        ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
        ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
        ASSERT((qp->q_flag & QPERMOD) ||
            ((qp->q_syncq->sq_head == NULL) &&
            (wqp->q_syncq->sq_head == NULL)));

        /* release any fmodsw_impl_t structure held on behalf of the queue */
        ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
        if (qp->q_fp != NULL)
                fmodsw_rele(qp->q_fp);

        /* freeq removes us from the outer perimeter if any */
        freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
        queue_t *wqp = _WR(qp);

        ASSERT(qp->q_flag & QREADR);
        mutex_enter(QLOCK(qp));
        qp->q_flag |= QWCLOSE;
        mutex_exit(QLOCK(qp));
        mutex_enter(QLOCK(wqp));
        wqp->q_flag |= QWCLOSE;
        mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
        queue_t *wqp = _WR(qp);

        ASSERT(qp->q_flag & QREADR);
        mutex_enter(QLOCK(qp));
        qp->q_flag &= ~QWCLOSE;
        mutex_exit(QLOCK(qp));
        mutex_enter(QLOCK(wqp));
        wqp->q_flag &= ~QWCLOSE;
        mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through the following stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done).
 * Thus we can not reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
        if (qp->q_flag & QENAB && qhead != NULL) {
                queue_t *q_chase;
                queue_t *q_curr;
                int removed;

                mutex_enter(&service_queue);
                RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
                mutex_exit(&service_queue);
                if (removed) {
                        STRSTAT(qremoved);
                        qp->q_flag &= ~QENAB;
                }
        }
}

/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENAB flag and the setting of the QINSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENAB
 * and not yet QINSERVICE; hence wait_svc needs to check QENAB
 * as well as QINSERVICE.
 */
void
wait_svc(queue_t *qp)
{
        queue_t *wqp = _WR(qp);

        ASSERT(qp->q_flag & QREADR);

        /*
         * Try to remove queues from qhead/qtail list.
         */
        if (qhead != NULL) {
                remove_runlist(qp);
                remove_runlist(wqp);
        }
        /*
         * Wait till the syncqs associated with the queue disappear from the
         * background processing list.
         * This only needs to be done for non-PERMOD perimeters since
         * for PERMOD perimeters the syncq may be shared and will only be freed
         * when the last module/driver is unloaded.
         * If for PERMOD perimeters queue was on the syncq list, removeq()
         * should call propagate_syncq() or drain_syncq() for it. Both of these
         * functions remove the queue from its syncq list, so sqthread will not
         * try to access the queue.
         */
        if (!(qp->q_flag & QPERMOD)) {
                syncq_t *rsq = qp->q_syncq;
                syncq_t *wsq = wqp->q_syncq;

                /*
                 * Disable rsq and wsq and wait for any background processing of
                 * syncq to complete.
                 */
                wait_sq_svc(rsq);
                if (wsq != rsq)
                        wait_sq_svc(wsq);
        }

        mutex_enter(QLOCK(qp));
        while (qp->q_flag & (QINSERVICE|QENAB))
                cv_wait(&qp->q_wait, QLOCK(qp));
        mutex_exit(QLOCK(qp));
        mutex_enter(QLOCK(wqp));
        while (wqp->q_flag & (QINSERVICE|QENAB))
                cv_wait(&wqp->q_wait, QLOCK(wqp));
        mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
        mblk_t *tmp;
        ssize_t count;
        int error = 0;

        ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
            (flag & (U_TO_K | K_TO_K)) == K_TO_K);

        if (bp->b_datap->db_type == M_IOCTL) {
                count = ((struct iocblk *)bp->b_rptr)->ioc_count;
        } else {
                ASSERT(bp->b_datap->db_type == M_COPYIN);
                count = ((struct copyreq *)bp->b_rptr)->cq_size;
        }
        /*
         * strdoioctl validates ioc_count, so if this assert fails it
         * cannot be due to user error.
         */
        ASSERT(count >= 0);

        if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
            curproc->p_pid)) == NULL) {
                return (error);
        }
        error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
        if (error != 0) {
                freeb(tmp);
                return (error);
        }
        DB_CPID(tmp) = curproc->p_pid;
        tmp->b_wptr += count;
        bp->b_cont = tmp;

        return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
        ssize_t count;
        size_t n;
        int error;

        if (bp->b_datap->db_type == M_IOCACK)
                count = ((struct iocblk *)bp->b_rptr)->ioc_count;
        else {
                ASSERT(bp->b_datap->db_type == M_COPYOUT);
                count = ((struct copyreq *)bp->b_rptr)->cq_size;
        }
        ASSERT(count >= 0);

        for (bp = bp->b_cont; bp && count;
            count -= n, bp = bp->b_cont, arg += n) {
                n = MIN(count, bp->b_wptr - bp->b_rptr);
                error = strcopyout(bp->b_rptr, arg, n, copymode);
                if (error)
                        return (error);
        }
        ASSERT(count == 0);
        return (0);
}
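/*
 * For reference, the M_IOCTL message that putiocd()/getiocd() operate on
 * is typically shaped as below; the iocblk rides in the first mblk and the
 * data in b_cont (a sketch; mkiocb(9F) allocates the iocblk, and MY_CMD
 * and `size' are hypothetical):
 *
 *	mblk_t *iocp = mkiocb(MY_CMD);
 *	mblk_t *datap = allocb(size, BPRI_MED);
 *	datap->b_wptr += size;
 *	iocp->b_cont = datap;
 *	((struct iocblk *)iocp->b_rptr)->ioc_count = size;
 */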
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
        linkinfo_t *linkp;

        linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

        linkp->li_lblk.l_qtop = qup;
        linkp->li_lblk.l_qbot = qdown;
        linkp->li_fpdown = fpdown;

        mutex_enter(&strresources);
        linkp->li_next = linkinfo_list;
        linkp->li_prev = NULL;
        if (linkp->li_next)
                linkp->li_next->li_prev = linkp;
        linkinfo_list = linkp;
        linkp->li_lblk.l_index = ++lnk_id;
        ASSERT(lnk_id != 0);    /* this should never wrap in practice */
        mutex_exit(&strresources);

        return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
        mutex_enter(&strresources);
        if (linkp->li_next)
                linkp->li_next->li_prev = linkp->li_prev;
        if (linkp->li_prev)
                linkp->li_prev->li_next = linkp->li_next;
        else
                linkinfo_list = linkp->li_next;
        mutex_exit(&strresources);

        kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
        struct mux_node *np;
        struct mux_edge *ep;
        int i;
        major_t lomaj;
        major_t upmaj;

        /*
         * If the lower stream is a pipe/FIFO, return, since link
         * cycles cannot happen on pipes/FIFOs.
         */
        if (lostp->sd_vnode->v_type == VFIFO)
                return (0);

        for (i = 0; i < ss->ss_devcnt; i++) {
                np = &ss->ss_mux_nodes[i];
                MUX_CLEAR(np);
        }
        lomaj = getmajor(lostp->sd_vnode->v_rdev);
        upmaj = getmajor(upstp->sd_vnode->v_rdev);
        np = &ss->ss_mux_nodes[lomaj];
        for (;;) {
                if (!MUX_DIDVISIT(np)) {
                        if (np->mn_imaj == upmaj)
                                return (1);
                        if (np->mn_outp == NULL) {
                                MUX_VISIT(np);
                                if (np->mn_originp == NULL)
                                        return (0);
                                np = np->mn_originp;
                                continue;
                        }
                        MUX_VISIT(np);
                        np->mn_startp = np->mn_outp;
                } else {
                        if (np->mn_startp == NULL) {
                                if (np->mn_originp == NULL)
                                        return (0);
                                else {
                                        np = np->mn_originp;
                                        continue;
                                }
                        }
                        /*
                         * If ep->me_nodep is a FIFO (me_nodep == NULL),
                         * ignore the edge and move on. ep->me_nodep gets
                         * set to NULL in mux_addedge() if it is a FIFO.
                         */
                        ep = np->mn_startp;
                        np->mn_startp = ep->me_nextp;
                        if (ep->me_nodep == NULL)
                                continue;
                        ep->me_nodep->mn_originp = np;
                        np = ep->me_nodep;
                }
        }
}
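/*
 * Concretely, the walk above rejects, for example, an attempt to I_LINK
 * stream B underneath multiplexor A when A (or anything reachable from
 * A through existing lower links) is already linked underneath B; such
 * a link would close a loop in the mux graph.
 */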
/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
        linkinfo_t *linkp;
        struct mux_edge *mep;
        struct mux_node *mnp;
        queue_t *qup;

        mutex_enter(&strresources);
        if ((type & LINKTYPEMASK) == LINKNORMAL) {
                qup = getendq(stp->sd_wrq);
                for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
                        if ((qup == linkp->li_lblk.l_qtop) &&
                            (!index || (index == linkp->li_lblk.l_index))) {
                                mutex_exit(&strresources);
                                return (linkp);
                        }
                }
        } else {
                ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
                mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
                mep = mnp->mn_outp;
                while (mep) {
                        if ((index == 0) || (index == mep->me_muxid))
                                break;
                        mep = mep->me_nextp;
                }
                if (!mep) {
                        mutex_exit(&strresources);
                        return (NULL);
                }
                for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
                        if ((!linkp->li_lblk.l_qtop) &&
                            (mep->me_muxid == linkp->li_lblk.l_index)) {
                                mutex_exit(&strresources);
                                return (linkp);
                        }
                }
        }
        mutex_exit(&strresources);
        return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
        ASSERT(q != NULL);
        while (_SAMESTR(q))
                q = q->q_next;
        return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
        uint16_t count;

        mutex_enter(SQLOCK(sq));
        count = sq->sq_count;
        SQ_PUTLOCKS_ENTER(sq);
        SUM_SQ_PUTCOUNTS(sq, count);
        while (count != 0) {
                sq->sq_flags |= SQ_WANTWAKEUP;
                SQ_PUTLOCKS_EXIT(sq);
                cv_wait(&sq->sq_wait, SQLOCK(sq));
                count = sq->sq_count;
                SQ_PUTLOCKS_ENTER(sq);
                SUM_SQ_PUTCOUNTS(sq, count);
        }
        SQ_PUTLOCKS_EXIT(sq);
        mutex_exit(SQLOCK(sq));
}
/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
        if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
                syncq_t *sq = q->q_syncq;

                mutex_enter(SQLOCK(sq));
                while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
                        sq->sq_flags |= SQ_WANTWAKEUP;
                        cv_wait(&sq->sq_wait, SQLOCK(sq));
                }
                mutex_exit(SQLOCK(sq));
        }
}

int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
        struct stdata *stp;
        struct strioctl strioc;
        struct linkinfo *linkp;
        struct stdata *stpdown;
        struct streamtab *str;
        queue_t *passq;
        syncq_t *passyncq;
        queue_t *rq;
        cdevsw_impl_t *dp;
        uint32_t qflag;
        uint32_t sqtype;
        perdm_t *dmp;
        int error = 0;
        netstack_t *ns;
        str_stack_t *ss;

        stp = vp->v_stream;
        TRACE_1(TR_FAC_STREAMS_FR,
            TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
        /*
         * Test for invalid upper stream
         */
        if (stp->sd_flag & STRHUP) {
                return (ENXIO);
        }
        if (vp->v_type == VFIFO) {
                return (EINVAL);
        }
        if (stp->sd_strtab == NULL) {
                return (EINVAL);
        }
        if (!stp->sd_strtab->st_muxwinit) {
                return (EINVAL);
        }
        if (fpdown == NULL) {
                return (EBADF);
        }
        ns = netstack_find_by_cred(crp);
        ASSERT(ns != NULL);
        ss = ns->netstack_str;
        ASSERT(ss != NULL);

        if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
                netstack_rele(ss->ss_netstack);
                return (EINVAL);
        }
        mutex_enter(&muxifier);
        if (stp->sd_flag & STPLEX) {
                mutex_exit(&muxifier);
                netstack_rele(ss->ss_netstack);
                return (ENXIO);
        }

        /*
         * Test for invalid lower stream.
         * The check for the v_type != VFIFO and having a major
         * number not >= devcnt is done to avoid problems with
         * adding mux_node entry past the end of mux_nodes[].
         * For FIFOs we don't add an entry so this isn't a
         * problem.
         */
        if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
            (stpdown == stp) || (stpdown->sd_flag &
            (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
            ((stpdown->sd_vnode->v_type != VFIFO) &&
            (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
            linkcycle(stp, stpdown, ss)) {
                mutex_exit(&muxifier);
                netstack_rele(ss->ss_netstack);
                return (EINVAL);
        }
        TRACE_1(TR_FAC_STREAMS_FR,
            TR_STPDOWN, "stpdown:%p", stpdown);
        rq = getendq(stp->sd_wrq);
        if (cmd == I_PLINK)
                rq = NULL;

        linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

        strioc.ic_cmd = cmd;
        strioc.ic_timout = INFTIM;
        strioc.ic_len = sizeof (struct linkblk);
        strioc.ic_dp = (char *)&linkp->li_lblk;

        /*
         * STRPLUMB protects plumbing changes and should be set before
         * link_addpassthru()/link_rempassthru() are called, so it is set here
         * and cleared in the end of mlink when passthru queue is removed.
         * Setting of STRPLUMB prevents reopens of the stream while passthru
         * queue is in-place (it is not a proper module and doesn't have open
         * entry point).
         *
         * STPLEX prevents any threads from entering the stream from above. It
         * can't be set before the call to link_addpassthru() because putnext
         * from below may cause stream head I/O routines to be called and these
         * routines assert that STPLEX is not set. After link_addpassthru()
         * nothing may come from below since the pass queue syncq is blocked.
         * Note also that STPLEX should be cleared before the call to
         * link_rempassthru() since when messages start flowing to the stream
         * head (e.g. because of message propagation from the pass queue) stream
         * head I/O routines may be called with STPLEX flag set.
         *
         * When STPLEX is set, nothing may come into the stream from above and
         * it is safe to do a setq which will change stream head. So, the
         * correct sequence of actions is:
         *
         *	1) Set STRPLUMB
         *	2) Call link_addpassthru()
         *	3) Set STPLEX
         *	4) Call setq and update the stream state
         *	5) Clear STPLEX
         *	6) Call link_rempassthru()
         *	7) Clear STRPLUMB
         *
         * The same sequence applies to munlink() code.
         */
        mutex_enter(&stpdown->sd_lock);
        stpdown->sd_flag |= STRPLUMB;
        mutex_exit(&stpdown->sd_lock);
        /*
         * Add passthru queue below lower mux. This will block
         * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
         */
        passq = link_addpassthru(stpdown);

        mutex_enter(&stpdown->sd_lock);
        stpdown->sd_flag |= STPLEX;
        mutex_exit(&stpdown->sd_lock);

        rq = _RD(stpdown->sd_wrq);
        /*
         * There may be messages in the streamhead's syncq due to messages
         * that arrived before link_addpassthru() was done. To avoid
         * background processing of the syncq happening simultaneous with
         * setq processing, we disable the streamhead syncq and wait until
         * existing background thread finishes working on it.
         */
        wait_sq_svc(rq->q_syncq);
        passyncq = passq->q_syncq;
        if (!(passyncq->sq_flags & SQ_BLOCKED))
                blocksq(passyncq, SQ_BLOCKED, 0);

        ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
        ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
        rq->q_ptr = _WR(rq)->q_ptr = NULL;

        /* setq might sleep in allocator - avoid holding locks. */
        /* Note: we are holding muxifier here. */

        str = stp->sd_strtab;
        dp = &devimpl[getmajor(vp->v_rdev)];
        ASSERT(dp->d_str == str);

        qflag = dp->d_qflag;
        sqtype = dp->d_sqtype;

        /* create perdm_t if needed */
        if (NEED_DM(dp->d_dmp, qflag))
                dp->d_dmp = hold_dm(str, qflag, sqtype);

        dmp = dp->d_dmp;

        setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
            B_TRUE);

        /*
         * XXX Remove any "odd" messages from the queue.
         * Keep only M_DATA, M_PROTO, M_PCPROTO.
         */
        error = strdoioctl(stp, &strioc, FNATIVE,
            K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
        if (error != 0) {
                lbfree(linkp);

                if (!(passyncq->sq_flags & SQ_BLOCKED))
                        blocksq(passyncq, SQ_BLOCKED, 0);
                /*
                 * Restore the stream head queue and then remove
                 * the passq. Turn off STPLEX before we turn on
                 * the stream by removing the passq.
                 */
                rq->q_ptr = _WR(rq)->q_ptr = stpdown;
                setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
                    B_TRUE);

                mutex_enter(&stpdown->sd_lock);
                stpdown->sd_flag &= ~STPLEX;
                mutex_exit(&stpdown->sd_lock);

                link_rempassthru(passq);

                mutex_enter(&stpdown->sd_lock);
                stpdown->sd_flag &= ~STRPLUMB;
                /* Wakeup anyone waiting for STRPLUMB to clear. */
                cv_broadcast(&stpdown->sd_monitor);
                mutex_exit(&stpdown->sd_lock);

                mutex_exit(&muxifier);
                netstack_rele(ss->ss_netstack);
                return (error);
        }
*/ 1927 cv_broadcast(&stpdown->sd_monitor); 1928 mutex_exit(&stpdown->sd_lock); 1929 1930 mutex_exit(&muxifier); 1931 netstack_rele(ss->ss_netstack); 1932 return (error); 1933 } 1934 mutex_enter(&fpdown->f_tlock); 1935 fpdown->f_count++; 1936 mutex_exit(&fpdown->f_tlock); 1937 1938 /* 1939 * If we've made it here, the linkage is all set up, so we should also 1940 * set up the layered driver linkages. 1941 */ 1942 1943 ASSERT((cmd == I_LINK) || (cmd == I_PLINK)); 1944 if (cmd == I_LINK) { 1945 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL); 1946 } else { 1947 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST); 1948 } 1949 1950 link_rempassthru(passq); 1951 1952 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss); 1953 1954 /* 1955 * Mark the upper stream as having dependent links 1956 * so that strclose can clean it up. 1957 */ 1958 if (cmd == I_LINK) { 1959 mutex_enter(&stp->sd_lock); 1960 stp->sd_flag |= STRHASLINKS; 1961 mutex_exit(&stp->sd_lock); 1962 } 1963 /* 1964 * Wake up any other processes that may have been 1965 * waiting on the lower stream. These will all 1966 * error out. 1967 */ 1968 mutex_enter(&stpdown->sd_lock); 1969 /* The passthru module is removed so we may release STRPLUMB */ 1970 stpdown->sd_flag &= ~STRPLUMB; 1971 cv_broadcast(&rq->q_wait); 1972 cv_broadcast(&_WR(rq)->q_wait); 1973 cv_broadcast(&stpdown->sd_monitor); 1974 mutex_exit(&stpdown->sd_lock); 1975 mutex_exit(&muxifier); 1976 *rvalp = linkp->li_lblk.l_index; 1977 netstack_rele(ss->ss_netstack); 1978 return (0); 1979 } 1980 1981 int 1982 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink) 1983 { 1984 int ret; 1985 struct file *fpdown; 1986 1987 fpdown = getf(arg); 1988 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink); 1989 if (fpdown != NULL) 1990 releasef(arg); 1991 return (ret); 1992 } 1993 1994 /* 1995 * Unlink a multiplexor link. stp is the controlling stream for the 1996 * link, and linkp points to the link's entry in the linkinfo list. 1997 * The muxifier lock must be held on entry and is dropped on exit. 1998 * 1999 * NOTE: Currently it is assumed that the mux will process all the messages 2000 * sitting on its queue before ACKing the UNLINK. It is the responsibility 2001 * of the mux to handle all the messages that arrive before UNLINK. 2002 * If the mux has to send down messages on its lower stream before 2003 * ACKing I_UNLINK, then it *should* know to handle messages even 2004 * after the UNLINK is acked (actually it should be able to handle them until 2005 * we re-block the read side of the pass queue here). If the mux does not 2006 * open up the lower stream, any messages that arrive during UNLINK 2007 * will be put in the stream head. If the lower stream is opened 2008 * up, some messages might land in the stream head, depending on when 2009 * the message arrived and when the read side of the pass queue was 2010 * re-blocked. 2011 */ 2012 int 2013 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp, 2014 str_stack_t *ss) 2015 { 2016 struct strioctl strioc; 2017 struct stdata *stpdown; 2018 queue_t *rq, *wrq; 2019 queue_t *passq; 2020 syncq_t *passyncq; 2021 int error = 0; 2022 file_t *fpdown; 2023 2024 ASSERT(MUTEX_HELD(&muxifier)); 2025 2026 stpdown = linkp->li_fpdown->f_vnode->v_stream; 2027 2028 /* 2029 * See the comment in mlink() concerning STRPLUMB/STPLEX flags. 2030 */ 2031 mutex_enter(&stpdown->sd_lock); 2032 stpdown->sd_flag |= STRPLUMB; 2033 mutex_exit(&stpdown->sd_lock); 2034 2035 /* 2036 * Add a passthru queue below the lower mux.
This will block the 2037 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK. 2038 */ 2039 passq = link_addpassthru(stpdown); 2040 2041 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2042 strioc.ic_cmd = I_UNLINK; 2043 else 2044 strioc.ic_cmd = I_PUNLINK; 2045 strioc.ic_timout = INFTIM; 2046 strioc.ic_len = sizeof (struct linkblk); 2047 strioc.ic_dp = (char *)&linkp->li_lblk; 2048 2049 error = strdoioctl(stp, &strioc, FNATIVE, 2050 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2051 2052 /* 2053 * If there was an error and this is not called via strclose, 2054 * return to the user. Otherwise, pretend there was no error 2055 * and close the link. 2056 */ 2057 if (error) { 2058 if (flag & LINKCLOSE) { 2059 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2060 "unlink ioctl, closing anyway (%d)\n", error); 2061 } else { 2062 link_rempassthru(passq); 2063 mutex_enter(&stpdown->sd_lock); 2064 stpdown->sd_flag &= ~STRPLUMB; 2065 cv_broadcast(&stpdown->sd_monitor); 2066 mutex_exit(&stpdown->sd_lock); 2067 mutex_exit(&muxifier); 2068 return (error); 2069 } 2070 } 2071 2072 mux_rmvedge(stp, linkp->li_lblk.l_index, ss); 2073 fpdown = linkp->li_fpdown; 2074 lbfree(linkp); 2075 2076 /* 2077 * We go ahead and drop muxifier here--it's a nasty global lock that 2078 * can slow others down. It's okay to do so since attempts to mlink() this 2079 * stream will be stopped because STPLEX is still set in the stdata 2080 * structure, and munlink() is stopped because mux_rmvedge() and 2081 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2082 * respectively. Note that we defer the closef() of fpdown until 2083 * after we drop muxifier since strclose() can call munlinkall(). 2084 */ 2085 mutex_exit(&muxifier); 2086 2087 wrq = stpdown->sd_wrq; 2088 rq = _RD(wrq); 2089 2090 /* 2091 * Get rid of outstanding service procedure runs, before we make 2092 * it a stream head, since a stream head doesn't have any service 2093 * procedure. 2094 */ 2095 disable_svc(rq); 2096 wait_svc(rq); 2097 2098 /* 2099 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2100 * is queued up to be finished. The mux should take care that nothing is 2101 * sent down to this queue. We should do it now, as we're about to block 2102 * passyncq if it was unblocked. 2103 */ 2104 if (wrq->q_flag & QPERMOD) { 2105 syncq_t *sq = wrq->q_syncq; 2106 2107 mutex_enter(SQLOCK(sq)); 2108 while (wrq->q_sqflags & Q_SQQUEUED) { 2109 sq->sq_flags |= SQ_WANTWAKEUP; 2110 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2111 } 2112 mutex_exit(SQLOCK(sq)); 2113 } 2114 passyncq = passq->q_syncq; 2115 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2116 2117 syncq_t *sq, *outer; 2118 2119 /* 2120 * Messages could be flowing from underneath. We will 2121 * block the read side of the passq. This would be 2122 * sufficient for QPAIR and QPERQ muxes to ensure 2123 * that no data is flowing up into this queue 2124 * and hence no thread active in this instance of 2125 * the lower mux. But for QPERMOD and QMTOUTPERIM there 2126 * could be messages on the inner and outer/inner 2127 * syncqs respectively. We will wait for them to drain. 2128 * Because passq is blocked, messages end up in the syncq, 2129 * and qfill_syncq could possibly end up setting QFULL, 2130 * which will access rq->q_flag. Hence, we have to 2131 * acquire the QLOCK in setq. 2132 * 2133 * XXX Messages can also flow from the top into this 2134 * queue even though the unlink is over (e.g. some instance 2135 * of putnext() called from the top that has still not 2136 * accessed this queue; and also putq(lowerq)?).
2137 * Solution: how about blocking the l_qtop queue? 2138 * Do we really care about such pure D_MP muxes? 2139 */ 2140 2141 blocksq(passyncq, SQ_BLOCKED, 0); 2142 2143 sq = rq->q_syncq; 2144 if ((outer = sq->sq_outer) != NULL) { 2145 2146 /* 2147 * We just have to wait for the outer sq_count 2148 * to drop to zero. As this does not prevent new 2149 * messages from entering the outer perimeter, this 2150 * is subject to starvation. 2151 * 2152 * NOTE: Because of the blocksq above, messages could 2153 * be in the inner syncq only because of some 2154 * thread holding the outer perimeter exclusively. 2155 * Hence it would be sufficient to wait for the 2156 * exclusive holder of the outer perimeter to drain 2157 * the inner and outer syncqs. But we will not depend 2158 * on this feature and hence check the inner syncqs 2159 * separately. 2160 */ 2161 wait_syncq(outer); 2162 } 2163 2164 2165 /* 2166 * There could be messages destined for 2167 * this queue. Let the exclusive holder 2168 * drain it. 2169 */ 2170 2171 wait_syncq(sq); 2172 ASSERT((rq->q_flag & QPERMOD) || 2173 ((rq->q_syncq->sq_head == NULL) && 2174 (_WR(rq)->q_syncq->sq_head == NULL))); 2175 } 2176 2177 /* 2178 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special 2179 * case as we don't disable its syncq or remove it from the syncq 2180 * service list. 2181 */ 2182 if (rq->q_flag & QPERMOD) { 2183 syncq_t *sq = rq->q_syncq; 2184 2185 mutex_enter(SQLOCK(sq)); 2186 while (rq->q_sqflags & Q_SQQUEUED) { 2187 sq->sq_flags |= SQ_WANTWAKEUP; 2188 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2189 } 2190 mutex_exit(SQLOCK(sq)); 2191 } 2192 2193 /* 2194 * flush_syncq changes states only when there are some messages to 2195 * free, i.e. when it returns a non-zero value. 2196 */ 2197 ASSERT(flush_syncq(rq->q_syncq, rq) == 0); 2198 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0); 2199 2200 /* 2201 * Nobody else should know about this queue now. 2202 * If the mux did not process the messages before 2203 * acking the I_UNLINK, free them now. 2204 */ 2205 2206 flushq(rq, FLUSHALL); 2207 flushq(_WR(rq), FLUSHALL); 2208 2209 /* 2210 * Convert the mux lower queue into a stream head queue. 2211 * Turn off STPLEX before we turn on the stream by removing the passq. 2212 */ 2213 rq->q_ptr = wrq->q_ptr = stpdown; 2214 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE); 2215 2216 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 2217 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 2218 2219 enable_svc(rq); 2220 2221 /* 2222 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still 2223 * needs to be set to prevent reopen() of the stream - such a reopen may 2224 * try to call the non-existent pass queue open routine and panic. 2225 */ 2226 mutex_enter(&stpdown->sd_lock); 2227 stpdown->sd_flag &= ~STPLEX; 2228 mutex_exit(&stpdown->sd_lock); 2229 2230 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) || 2231 ((flag & LINKTYPEMASK) == LINKPERSIST)); 2232 2233 /* clean up the layered driver linkages */ 2234 if ((flag & LINKTYPEMASK) == LINKNORMAL) { 2235 ldi_munlink_fp(stp, fpdown, LINKNORMAL); 2236 } else { 2237 ldi_munlink_fp(stp, fpdown, LINKPERSIST); 2238 } 2239 2240 link_rempassthru(passq); 2241 2242 /* 2243 * Now all plumbing changes are finished and STRPLUMB is no 2244 * longer needed.
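* Clearing STRPLUMB below is step 7) of the locking sequence documented in * mlink_file() above; clearing STPLEX and calling link_rempassthru() were * steps 5) and 6).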
2245 */ 2246 mutex_enter(&stpdown->sd_lock); 2247 stpdown->sd_flag &= ~STRPLUMB; 2248 cv_broadcast(&stpdown->sd_monitor); 2249 mutex_exit(&stpdown->sd_lock); 2250 2251 (void) closef(fpdown); 2252 return (0); 2253 } 2254 2255 /* 2256 * Unlink all multiplexor links for which stp is the controlling stream. 2257 * Return 0, or a non-zero errno on failure. 2258 */ 2259 int 2260 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2261 { 2262 linkinfo_t *linkp; 2263 int error = 0; 2264 2265 mutex_enter(&muxifier); 2266 while (linkp = findlinks(stp, 0, flag, ss)) { 2267 /* 2268 * munlink() releases the muxifier lock. 2269 */ 2270 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2271 return (error); 2272 mutex_enter(&muxifier); 2273 } 2274 mutex_exit(&muxifier); 2275 return (0); 2276 } 2277 2278 /* 2279 * A multiplexor link has been made. Add an 2280 * edge to the directed graph. 2281 */ 2282 void 2283 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2284 { 2285 struct mux_node *np; 2286 struct mux_edge *ep; 2287 major_t upmaj; 2288 major_t lomaj; 2289 2290 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2291 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2292 np = &ss->ss_mux_nodes[upmaj]; 2293 if (np->mn_outp) { 2294 ep = np->mn_outp; 2295 while (ep->me_nextp) 2296 ep = ep->me_nextp; 2297 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2298 ep = ep->me_nextp; 2299 } else { 2300 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2301 ep = np->mn_outp; 2302 } 2303 ep->me_nextp = NULL; 2304 ep->me_muxid = muxid; 2305 /* 2306 * Save the dev_t for the purposes of str_stack_shutdown. 2307 * str_stack_shutdown assumes that the device allows reopen, since 2308 * this dev_t is the one after any cloning by xx_open(). 2309 * Would prefer finding the dev_t from before any cloning, 2310 * but specfs doesn't retain that. 2311 */ 2312 ep->me_dev = upstp->sd_vnode->v_rdev; 2313 if (lostp->sd_vnode->v_type == VFIFO) 2314 ep->me_nodep = NULL; 2315 else 2316 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2317 } 2318 2319 /* 2320 * A multiplexor link has been removed. Remove the 2321 * edge in the directed graph. 2322 */ 2323 void 2324 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2325 { 2326 struct mux_node *np; 2327 struct mux_edge *ep; 2328 struct mux_edge *pep = NULL; 2329 major_t upmaj; 2330 2331 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2332 np = &ss->ss_mux_nodes[upmaj]; 2333 ASSERT(np->mn_outp != NULL); 2334 ep = np->mn_outp; 2335 while (ep) { 2336 if (ep->me_muxid == muxid) { 2337 if (pep) 2338 pep->me_nextp = ep->me_nextp; 2339 else 2340 np->mn_outp = ep->me_nextp; 2341 kmem_free(ep, sizeof (struct mux_edge)); 2342 return; 2343 } 2344 pep = ep; 2345 ep = ep->me_nextp; 2346 } 2347 ASSERT(0); /* should not reach here */ 2348 } 2349 2350 /* 2351 * Translate the device flags (from conf.h) to the corresponding 2352 * qflag and sq_flag (type) values. 
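* For example, tracing the code below: a driver declaring D_MP alone maps to * qflag = QMTSAFE and sqtype = SQ_CI|SQ_CO, while D_MP|D_MTPERQ|D_MTOUTPERIM * maps to qflag = QPERQ|QMTOUTPERIM and sqtype = SQ_CO; adding D_MTPUTSHARED * to the latter would also set SQ_CIPUT (assuming, as in conf.h, that * D_MTPUTSHARED is part of D_MTINNER_MOD).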
2353 */ 2354 int 2355 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2356 uint32_t *sqtypep) 2357 { 2358 uint32_t qflag = 0; 2359 uint32_t sqtype = 0; 2360 2361 if (devflag & _D_OLD) 2362 goto bad; 2363 2364 /* Inner perimeter presence and scope */ 2365 switch (devflag & D_MTINNER_MASK) { 2366 case D_MP: 2367 qflag |= QMTSAFE; 2368 sqtype |= SQ_CI; 2369 break; 2370 case D_MTPERQ|D_MP: 2371 qflag |= QPERQ; 2372 break; 2373 case D_MTQPAIR|D_MP: 2374 qflag |= QPAIR; 2375 break; 2376 case D_MTPERMOD|D_MP: 2377 qflag |= QPERMOD; 2378 break; 2379 default: 2380 goto bad; 2381 } 2382 2383 /* Outer perimeter */ 2384 if (devflag & D_MTOUTPERIM) { 2385 switch (devflag & D_MTINNER_MASK) { 2386 case D_MP: 2387 case D_MTPERQ|D_MP: 2388 case D_MTQPAIR|D_MP: 2389 break; 2390 default: 2391 goto bad; 2392 } 2393 qflag |= QMTOUTPERIM; 2394 } 2395 2396 /* Inner perimeter modifiers */ 2397 if (devflag & D_MTINNER_MOD) { 2398 switch (devflag & D_MTINNER_MASK) { 2399 case D_MP: 2400 goto bad; 2401 default: 2402 break; 2403 } 2404 if (devflag & D_MTPUTSHARED) 2405 sqtype |= SQ_CIPUT; 2406 if (devflag & _D_MTOCSHARED) { 2407 /* 2408 * The code in putnext assumes that it has the 2409 * highest concurrency by not checking sq_count. 2410 * Thus _D_MTOCSHARED can only be supported when 2411 * D_MTPUTSHARED is set. 2412 */ 2413 if (!(devflag & D_MTPUTSHARED)) 2414 goto bad; 2415 sqtype |= SQ_CIOC; 2416 } 2417 if (devflag & _D_MTCBSHARED) { 2418 /* 2419 * The code in putnext assumes that it has the 2420 * highest concurrency by not checking sq_count. 2421 * Thus _D_MTCBSHARED can only be supported when 2422 * D_MTPUTSHARED is set. 2423 */ 2424 if (!(devflag & D_MTPUTSHARED)) 2425 goto bad; 2426 sqtype |= SQ_CICB; 2427 } 2428 if (devflag & _D_MTSVCSHARED) { 2429 /* 2430 * The code in putnext assumes that it has the 2431 * highest concurrency by not checking sq_count. 2432 * Thus _D_MTSVCSHARED can only be supported when 2433 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2434 * supported only for QPERMOD. 2435 */ 2436 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2437 goto bad; 2438 sqtype |= SQ_CISVC; 2439 } 2440 } 2441 2442 /* Default outer perimeter concurrency */ 2443 sqtype |= SQ_CO; 2444 2445 /* Outer perimeter modifiers */ 2446 if (devflag & D_MTOCEXCL) { 2447 if (!(devflag & D_MTOUTPERIM)) { 2448 /* No outer perimeter */ 2449 goto bad; 2450 } 2451 sqtype &= ~SQ_COOC; 2452 } 2453 2454 /* Synchronous Streams extended qinit structure */ 2455 if (devflag & D_SYNCSTR) 2456 qflag |= QSYNCSTR; 2457 2458 /* 2459 * Private flag used by a transport module to indicate 2460 * to sockfs that it supports direct-access mode without 2461 * having to go through STREAMS. 2462 */ 2463 if (devflag & _D_DIRECT) { 2464 /* Reject unless the module is fully-MT (no perimeter) */ 2465 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2466 goto bad; 2467 qflag |= _QDIRECT; 2468 } 2469 2470 *qflagp = qflag; 2471 *sqtypep = sqtype; 2472 return (0); 2473 2474 bad: 2475 cmn_err(CE_WARN, 2476 "stropen: bad MT flags (0x%x) in driver '%s'", 2477 (int)(qflag & D_MTSAFETY_MASK), 2478 stp->st_rdinit->qi_minfo->mi_idname); 2479 2480 return (EINVAL); 2481 } 2482 2483 /* 2484 * Set the interface values for a pair of queues (qinit structure, 2485 * packet sizes, water marks). 2486 * setq assumes that the caller does not have a claim (entersq or claimq) 2487 * on the queue. 
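* For example, mlink_file() above installs the mux qinit structures with * setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype, B_TRUE), * and munlink() restores the stream head interface with * setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE).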
2488 */ 2489 void 2490 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2491 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2492 { 2493 queue_t *wq; 2494 syncq_t *sq, *outer; 2495 2496 ASSERT(rq->q_flag & QREADR); 2497 ASSERT((qflag & QMT_TYPEMASK) != 0); 2498 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2499 2500 wq = _WR(rq); 2501 rq->q_qinfo = rinit; 2502 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2503 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2504 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2505 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2506 wq->q_qinfo = winit; 2507 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2508 wq->q_lowat = winit->qi_minfo->mi_lowat; 2509 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2510 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2511 2512 /* Remove old syncqs */ 2513 sq = rq->q_syncq; 2514 outer = sq->sq_outer; 2515 if (outer != NULL) { 2516 ASSERT(wq->q_syncq->sq_outer == outer); 2517 outer_remove(outer, rq->q_syncq); 2518 if (wq->q_syncq != rq->q_syncq) 2519 outer_remove(outer, wq->q_syncq); 2520 } 2521 ASSERT(sq->sq_outer == NULL); 2522 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2523 2524 if (sq != SQ(rq)) { 2525 if (!(rq->q_flag & QPERMOD)) 2526 free_syncq(sq); 2527 if (wq->q_syncq == rq->q_syncq) 2528 wq->q_syncq = NULL; 2529 rq->q_syncq = NULL; 2530 } 2531 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2532 wq->q_syncq != SQ(rq)) { 2533 free_syncq(wq->q_syncq); 2534 wq->q_syncq = NULL; 2535 } 2536 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2537 rq->q_syncq->sq_tail == NULL)); 2538 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2539 wq->q_syncq->sq_tail == NULL)); 2540 2541 if (!(rq->q_flag & QPERMOD) && 2542 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2543 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2544 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2545 rq->q_syncq->sq_nciputctrl, 0); 2546 ASSERT(ciputctrl_cache != NULL); 2547 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2548 rq->q_syncq->sq_ciputctrl = NULL; 2549 rq->q_syncq->sq_nciputctrl = 0; 2550 } 2551 2552 if (!(wq->q_flag & QPERMOD) && 2553 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2554 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2555 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2556 wq->q_syncq->sq_nciputctrl, 0); 2557 ASSERT(ciputctrl_cache != NULL); 2558 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2559 wq->q_syncq->sq_ciputctrl = NULL; 2560 wq->q_syncq->sq_nciputctrl = 0; 2561 } 2562 2563 sq = SQ(rq); 2564 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2565 ASSERT(sq->sq_outer == NULL); 2566 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2567 2568 /* 2569 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2570 * bits in sq_flag based on the sqtype. 2571 */ 2572 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2573 2574 rq->q_syncq = wq->q_syncq = sq; 2575 sq->sq_type = sqtype; 2576 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2577 2578 /* 2579 * We are making sq_svcflags zero, 2580 * resetting SQ_DISABLED in case it was set by 2581 * wait_svc() in the munlink path. 2582 * 2583 */ 2584 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2585 sq->sq_svcflags = 0; 2586 2587 /* 2588 * We need to acquire the lock here for the mlink and munlink case, 2589 * where canputnext, backenable, etc can access the q_flag. 
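* When lock_needed is B_FALSE, the caller is presumed to hold the only * reference to the queue pair (e.g. a freshly allocated pair that is not * yet visible to other threads), so q_flag may be updated without QLOCK.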
2590 */ 2591 if (lock_needed) { 2592 mutex_enter(QLOCK(rq)); 2593 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2594 mutex_exit(QLOCK(rq)); 2595 mutex_enter(QLOCK(wq)); 2596 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2597 mutex_exit(QLOCK(wq)); 2598 } else { 2599 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2600 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2601 } 2602 2603 if (qflag & QPERQ) { 2604 /* Allocate a separate syncq for the write side */ 2605 sq = new_syncq(); 2606 sq->sq_type = rq->q_syncq->sq_type; 2607 sq->sq_flags = rq->q_syncq->sq_flags; 2608 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2609 sq->sq_oprev == NULL); 2610 wq->q_syncq = sq; 2611 } 2612 if (qflag & QPERMOD) { 2613 sq = dmp->dm_sq; 2614 2615 /* 2616 * Assert that we do have an inner perimeter syncq and that it 2617 * does not have an outer perimeter associated with it. 2618 */ 2619 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2620 sq->sq_oprev == NULL); 2621 rq->q_syncq = wq->q_syncq = sq; 2622 } 2623 if (qflag & QMTOUTPERIM) { 2624 outer = dmp->dm_sq; 2625 2626 ASSERT(outer->sq_outer == NULL); 2627 outer_insert(outer, rq->q_syncq); 2628 if (wq->q_syncq != rq->q_syncq) 2629 outer_insert(outer, wq->q_syncq); 2630 } 2631 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2632 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2633 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2634 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2635 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2636 2637 /* 2638 * Initialize struio() types. 2639 */ 2640 rq->q_struiot = 2641 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2642 wq->q_struiot = 2643 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE; 2644 } 2645 2646 perdm_t * 2647 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2648 { 2649 syncq_t *sq; 2650 perdm_t **pp; 2651 perdm_t *p; 2652 perdm_t *dmp; 2653 2654 ASSERT(str != NULL); 2655 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2656 2657 rw_enter(&perdm_rwlock, RW_READER); 2658 for (p = perdm_list; p != NULL; p = p->dm_next) { 2659 if (p->dm_str == str) { /* found one */ 2660 atomic_add_32(&(p->dm_ref), 1); 2661 rw_exit(&perdm_rwlock); 2662 return (p); 2663 } 2664 } 2665 rw_exit(&perdm_rwlock); 2666 2667 sq = new_syncq(); 2668 if (qflag & QPERMOD) { 2669 sq->sq_type = sqtype | SQ_PERMOD; 2670 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2671 } else { 2672 ASSERT(qflag & QMTOUTPERIM); 2673 sq->sq_onext = sq->sq_oprev = sq; 2674 } 2675 2676 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2677 dmp->dm_sq = sq; 2678 dmp->dm_str = str; 2679 dmp->dm_ref = 1; 2680 dmp->dm_next = NULL; 2681 2682 rw_enter(&perdm_rwlock, RW_WRITER); 2683 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2684 if (p->dm_str == str) { /* already present */ 2685 p->dm_ref++; 2686 rw_exit(&perdm_rwlock); 2687 free_syncq(sq); 2688 kmem_free(dmp, sizeof (perdm_t)); 2689 return (p); 2690 } 2691 } 2692 2693 *pp = dmp; 2694 rw_exit(&perdm_rwlock); 2695 return (dmp); 2696 } 2697 2698 void 2699 rele_dm(perdm_t *dmp) 2700 { 2701 perdm_t **pp; 2702 perdm_t *p; 2703 2704 rw_enter(&perdm_rwlock, RW_WRITER); 2705 ASSERT(dmp->dm_ref > 0); 2706 2707 if (--dmp->dm_ref > 0) { 2708 rw_exit(&perdm_rwlock); 2709 return; 2710 } 2711 2712 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2713 if (p == dmp) 2714 break; 2715 ASSERT(p == dmp); 2716 *pp = p->dm_next; 2717 rw_exit(&perdm_rwlock); 2718 2719 /* 2720 * Wait for any 
background processing that relies on the 2721 * syncq to complete before it is freed. 2722 */ 2723 wait_sq_svc(p->dm_sq); 2724 free_syncq(p->dm_sq); 2725 kmem_free(p, sizeof (perdm_t)); 2726 } 2727 2728 /* 2729 * Make a protocol message given control and data buffers. 2730 * n.b., this can block; be careful of what locks you hold when calling it. 2731 * 2732 * If sd_maxblk is less than *iosize this routine can fail part way through 2733 * (due to an allocation failure). In this case on return *iosize will contain 2734 * the amount that was consumed. Otherwise *iosize will not be modified 2735 * i.e. it will contain the amount that was consumed. 2736 */ 2737 int 2738 strmakemsg( 2739 struct strbuf *mctl, 2740 ssize_t *iosize, 2741 struct uio *uiop, 2742 stdata_t *stp, 2743 int32_t flag, 2744 mblk_t **mpp) 2745 { 2746 mblk_t *mpctl = NULL; 2747 mblk_t *mpdata = NULL; 2748 int error; 2749 2750 ASSERT(uiop != NULL); 2751 2752 *mpp = NULL; 2753 /* Create control part, if any */ 2754 if ((mctl != NULL) && (mctl->len >= 0)) { 2755 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2756 if (error) 2757 return (error); 2758 } 2759 /* Create data part, if any */ 2760 if (*iosize >= 0) { 2761 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2762 if (error) { 2763 freemsg(mpctl); 2764 return (error); 2765 } 2766 } 2767 if (mpctl != NULL) { 2768 if (mpdata != NULL) 2769 linkb(mpctl, mpdata); 2770 *mpp = mpctl; 2771 } else { 2772 *mpp = mpdata; 2773 } 2774 return (0); 2775 } 2776 2777 /* 2778 * Make the control part of a protocol message given a control buffer. 2779 * n.b., this can block; be careful of what locks you hold when calling it. 2780 */ 2781 int 2782 strmakectl( 2783 struct strbuf *mctl, 2784 int32_t flag, 2785 int32_t fflag, 2786 mblk_t **mpp) 2787 { 2788 mblk_t *bp = NULL; 2789 unsigned char msgtype; 2790 int error = 0; 2791 cred_t *cr = CRED(); 2792 2793 /* We do not support interrupt threads using the stream head to send */ 2794 ASSERT(cr != NULL); 2795 2796 *mpp = NULL; 2797 /* 2798 * Create control part of message, if any. 2799 */ 2800 if ((mctl != NULL) && (mctl->len >= 0)) { 2801 caddr_t base; 2802 int ctlcount; 2803 int allocsz; 2804 2805 if (flag & RS_HIPRI) 2806 msgtype = M_PCPROTO; 2807 else 2808 msgtype = M_PROTO; 2809 2810 ctlcount = mctl->len; 2811 base = mctl->buf; 2812 2813 /* 2814 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2815 * blocks by increasing the size to something more usable. 2816 */ 2817 allocsz = MAX(ctlcount, 64); 2818 2819 /* 2820 * Range checking has already been done; simply try 2821 * to allocate a message block for the ctl part. 2822 */ 2823 while ((bp = allocb_cred(allocsz, cr, 2824 curproc->p_pid)) == NULL) { 2825 if (fflag & (FNDELAY|FNONBLOCK)) 2826 return (EAGAIN); 2827 if (error = strwaitbuf(allocsz, BPRI_MED)) 2828 return (error); 2829 } 2830 2831 bp->b_datap->db_type = msgtype; 2832 if (copyin(base, bp->b_wptr, ctlcount)) { 2833 freeb(bp); 2834 return (EFAULT); 2835 } 2836 bp->b_wptr += ctlcount; 2837 } 2838 *mpp = bp; 2839 return (0); 2840 } 2841 2842 /* 2843 * Make a protocol message given data buffers. 2844 * n.b., this can block; be careful of what locks you hold when calling it. 2845 * 2846 * If sd_maxblk is less than *iosize this routine can fail part way through 2847 * (due to an allocation failure). In this case on return *iosize will contain 2848 * the amount that was consumed. Otherwise *iosize will not be modified 2849 * i.e. it will contain the amount that was consumed. 
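* (In the no-failure case everything requested is consumed, so the * unmodified *iosize still equals the amount consumed.)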
2850 */ 2851 int 2852 strmakedata( 2853 ssize_t *iosize, 2854 struct uio *uiop, 2855 stdata_t *stp, 2856 int32_t flag, 2857 mblk_t **mpp) 2858 { 2859 mblk_t *mp = NULL; 2860 mblk_t *bp; 2861 int wroff = (int)stp->sd_wroff; 2862 int tail_len = (int)stp->sd_tail; 2863 int extra = wroff + tail_len; 2864 int error = 0; 2865 ssize_t maxblk; 2866 ssize_t count = *iosize; 2867 cred_t *cr; 2868 2869 *mpp = NULL; 2870 if (count < 0) 2871 return (0); 2872 2873 /* We do not support interrupt threads using the stream head to send */ 2874 cr = CRED(); 2875 ASSERT(cr != NULL); 2876 2877 maxblk = stp->sd_maxblk; 2878 if (maxblk == INFPSZ) 2879 maxblk = count; 2880 2881 /* 2882 * Create data part of message, if any. 2883 */ 2884 do { 2885 ssize_t size; 2886 dblk_t *dp; 2887 2888 ASSERT(uiop); 2889 2890 size = MIN(count, maxblk); 2891 2892 while ((bp = allocb_cred(size + extra, cr, 2893 curproc->p_pid)) == NULL) { 2894 error = EAGAIN; 2895 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2896 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2897 if (count == *iosize) { 2898 freemsg(mp); 2899 return (error); 2900 } else { 2901 *iosize -= count; 2902 *mpp = mp; 2903 return (0); 2904 } 2905 } 2906 } 2907 dp = bp->b_datap; 2908 dp->db_cpid = curproc->p_pid; 2909 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2910 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2911 2912 if (flag & STRUIO_POSTPONE) { 2913 /* 2914 * Setup the stream uio portion of the 2915 * dblk for subsequent use by struioget(). 2916 */ 2917 dp->db_struioflag = STRUIO_SPEC; 2918 dp->db_cksumstart = 0; 2919 dp->db_cksumstuff = 0; 2920 dp->db_cksumend = size; 2921 *(long long *)dp->db_struioun.data = 0ll; 2922 bp->b_wptr += size; 2923 } else { 2924 if (stp->sd_copyflag & STRCOPYCACHED) 2925 uiop->uio_extflg |= UIO_COPY_CACHED; 2926 2927 if (size != 0) { 2928 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2929 uiop); 2930 if (error != 0) { 2931 freeb(bp); 2932 freemsg(mp); 2933 return (error); 2934 } 2935 } 2936 bp->b_wptr += size; 2937 2938 if (stp->sd_wputdatafunc != NULL) { 2939 mblk_t *newbp; 2940 2941 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2942 bp, NULL, NULL, NULL, NULL); 2943 if (newbp == NULL) { 2944 freeb(bp); 2945 freemsg(mp); 2946 return (ECOMM); 2947 } 2948 bp = newbp; 2949 } 2950 } 2951 2952 count -= size; 2953 2954 if (mp == NULL) 2955 mp = bp; 2956 else 2957 linkb(mp, bp); 2958 } while (count > 0); 2959 2960 *mpp = mp; 2961 return (0); 2962 } 2963 2964 /* 2965 * Wait for a buffer to become available. Return non-zero errno 2966 * if not able to wait, 0 if buffer is probably there. 2967 */ 2968 int 2969 strwaitbuf(size_t size, int pri) 2970 { 2971 bufcall_id_t id; 2972 2973 mutex_enter(&bcall_monitor); 2974 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2975 &ttoproc(curthread)->p_flag_cv)) == 0) { 2976 mutex_exit(&bcall_monitor); 2977 return (ENOSR); 2978 } 2979 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2980 unbufcall(id); 2981 mutex_exit(&bcall_monitor); 2982 return (EINTR); 2983 } 2984 unbufcall(id); 2985 mutex_exit(&bcall_monitor); 2986 return (0); 2987 } 2988 2989 /* 2990 * This function waits for a read or write event to happen on a stream. 2991 * fmode can specify FNDELAY and/or FNONBLOCK. 2992 * The timeout is in ms with -1 meaning infinite. 2993 * The flag values work as follows: 2994 * READWAIT Check for read side errors, send M_READ 2995 * GETWAIT Check for read side errors, no M_READ 2996 * WRITEWAIT Check for write side errors. 
2997 * NOINTR Do not return error if nonblocking or timeout. 2998 * STR_NOERROR Ignore all errors except STPLEX. 2999 * STR_NOSIG Ignore/hold signals during the duration of the call. 3000 * STR_PEEK Pass through the strgeterr(). 3001 */ 3002 int 3003 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 3004 int *done) 3005 { 3006 int slpflg, errs; 3007 int error; 3008 kcondvar_t *sleepon; 3009 mblk_t *mp; 3010 ssize_t *rd_count; 3011 clock_t rval; 3012 3013 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3014 if ((flag & READWAIT) || (flag & GETWAIT)) { 3015 slpflg = RSLEEP; 3016 sleepon = &_RD(stp->sd_wrq)->q_wait; 3017 errs = STRDERR|STPLEX; 3018 } else { 3019 slpflg = WSLEEP; 3020 sleepon = &stp->sd_wrq->q_wait; 3021 errs = STWRERR|STRHUP|STPLEX; 3022 } 3023 if (flag & STR_NOERROR) 3024 errs = STPLEX; 3025 3026 if (stp->sd_wakeq & slpflg) { 3027 /* 3028 * A strwakeq() is pending, no need to sleep. 3029 */ 3030 stp->sd_wakeq &= ~slpflg; 3031 *done = 0; 3032 return (0); 3033 } 3034 3035 if (stp->sd_flag & errs) { 3036 /* 3037 * Check for errors before going to sleep since the 3038 * caller might not have checked this while holding 3039 * sd_lock. 3040 */ 3041 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3042 if (error != 0) { 3043 *done = 1; 3044 return (error); 3045 } 3046 } 3047 3048 /* 3049 * If any module downstream has requested read notification 3050 * by setting SNDMREAD flag using M_SETOPTS, send a message 3051 * down stream. 3052 */ 3053 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3054 mutex_exit(&stp->sd_lock); 3055 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3056 (flag & STR_NOSIG), &error))) { 3057 mutex_enter(&stp->sd_lock); 3058 *done = 1; 3059 return (error); 3060 } 3061 mp->b_datap->db_type = M_READ; 3062 rd_count = (ssize_t *)mp->b_wptr; 3063 *rd_count = count; 3064 mp->b_wptr += sizeof (ssize_t); 3065 /* 3066 * Send the number of bytes requested by the 3067 * read as the argument to M_READ. 3068 */ 3069 stream_willservice(stp); 3070 putnext(stp->sd_wrq, mp); 3071 stream_runservice(stp); 3072 mutex_enter(&stp->sd_lock); 3073 3074 /* 3075 * If any data arrived due to inline processing 3076 * of putnext(), don't sleep. 3077 */ 3078 if (_RD(stp->sd_wrq)->q_first != NULL) { 3079 *done = 0; 3080 return (0); 3081 } 3082 } 3083 3084 if (fmode & (FNDELAY|FNONBLOCK)) { 3085 if (!(flag & NOINTR)) 3086 error = EAGAIN; 3087 else 3088 error = 0; 3089 *done = 1; 3090 return (error); 3091 } 3092 3093 stp->sd_flag |= slpflg; 3094 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3095 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3096 stp, flag, count, fmode, done); 3097 3098 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3099 if (rval > 0) { 3100 /* EMPTY */ 3101 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3102 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3103 stp, flag, count, fmode, done); 3104 } else if (rval == 0) { 3105 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3106 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3107 stp, flag, count, fmode, done); 3108 stp->sd_flag &= ~slpflg; 3109 cv_broadcast(sleepon); 3110 if (!(flag & NOINTR)) 3111 error = EINTR; 3112 else 3113 error = 0; 3114 *done = 1; 3115 return (error); 3116 } else { 3117 /* timeout */ 3118 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3119 "strwaitq timeout:%p, %X, %lX, %X, %p", 3120 stp, flag, count, fmode, done); 3121 *done = 1; 3122 if (!(flag & NOINTR)) 3123 return (ETIME); 3124 else 3125 return (0); 3126 } 3127 /* 3128 * If the caller implements delayed errors (i.e. 
queued after data) 3129 * we can not check for errors here since data as well as an 3130 * error might have arrived at the stream head. We return to 3131 * have the caller check the read queue before checking for errors. 3132 */ 3133 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3134 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3135 if (error != 0) { 3136 *done = 1; 3137 return (error); 3138 } 3139 } 3140 *done = 0; 3141 return (0); 3142 } 3143 3144 /* 3145 * Perform job control discipline access checks. 3146 * Return 0 for success and the errno for failure. 3147 */ 3148 3149 #define cantsend(p, t, sig) \ 3150 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3151 3152 int 3153 straccess(struct stdata *stp, enum jcaccess mode) 3154 { 3155 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3156 kthread_t *t = curthread; 3157 proc_t *p = ttoproc(t); 3158 sess_t *sp; 3159 3160 ASSERT(mutex_owned(&stp->sd_lock)); 3161 3162 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3163 return (0); 3164 3165 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3166 3167 for (;;) { 3168 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3169 sp = p->p_sessp; 3170 mutex_enter(&sp->s_lock); /* protects sp->* */ 3171 3172 /* 3173 * If this is not the calling process's controlling terminal 3174 * or if the calling process is already in the foreground 3175 * then allow access. 3176 */ 3177 if (sp->s_dev != stp->sd_vnode->v_rdev || 3178 p->p_pgidp == stp->sd_pgidp) { 3179 mutex_exit(&sp->s_lock); 3180 mutex_exit(&p->p_splock); 3181 mutex_exit(&p->p_lock); 3182 return (0); 3183 } 3184 3185 /* 3186 * Check to see if controlling terminal has been deallocated. 3187 */ 3188 if (sp->s_vp == NULL) { 3189 if (!cantsend(p, t, SIGHUP)) 3190 sigtoproc(p, t, SIGHUP); 3191 mutex_exit(&sp->s_lock); 3192 mutex_exit(&p->p_splock); 3193 mutex_exit(&p->p_lock); 3194 return (EIO); 3195 } 3196 3197 mutex_exit(&sp->s_lock); 3198 mutex_exit(&p->p_splock); 3199 3200 if (mode == JCGETP) { 3201 mutex_exit(&p->p_lock); 3202 return (0); 3203 } 3204 3205 if (mode == JCREAD) { 3206 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3207 mutex_exit(&p->p_lock); 3208 return (EIO); 3209 } 3210 mutex_exit(&p->p_lock); 3211 mutex_exit(&stp->sd_lock); 3212 pgsignal(p->p_pgidp, SIGTTIN); 3213 mutex_enter(&stp->sd_lock); 3214 mutex_enter(&p->p_lock); 3215 } else { /* mode == JCWRITE or JCSETP */ 3216 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3217 cantsend(p, t, SIGTTOU)) { 3218 mutex_exit(&p->p_lock); 3219 return (0); 3220 } 3221 if (p->p_detached) { 3222 mutex_exit(&p->p_lock); 3223 return (EIO); 3224 } 3225 mutex_exit(&p->p_lock); 3226 mutex_exit(&stp->sd_lock); 3227 pgsignal(p->p_pgidp, SIGTTOU); 3228 mutex_enter(&stp->sd_lock); 3229 mutex_enter(&p->p_lock); 3230 } 3231 3232 /* 3233 * We call cv_wait_sig_swap() to cause the appropriate 3234 * action for the jobcontrol signal to take place. 3235 * If the signal is being caught, we will take the 3236 * EINTR error return. Otherwise, the default action 3237 * of causing the process to stop will take place. 3238 * In this case, we rely on the periodic cv_broadcast() on 3239 * &lbolt_cv to wake us up to loop around and test again. 3240 * We can't get here if the signal is ignored or 3241 * if the current thread is blocking the signal. 
3242 */ 3243 mutex_exit(&stp->sd_lock); 3244 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3245 mutex_exit(&p->p_lock); 3246 mutex_enter(&stp->sd_lock); 3247 return (EINTR); 3248 } 3249 mutex_exit(&p->p_lock); 3250 mutex_enter(&stp->sd_lock); 3251 mutex_enter(&p->p_lock); 3252 } 3253 } 3254 3255 /* 3256 * Return size of message of block type (bp->b_datap->db_type) 3257 */ 3258 size_t 3259 xmsgsize(mblk_t *bp) 3260 { 3261 unsigned char type; 3262 size_t count = 0; 3263 3264 type = bp->b_datap->db_type; 3265 3266 for (; bp; bp = bp->b_cont) { 3267 if (type != bp->b_datap->db_type) 3268 break; 3269 ASSERT(bp->b_wptr >= bp->b_rptr); 3270 count += bp->b_wptr - bp->b_rptr; 3271 } 3272 return (count); 3273 } 3274 3275 /* 3276 * Allocate a stream head. 3277 */ 3278 struct stdata * 3279 shalloc(queue_t *qp) 3280 { 3281 stdata_t *stp; 3282 3283 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3284 3285 stp->sd_wrq = _WR(qp); 3286 stp->sd_strtab = NULL; 3287 stp->sd_iocid = 0; 3288 stp->sd_mate = NULL; 3289 stp->sd_freezer = NULL; 3290 stp->sd_refcnt = 0; 3291 stp->sd_wakeq = 0; 3292 stp->sd_anchor = 0; 3293 stp->sd_struiowrq = NULL; 3294 stp->sd_struiordq = NULL; 3295 stp->sd_struiodnak = 0; 3296 stp->sd_struionak = NULL; 3297 stp->sd_t_audit_data = NULL; 3298 stp->sd_rput_opt = 0; 3299 stp->sd_wput_opt = 0; 3300 stp->sd_read_opt = 0; 3301 stp->sd_rprotofunc = strrput_proto; 3302 stp->sd_rmiscfunc = strrput_misc; 3303 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3304 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3305 stp->sd_ciputctrl = NULL; 3306 stp->sd_nciputctrl = 0; 3307 stp->sd_qhead = NULL; 3308 stp->sd_qtail = NULL; 3309 stp->sd_servid = NULL; 3310 stp->sd_nqueues = 0; 3311 stp->sd_svcflags = 0; 3312 stp->sd_copyflag = 0; 3313 3314 return (stp); 3315 } 3316 3317 /* 3318 * Free a stream head. 
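* The caller must not hold sd_lock; shfree waits for any scheduled stream * head service (STRS_SCHEDULED) to finish before freeing the head.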
3319 */ 3320 void 3321 shfree(stdata_t *stp) 3322 { 3323 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3324 3325 stp->sd_wrq = NULL; 3326 3327 mutex_enter(&stp->sd_qlock); 3328 while (stp->sd_svcflags & STRS_SCHEDULED) { 3329 STRSTAT(strwaits); 3330 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3331 } 3332 mutex_exit(&stp->sd_qlock); 3333 3334 if (stp->sd_ciputctrl != NULL) { 3335 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3336 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3337 stp->sd_nciputctrl, 0); 3338 ASSERT(ciputctrl_cache != NULL); 3339 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3340 stp->sd_ciputctrl = NULL; 3341 stp->sd_nciputctrl = 0; 3342 } 3343 ASSERT(stp->sd_qhead == NULL); 3344 ASSERT(stp->sd_qtail == NULL); 3345 ASSERT(stp->sd_nqueues == 0); 3346 kmem_cache_free(stream_head_cache, stp); 3347 } 3348 3349 /* 3350 * Allocate a pair of queues and a syncq for the pair 3351 */ 3352 queue_t * 3353 allocq(void) 3354 { 3355 queinfo_t *qip; 3356 queue_t *qp, *wqp; 3357 syncq_t *sq; 3358 3359 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3360 3361 qp = &qip->qu_rqueue; 3362 wqp = &qip->qu_wqueue; 3363 sq = &qip->qu_syncq; 3364 3365 qp->q_last = NULL; 3366 qp->q_next = NULL; 3367 qp->q_ptr = NULL; 3368 qp->q_flag = QUSE | QREADR; 3369 qp->q_bandp = NULL; 3370 qp->q_stream = NULL; 3371 qp->q_syncq = sq; 3372 qp->q_nband = 0; 3373 qp->q_nfsrv = NULL; 3374 qp->q_draining = 0; 3375 qp->q_syncqmsgs = 0; 3376 qp->q_spri = 0; 3377 qp->q_qtstamp = 0; 3378 qp->q_sqtstamp = 0; 3379 qp->q_fp = NULL; 3380 3381 wqp->q_last = NULL; 3382 wqp->q_next = NULL; 3383 wqp->q_ptr = NULL; 3384 wqp->q_flag = QUSE; 3385 wqp->q_bandp = NULL; 3386 wqp->q_stream = NULL; 3387 wqp->q_syncq = sq; 3388 wqp->q_nband = 0; 3389 wqp->q_nfsrv = NULL; 3390 wqp->q_draining = 0; 3391 wqp->q_syncqmsgs = 0; 3392 wqp->q_qtstamp = 0; 3393 wqp->q_sqtstamp = 0; 3394 wqp->q_spri = 0; 3395 3396 sq->sq_count = 0; 3397 sq->sq_rmqcount = 0; 3398 sq->sq_flags = 0; 3399 sq->sq_type = 0; 3400 sq->sq_callbflags = 0; 3401 sq->sq_cancelid = 0; 3402 sq->sq_ciputctrl = NULL; 3403 sq->sq_nciputctrl = 0; 3404 sq->sq_needexcl = 0; 3405 sq->sq_svcflags = 0; 3406 3407 return (qp); 3408 } 3409 3410 /* 3411 * Free a pair of queues and the "attached" syncq. 3412 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3413 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3414 */ 3415 void 3416 freeq(queue_t *qp) 3417 { 3418 qband_t *qbp, *nqbp; 3419 syncq_t *sq, *outer; 3420 queue_t *wqp = _WR(qp); 3421 3422 ASSERT(qp->q_flag & QREADR); 3423 3424 /* 3425 * If a previously dispatched taskq job is scheduled to run 3426 * sync_service() or a service routine is scheduled for the 3427 * queues about to be freed, wait here until all service is 3428 * done on the queue and all associated queues and syncqs. 3429 */ 3430 wait_svc(qp); 3431 3432 (void) flush_syncq(qp->q_syncq, qp); 3433 (void) flush_syncq(wqp->q_syncq, wqp); 3434 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3435 3436 /* 3437 * Flush the queues before q_next is set to NULL This is needed 3438 * in order to backenable any downstream queue before we go away. 3439 * Note: we are already removed from the stream so that the 3440 * backenabling will not cause any messages to be delivered to our 3441 * put procedures. 
3442 */ 3443 flushq(qp, FLUSHALL); 3444 flushq(wqp, FLUSHALL); 3445 3446 /* Tidy up - removeq only does a half-remove from stream */ 3447 qp->q_next = wqp->q_next = NULL; 3448 ASSERT(!(qp->q_flag & QENAB)); 3449 ASSERT(!(wqp->q_flag & QENAB)); 3450 3451 outer = qp->q_syncq->sq_outer; 3452 if (outer != NULL) { 3453 outer_remove(outer, qp->q_syncq); 3454 if (wqp->q_syncq != qp->q_syncq) 3455 outer_remove(outer, wqp->q_syncq); 3456 } 3457 /* 3458 * Free any syncqs that are outside what allocq returned. 3459 */ 3460 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3461 free_syncq(qp->q_syncq); 3462 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3463 free_syncq(wqp->q_syncq); 3464 3465 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3466 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3467 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3468 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3469 sq = SQ(qp); 3470 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3471 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3472 ASSERT(sq->sq_outer == NULL); 3473 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3474 ASSERT(sq->sq_callbpend == NULL); 3475 ASSERT(sq->sq_needexcl == 0); 3476 3477 if (sq->sq_ciputctrl != NULL) { 3478 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3479 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3480 sq->sq_nciputctrl, 0); 3481 ASSERT(ciputctrl_cache != NULL); 3482 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3483 sq->sq_ciputctrl = NULL; 3484 sq->sq_nciputctrl = 0; 3485 } 3486 3487 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3488 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3489 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3490 3491 qp->q_flag &= ~QUSE; 3492 wqp->q_flag &= ~QUSE; 3493 3494 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3495 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3496 3497 qbp = qp->q_bandp; 3498 while (qbp) { 3499 nqbp = qbp->qb_next; 3500 freeband(qbp); 3501 qbp = nqbp; 3502 } 3503 qbp = wqp->q_bandp; 3504 while (qbp) { 3505 nqbp = qbp->qb_next; 3506 freeband(qbp); 3507 qbp = nqbp; 3508 } 3509 kmem_cache_free(queue_cache, qp); 3510 } 3511 3512 /* 3513 * Allocate a qband structure. 3514 */ 3515 qband_t * 3516 allocband(void) 3517 { 3518 qband_t *qbp; 3519 3520 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3521 if (qbp == NULL) 3522 return (NULL); 3523 3524 qbp->qb_next = NULL; 3525 qbp->qb_count = 0; 3526 qbp->qb_mblkcnt = 0; 3527 qbp->qb_first = NULL; 3528 qbp->qb_last = NULL; 3529 qbp->qb_flag = 0; 3530 3531 return (qbp); 3532 } 3533 3534 /* 3535 * Free a qband structure. 3536 */ 3537 void 3538 freeband(qband_t *qbp) 3539 { 3540 kmem_cache_free(qband_cache, qbp); 3541 } 3542 3543 /* 3544 * Just like putnextctl(9F), except that allocb_wait() is used. 3545 * 3546 * Consolidation Private, and of course only callable from the stream head or 3547 * routines that may block. 3548 */ 3549 int 3550 putnextctl_wait(queue_t *q, int type) 3551 { 3552 mblk_t *bp; 3553 int error; 3554 3555 if ((datamsg(type) && (type != M_DELAY)) || 3556 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3557 return (0); 3558 3559 bp->b_datap->db_type = (unsigned char)type; 3560 putnext(q, bp); 3561 return (1); 3562 } 3563 3564 /* 3565 * Run any possible bufcalls. 
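* Called from streams_bufcall_service() below once kmem_avail() reports * free memory; bcall_monitor serializes this with strwaitbuf().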
3566 */ 3567 void 3568 runbufcalls(void) 3569 { 3570 strbufcall_t *bcp; 3571 3572 mutex_enter(&bcall_monitor); 3573 mutex_enter(&strbcall_lock); 3574 3575 if (strbcalls.bc_head) { 3576 size_t count; 3577 int nevent; 3578 3579 /* 3580 * count how many events are on the list 3581 * now so we can check to avoid looping 3582 * in low memory situations 3583 */ 3584 nevent = 0; 3585 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3586 nevent++; 3587 3588 /* 3589 * get estimate of available memory from kmem_avail(). 3590 * awake all bufcall functions waiting for 3591 * memory whose request could be satisfied 3592 * by 'count' memory and let 'em fight for it. 3593 */ 3594 count = kmem_avail(); 3595 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3596 STRSTAT(bufcalls); 3597 --nevent; 3598 if (bcp->bc_size <= count) { 3599 bcp->bc_executor = curthread; 3600 mutex_exit(&strbcall_lock); 3601 (*bcp->bc_func)(bcp->bc_arg); 3602 mutex_enter(&strbcall_lock); 3603 bcp->bc_executor = NULL; 3604 cv_broadcast(&bcall_cv); 3605 strbcalls.bc_head = bcp->bc_next; 3606 kmem_free(bcp, sizeof (strbufcall_t)); 3607 } else { 3608 /* 3609 * too big, try again later - note 3610 * that nevent was decremented above 3611 * so we won't retry this one on this 3612 * iteration of the loop 3613 */ 3614 if (bcp->bc_next != NULL) { 3615 strbcalls.bc_head = bcp->bc_next; 3616 bcp->bc_next = NULL; 3617 strbcalls.bc_tail->bc_next = bcp; 3618 strbcalls.bc_tail = bcp; 3619 } 3620 } 3621 } 3622 if (strbcalls.bc_head == NULL) 3623 strbcalls.bc_tail = NULL; 3624 } 3625 3626 mutex_exit(&strbcall_lock); 3627 mutex_exit(&bcall_monitor); 3628 } 3629 3630 3631 /* 3632 * Actually run queue's service routine. 3633 */ 3634 static void 3635 runservice(queue_t *q) 3636 { 3637 qband_t *qbp; 3638 3639 ASSERT(q->q_qinfo->qi_srvp); 3640 again: 3641 entersq(q->q_syncq, SQ_SVC); 3642 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3643 "runservice starts:%p", q); 3644 3645 if (!(q->q_flag & QWCLOSE)) 3646 (*q->q_qinfo->qi_srvp)(q); 3647 3648 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3649 "runservice ends:(%p)", q); 3650 3651 leavesq(q->q_syncq, SQ_SVC); 3652 3653 mutex_enter(QLOCK(q)); 3654 if (q->q_flag & QENAB) { 3655 q->q_flag &= ~QENAB; 3656 mutex_exit(QLOCK(q)); 3657 goto again; 3658 } 3659 q->q_flag &= ~QINSERVICE; 3660 q->q_flag &= ~QBACK; 3661 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3662 qbp->qb_flag &= ~QB_BACK; 3663 /* 3664 * Wakeup thread waiting for the service procedure 3665 * to be run (strclose and qdetach). 3666 */ 3667 cv_broadcast(&q->q_wait); 3668 3669 mutex_exit(QLOCK(q)); 3670 } 3671 3672 /* 3673 * Background processing of bufcalls. 
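* This is the body of a dedicated service thread and never returns: it * drains the bufcall list whenever memory is available, then sleeps on * memavail_cv (bounded by a 60 second timeout) or on strbcall_cv waiting * for new work.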
3674 */ 3675 void 3676 streams_bufcall_service(void) 3677 { 3678 callb_cpr_t cprinfo; 3679 3680 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3681 "streams_bufcall_service"); 3682 3683 mutex_enter(&strbcall_lock); 3684 3685 for (;;) { 3686 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3687 mutex_exit(&strbcall_lock); 3688 runbufcalls(); 3689 mutex_enter(&strbcall_lock); 3690 } 3691 if (strbcalls.bc_head != NULL) { 3692 clock_t wt, tick; 3693 3694 STRSTAT(bcwaits); 3695 /* Wait for memory to become available */ 3696 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3697 tick = SEC_TO_TICK(60); 3698 time_to_wait(&wt, tick); 3699 (void) cv_timedwait(&memavail_cv, &strbcall_lock, wt); 3700 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3701 } 3702 3703 /* Wait for new work to arrive */ 3704 if (strbcalls.bc_head == NULL) { 3705 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3706 cv_wait(&strbcall_cv, &strbcall_lock); 3707 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3708 } 3709 } 3710 } 3711 3712 /* 3713 * Background processing of streams background tasks which failed 3714 * taskq_dispatch. 3715 */ 3716 static void 3717 streams_qbkgrnd_service(void) 3718 { 3719 callb_cpr_t cprinfo; 3720 queue_t *q; 3721 3722 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3723 "streams_bkgrnd_service"); 3724 3725 mutex_enter(&service_queue); 3726 3727 for (;;) { 3728 /* 3729 * Wait for work to arrive. 3730 */ 3731 while ((freebs_list == NULL) && (qhead == NULL)) { 3732 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3733 cv_wait(&services_to_run, &service_queue); 3734 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3735 } 3736 /* 3737 * Handle all pending freebs requests to free memory. 3738 */ 3739 while (freebs_list != NULL) { 3740 mblk_t *mp = freebs_list; 3741 freebs_list = mp->b_next; 3742 mutex_exit(&service_queue); 3743 mblk_free(mp); 3744 mutex_enter(&service_queue); 3745 } 3746 /* 3747 * Run pending queues. 3748 */ 3749 while (qhead != NULL) { 3750 DQ(q, qhead, qtail, q_link); 3751 ASSERT(q != NULL); 3752 mutex_exit(&service_queue); 3753 queue_service(q); 3754 mutex_enter(&service_queue); 3755 } 3756 ASSERT(qhead == NULL && qtail == NULL); 3757 } 3758 } 3759 3760 /* 3761 * Background processing of streams background tasks which failed 3762 * taskq_dispatch. 3763 */ 3764 static void 3765 streams_sqbkgrnd_service(void) 3766 { 3767 callb_cpr_t cprinfo; 3768 syncq_t *sq; 3769 3770 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3771 "streams_sqbkgrnd_service"); 3772 3773 mutex_enter(&service_queue); 3774 3775 for (;;) { 3776 /* 3777 * Wait for work to arrive. 3778 */ 3779 while (sqhead == NULL) { 3780 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3781 cv_wait(&syncqs_to_run, &service_queue); 3782 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3783 } 3784 3785 /* 3786 * Run pending syncqs. 3787 */ 3788 while (sqhead != NULL) { 3789 DQ(sq, sqhead, sqtail, sq_next); 3790 ASSERT(sq != NULL); 3791 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3792 mutex_exit(&service_queue); 3793 syncq_service(sq); 3794 mutex_enter(&service_queue); 3795 } 3796 } 3797 } 3798 3799 /* 3800 * Disable the syncq and wait for background syncq processing to complete. 3801 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3802 * list. 
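* If the removal succeeds, the background thread will never see the syncq * and no wait is needed; otherwise we sleep on sq_wait until sq_servcount * drops to zero.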
3803 */ 3804 void 3805 wait_sq_svc(syncq_t *sq) 3806 { 3807 mutex_enter(SQLOCK(sq)); 3808 sq->sq_svcflags |= SQ_DISABLED; 3809 if (sq->sq_svcflags & SQ_BGTHREAD) { 3810 syncq_t *sq_chase; 3811 syncq_t *sq_curr; 3812 int removed; 3813 3814 ASSERT(sq->sq_servcount == 1); 3815 mutex_enter(&service_queue); 3816 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3817 mutex_exit(&service_queue); 3818 if (removed) { 3819 sq->sq_svcflags &= ~SQ_BGTHREAD; 3820 sq->sq_servcount = 0; 3821 STRSTAT(sqremoved); 3822 goto done; 3823 } 3824 } 3825 while (sq->sq_servcount != 0) { 3826 sq->sq_flags |= SQ_WANTWAKEUP; 3827 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3828 } 3829 done: 3830 mutex_exit(SQLOCK(sq)); 3831 } 3832 3833 /* 3834 * Put a syncq on the list of syncq's to be serviced by the sqthread. 3835 * Add the argument to the end of the sqhead list and set the flag 3836 * indicating this syncq has been enabled. If it has already been 3837 * enabled, don't do anything. 3838 * This routine assumes that SQLOCK is held. 3839 * NOTE that the lock order is to have the SQLOCK first, 3840 * so if the service_syncq lock is held, we need to release it 3841 * before acquiring the SQLOCK (mostly relevant for the background 3842 * thread, and this seems to be common among the STREAMS global locks). 3843 * Note that the sq_svcflags are protected by the SQLOCK. 3844 */ 3845 void 3846 sqenable(syncq_t *sq) 3847 { 3848 /* 3849 * This is probably not important except for where I believe it 3850 * is being called. At that point, it should be held (and it 3851 * is a pain to release it just for this routine, so don't do 3852 * it). 3853 */ 3854 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3855 3856 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3857 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3858 3859 /* 3860 * Do not put on list if background thread is scheduled or 3861 * syncq is disabled. 3862 */ 3863 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3864 return; 3865 3866 /* 3867 * Check whether we should enable sq at all. 3868 * Non PERMOD syncqs may be drained by at most one thread. 3869 * PERMOD syncqs may be drained by several threads but we limit the 3870 * total amount to the lesser of 3871 * Number of queues on the squeue and 3872 * Number of CPUs. 3873 */ 3874 if (sq->sq_servcount != 0) { 3875 if (((sq->sq_type & SQ_PERMOD) == 0) || 3876 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3877 STRSTAT(sqtoomany); 3878 return; 3879 } 3880 } 3881 3882 sq->sq_tstamp = lbolt; 3883 STRSTAT(sqenables); 3884 3885 /* Attempt a taskq dispatch */ 3886 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3887 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3888 if (sq->sq_servid != NULL) { 3889 sq->sq_servcount++; 3890 return; 3891 } 3892 3893 /* 3894 * This taskq dispatch failed, but a previous one may have succeeded. 3895 * Don't try to schedule on the background thread whilst there is 3896 * outstanding taskq processing. 3897 */ 3898 if (sq->sq_servcount != 0) 3899 return; 3900 3901 /* 3902 * System is low on resources and can't perform a non-sleeping 3903 * dispatch. Schedule the syncq for a background thread and mark the 3904 * syncq to avoid any further taskq dispatch attempts. 
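* The syncq is queued on the sqhead/sqtail list and * streams_sqbkgrnd_service() is woken via syncqs_to_run; SQ_BGTHREAD * remains set until the syncq is serviced or wait_sq_svc() removes it * from the list.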
3905 */ 3906 mutex_enter(&service_queue); 3907 STRSTAT(taskqfails); 3908 ENQUEUE(sq, sqhead, sqtail, sq_next); 3909 sq->sq_svcflags |= SQ_BGTHREAD; 3910 sq->sq_servcount = 1; 3911 cv_signal(&syncqs_to_run); 3912 mutex_exit(&service_queue); 3913 } 3914 3915 /* 3916 * Note: fifo_close() depends on the mblk_t on the queue being freed 3917 * asynchronously. The asynchronous freeing of messages breaks the 3918 * recursive call chain of fifo_close() while there are I_SENDFD type of 3919 * messages referring to other file pointers on the queue. Then, when 3920 * closing pipes, it can avoid stack overflow in the case of daisy-chained 3921 * pipes, and also avoid deadlock in the case of fifonode_t pairs (which 3922 * share the same fifolock_t). 3923 */ 3924 3925 void 3926 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 3927 { 3928 esb_queue_t *eqp = &system_esbq; 3929 3930 ASSERT(dbp->db_mblk == mp); 3931 3932 /* 3933 * Check data sanity. The dblock should have a non-empty free function. 3934 * It is better to panic here than later, when the dblock is freed 3935 * asynchronously and the context is lost. 3936 */ 3937 if (dbp->db_frtnp->free_func == NULL) { 3938 panic("freebs_enqueue: dblock %p has a NULL free callback", 3939 (void *)dbp); 3940 } 3941 3942 mutex_enter(&eqp->eq_lock); 3943 /* queue the new mblk on the esballoc queue */ 3944 if (eqp->eq_head == NULL) { 3945 eqp->eq_head = eqp->eq_tail = mp; 3946 } else { 3947 eqp->eq_tail->b_next = mp; 3948 eqp->eq_tail = mp; 3949 } 3950 eqp->eq_len++; 3951 3952 /* If we're the first thread to reach the threshold, process */ 3953 if (eqp->eq_len >= esbq_max_qlen && 3954 !(eqp->eq_flags & ESBQ_PROCESSING)) 3955 esballoc_process_queue(eqp); 3956 3957 esballoc_set_timer(eqp, esbq_timeout); 3958 mutex_exit(&eqp->eq_lock); 3959 } 3960 3961 static void 3962 esballoc_process_queue(esb_queue_t *eqp) 3963 { 3964 mblk_t *mp; 3965 3966 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 3967 3968 eqp->eq_flags |= ESBQ_PROCESSING; 3969 3970 do { 3971 /* 3972 * Detach the message chain for processing. 3973 */ 3974 mp = eqp->eq_head; 3975 eqp->eq_tail->b_next = NULL; 3976 eqp->eq_head = eqp->eq_tail = NULL; 3977 eqp->eq_len = 0; 3978 mutex_exit(&eqp->eq_lock); 3979 3980 /* 3981 * Process the message chain. 3982 */ 3983 esballoc_enqueue_mblk(mp); 3984 mutex_enter(&eqp->eq_lock); 3985 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0)); 3986 3987 eqp->eq_flags &= ~ESBQ_PROCESSING; 3988 } 3989 3990 /* 3991 * Taskq callback routine to free esballoc'ed mblks. 3992 */ 3993 static void 3994 esballoc_mblk_free(mblk_t *mp) 3995 { 3996 mblk_t *nextmp; 3997 3998 for (; mp != NULL; mp = nextmp) { 3999 nextmp = mp->b_next; 4000 mp->b_next = NULL; 4001 mblk_free(mp); 4002 } 4003 } 4004 4005 static void 4006 esballoc_enqueue_mblk(mblk_t *mp) 4007 { 4008 4009 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp, 4010 TQ_NOSLEEP) == NULL) { 4011 mblk_t *first_mp = mp; 4012 /* 4013 * System is low on resources and can't perform a non-sleeping 4014 * dispatch. Schedule for a background thread.
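* The whole chain is prepended to freebs_list and * streams_qbkgrnd_service() is woken via services_to_run to free the * messages.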
4015 */ 4016 mutex_enter(&service_queue); 4017 STRSTAT(taskqfails); 4018 4019 while (mp->b_next != NULL) 4020 mp = mp->b_next; 4021 4022 mp->b_next = freebs_list; 4023 freebs_list = first_mp; 4024 cv_signal(&services_to_run); 4025 mutex_exit(&service_queue); 4026 } 4027 } 4028 4029 static void 4030 esballoc_timer(void *arg) 4031 { 4032 esb_queue_t *eqp = arg; 4033 4034 mutex_enter(&eqp->eq_lock); 4035 eqp->eq_flags &= ~ESBQ_TIMER; 4036 4037 if (!(eqp->eq_flags & ESBQ_PROCESSING) && 4038 eqp->eq_len > 0) 4039 esballoc_process_queue(eqp); 4040 4041 esballoc_set_timer(eqp, esbq_timeout); 4042 mutex_exit(&eqp->eq_lock); 4043 } 4044 4045 static void 4046 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout) 4047 { 4048 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4049 4050 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) { 4051 (void) timeout(esballoc_timer, eqp, eq_timeout); 4052 eqp->eq_flags |= ESBQ_TIMER; 4053 } 4054 } 4055 4056 void 4057 esballoc_queue_init(void) 4058 { 4059 system_esbq.eq_len = 0; 4060 system_esbq.eq_head = system_esbq.eq_tail = NULL; 4061 system_esbq.eq_flags = 0; 4062 } 4063 4064 /* 4065 * Set the QBACK or QB_BACK flag in the given queue for 4066 * the given priority band. 4067 */ 4068 void 4069 setqback(queue_t *q, unsigned char pri) 4070 { 4071 int i; 4072 qband_t *qbp; 4073 qband_t **qbpp; 4074 4075 ASSERT(MUTEX_HELD(QLOCK(q))); 4076 if (pri != 0) { 4077 if (pri > q->q_nband) { 4078 qbpp = &q->q_bandp; 4079 while (*qbpp) 4080 qbpp = &(*qbpp)->qb_next; 4081 while (pri > q->q_nband) { 4082 if ((*qbpp = allocband()) == NULL) { 4083 cmn_err(CE_WARN, 4084 "setqback: can't allocate qband\n"); 4085 return; 4086 } 4087 (*qbpp)->qb_hiwat = q->q_hiwat; 4088 (*qbpp)->qb_lowat = q->q_lowat; 4089 q->q_nband++; 4090 qbpp = &(*qbpp)->qb_next; 4091 } 4092 } 4093 qbp = q->q_bandp; 4094 i = pri; 4095 while (--i) 4096 qbp = qbp->qb_next; 4097 qbp->qb_flag |= QB_BACK; 4098 } else { 4099 q->q_flag |= QBACK; 4100 } 4101 } 4102 4103 int 4104 strcopyin(void *from, void *to, size_t len, int copyflag) 4105 { 4106 if (copyflag & U_TO_K) { 4107 ASSERT((copyflag & K_TO_K) == 0); 4108 if (copyin(from, to, len)) 4109 return (EFAULT); 4110 } else { 4111 ASSERT(copyflag & K_TO_K); 4112 bcopy(from, to, len); 4113 } 4114 return (0); 4115 } 4116 4117 int 4118 strcopyout(void *from, void *to, size_t len, int copyflag) 4119 { 4120 if (copyflag & U_TO_K) { 4121 if (copyout(from, to, len)) 4122 return (EFAULT); 4123 } else { 4124 ASSERT(copyflag & K_TO_K); 4125 bcopy(from, to, len); 4126 } 4127 return (0); 4128 } 4129 4130 /* 4131 * strsignal_nolock() posts a signal to the process(es) at the stream head. 4132 * It assumes that the stream head lock is already held, whereas strsignal() 4133 * acquires the lock first. This routine was created because a few callers 4134 * release the stream head lock before calling only to re-acquire it after 4135 * it returns. 
4136 */ 4137 void 4138 strsignal_nolock(stdata_t *stp, int sig, uchar_t band) 4139 { 4140 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4141 switch (sig) { 4142 case SIGPOLL: 4143 if (stp->sd_sigflags & S_MSG) 4144 strsendsig(stp->sd_siglist, S_MSG, band, 0); 4145 break; 4146 default: 4147 if (stp->sd_pgidp) 4148 pgsignal(stp->sd_pgidp, sig); 4149 break; 4150 } 4151 } 4152 4153 void 4154 strsignal(stdata_t *stp, int sig, int32_t band) 4155 { 4156 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4157 "strsignal:%p, %X, %X", stp, sig, band); 4158 4159 mutex_enter(&stp->sd_lock); 4160 switch (sig) { 4161 case SIGPOLL: 4162 if (stp->sd_sigflags & S_MSG) 4163 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4164 break; 4165 4166 default: 4167 if (stp->sd_pgidp) { 4168 pgsignal(stp->sd_pgidp, sig); 4169 } 4170 break; 4171 } 4172 mutex_exit(&stp->sd_lock); 4173 } 4174 4175 void 4176 strhup(stdata_t *stp) 4177 { 4178 ASSERT(mutex_owned(&stp->sd_lock)); 4179 pollwakeup(&stp->sd_pollist, POLLHUP); 4180 if (stp->sd_sigflags & S_HANGUP) 4181 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4182 } 4183 4184 /* 4185 * Backenable the first queue upstream from `q' with a service procedure. 4186 */ 4187 void 4188 backenable(queue_t *q, uchar_t pri) 4189 { 4190 queue_t *nq; 4191 4192 /* 4193 * Our presence might not prevent other modules in our own 4194 * stream from popping/pushing since the caller of getq might not 4195 * have a claim on the queue (some drivers do a getq on somebody 4196 * else's queue - they know that the queue itself is not going away 4197 * but the framework has to guarantee q_next in that stream). 4198 */ 4199 claimstr(q); 4200 4201 /* Find nearest back queue with service proc */ 4202 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4203 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4204 } 4205 4206 if (nq) { 4207 kthread_t *freezer; 4208 /* 4209 * backenable can be called either with no locks held 4210 * or with the stream frozen (the latter occurs when a module 4211 * calls rmvq with the stream frozen). If the stream is frozen 4212 * by the caller the caller will hold all qlocks in the stream. 4213 * Note that a frozen stream doesn't freeze a mated stream, 4214 * so we explicitly check for that. 4215 */ 4216 freezer = STREAM(q)->sd_freezer; 4217 if (freezer != curthread || STREAM(q) != STREAM(nq)) { 4218 mutex_enter(QLOCK(nq)); 4219 } 4220 #ifdef DEBUG 4221 else { 4222 ASSERT(frozenstr(q)); 4223 ASSERT(MUTEX_HELD(QLOCK(q))); 4224 ASSERT(MUTEX_HELD(QLOCK(nq))); 4225 } 4226 #endif 4227 setqback(nq, pri); 4228 qenable_locked(nq); 4229 if (freezer != curthread || STREAM(q) != STREAM(nq)) 4230 mutex_exit(QLOCK(nq)); 4231 } 4232 releasestr(q); 4233 } 4234 4235 /* 4236 * Return the appropriate errno when one of flags_to_check is set 4237 * in sd_flag. Uses the exported error routines if they are set. 4238 * Will return 0 if no error is set (or if the exported error routines 4239 * do not return an error). 4240 * 4241 * If there is both a read and write error to check, we prefer the read error. 4242 * Also, give preference to recorded errnos over the error functions. 4243 * The flags that are handled are: 4244 * STPLEX return EINVAL 4245 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4246 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4247 * STRHUP return sd_werror 4248 * 4249 * If the caller indicates that the operation is a peek, a nonpersistent error 4250 * is not cleared.
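 *
 * A typical caller checks under sd_lock, e.g. (cf. strstartplumb() below):
 *
 *	mutex_enter(&stp->sd_lock);
 *	if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX))
 *		error = strgeterr(stp, STRDERR|STWRERR|STRHUP|STPLEX, 0);
 *	mutex_exit(&stp->sd_lock);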
4251 */ 4252 int 4253 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4254 { 4255 int32_t sd_flag = stp->sd_flag & flags_to_check; 4256 int error = 0; 4257 4258 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4259 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4260 if (sd_flag & STPLEX) 4261 error = EINVAL; 4262 else if (sd_flag & STRDERR) { 4263 error = stp->sd_rerror; 4264 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4265 /* 4266 * Read errors are non-persistent, i.e. discarded once 4267 * returned to a non-peeking caller. 4268 */ 4269 stp->sd_rerror = 0; 4270 stp->sd_flag &= ~STRDERR; 4271 } 4272 if (error == 0 && stp->sd_rderrfunc != NULL) { 4273 int clearerr = 0; 4274 4275 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4276 &clearerr); 4277 if (clearerr) { 4278 stp->sd_flag &= ~STRDERR; 4279 stp->sd_rderrfunc = NULL; 4280 } 4281 } 4282 } else if (sd_flag & STWRERR) { 4283 error = stp->sd_werror; 4284 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4285 /* 4286 * Write errors are non-persistent, i.e. discarded once 4287 * returned to a non-peeking caller. 4288 */ 4289 stp->sd_werror = 0; 4290 stp->sd_flag &= ~STWRERR; 4291 } 4292 if (error == 0 && stp->sd_wrerrfunc != NULL) { 4293 int clearerr = 0; 4294 4295 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek, 4296 &clearerr); 4297 if (clearerr) { 4298 stp->sd_flag &= ~STWRERR; 4299 stp->sd_wrerrfunc = NULL; 4300 } 4301 } 4302 } else if (sd_flag & STRHUP) { 4303 /* sd_werror set when STRHUP */ 4304 error = stp->sd_werror; 4305 } 4306 return (error); 4307 } 4308 4309 4310 /* 4311 * Single-thread open/close/push/pop, 4312 * for twisted streams as well. 4313 */ 4314 int 4315 strstartplumb(stdata_t *stp, int flag, int cmd) 4316 { 4317 int waited = 1; 4318 int error = 0; 4319 4320 if (STRMATED(stp)) { 4321 struct stdata *stmatep = stp->sd_mate; 4322 4323 STRLOCKMATES(stp); 4324 while (waited) { 4325 waited = 0; 4326 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4327 if ((cmd == I_POP) && 4328 (flag & (FNDELAY|FNONBLOCK))) { 4329 STRUNLOCKMATES(stp); 4330 return (EAGAIN); 4331 } 4332 waited = 1; 4333 mutex_exit(&stp->sd_lock); 4334 if (!cv_wait_sig(&stmatep->sd_monitor, 4335 &stmatep->sd_lock)) { 4336 mutex_exit(&stmatep->sd_lock); 4337 return (EINTR); 4338 } 4339 mutex_exit(&stmatep->sd_lock); 4340 STRLOCKMATES(stp); 4341 } 4342 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4343 if ((cmd == I_POP) && 4344 (flag & (FNDELAY|FNONBLOCK))) { 4345 STRUNLOCKMATES(stp); 4346 return (EAGAIN); 4347 } 4348 waited = 1; 4349 mutex_exit(&stmatep->sd_lock); 4350 if (!cv_wait_sig(&stp->sd_monitor, 4351 &stp->sd_lock)) { 4352 mutex_exit(&stp->sd_lock); 4353 return (EINTR); 4354 } 4355 mutex_exit(&stp->sd_lock); 4356 STRLOCKMATES(stp); 4357 } 4358 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4359 error = strgeterr(stp, 4360 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4361 if (error != 0) { 4362 STRUNLOCKMATES(stp); 4363 return (error); 4364 } 4365 } 4366 } 4367 stp->sd_flag |= STRPLUMB; 4368 STRUNLOCKMATES(stp); 4369 } else { 4370 mutex_enter(&stp->sd_lock); 4371 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4372 if (((cmd == I_POP) || (cmd == _I_REMOVE)) && 4373 (flag & (FNDELAY|FNONBLOCK))) { 4374 mutex_exit(&stp->sd_lock); 4375 return (EAGAIN); 4376 } 4377 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) { 4378 mutex_exit(&stp->sd_lock); 4379 return (EINTR); 4380 } 4381 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4382 error = strgeterr(stp, 4383 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4384 if
(error != 0) { 4385 mutex_exit(&stp->sd_lock); 4386 return (error); 4387 } 4388 } 4389 } 4390 stp->sd_flag |= STRPLUMB; 4391 mutex_exit(&stp->sd_lock); 4392 } 4393 return (0); 4394 } 4395 4396 /* 4397 * Complete the plumbing operation associated with stream `stp'. 4398 */ 4399 void 4400 strendplumb(stdata_t *stp) 4401 { 4402 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4403 ASSERT(stp->sd_flag & STRPLUMB); 4404 stp->sd_flag &= ~STRPLUMB; 4405 cv_broadcast(&stp->sd_monitor); 4406 } 4407 4408 /* 4409 * This describes how the STREAMS framework handles synchronization 4410 * during open/push and close/pop. 4411 * The key interfaces for open and close are qprocson and qprocsoff, 4412 * respectively. While the close case in general is harder, open 4413 * and close have significant similarities. 4414 * 4415 * During close the STREAMS framework has to both ensure that there 4416 * are no stale references to the queue pair (and syncq) that 4417 * are being closed and also provide the guarantees that are documented 4418 * in qprocsoff(9F). 4419 * If there are stale references to the queue that is closing it can 4420 * result in kernel memory corruption or kernel panics. 4421 * 4422 * Note that it is up to the module/driver to ensure that it itself 4423 * does not have any stale references to the closing queues once its close 4424 * routine returns. This includes: 4425 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines 4426 * associated with the queues. For timeout and bufcall callbacks the 4427 * module/driver also has to ensure (or wait for) any callbacks that 4428 * are in progress. 4429 * - If the module/driver is using esballoc it has to ensure that any 4430 * esballoc free functions do not refer to a queue that has closed. 4431 * (Note that in general the close routine can not wait for the esballoc'ed 4432 * messages to be freed since that can cause a deadlock.) 4433 * - Cancelling any interrupts that refer to the closing queues and 4434 * also ensuring that there are no interrupts in progress that will 4435 * refer to the closing queues once the close routine returns. 4436 * - For multiplexors removing any driver global state that refers to 4437 * the closing queue and also ensuring that there are no threads in 4438 * the multiplexor that have picked up a queue pointer but not yet 4439 * finished using it. 4440 * 4441 * In addition, a driver/module can only reference the q_next pointer 4442 * in its open, close, put, or service procedures or in a 4443 * qtimeout/qbufcall callback procedure executing "on" the correct 4444 * stream. Thus it can not reference the q_next pointer in an interrupt 4445 * routine or a timeout, bufcall or esballoc callback routine. Likewise 4446 * it can not reference q_next of a different queue e.g. in a mux that 4447 * passes messages from one queue's put/service procedure to another queue. 4448 * In all the cases when the driver/module can not access the q_next 4449 * field it must use the *next* versions e.g. canputnext instead of 4450 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...). 4451 * 4452 * 4453 * Assuming that the driver/module conforms to the above constraints 4454 * the STREAMS framework has to avoid stale references to q_next for all 4455 * the framework internal cases which include (but are not limited to): 4456 * - Threads in canput/canputnext/backenable and elsewhere that are 4457 * walking q_next. 4458 * - Messages on a syncq that have a reference to the queue through b_queue.
* - Messages on an outer perimeter (syncq) that have a reference to the 4460 * queue through b_queue. 4461 * - Threads that use q_nfsrv (e.g. canput) to find a queue. 4462 * Note that only canput and bcanput use q_nfsrv without any locking. 4463 * 4464 * The STREAMS framework providing the qprocsoff(9F) guarantees means that 4465 * after qprocsoff returns, the framework has to ensure that no threads can 4466 * enter the put or service routines for the closing read or write-side queue. 4467 * In addition to preventing "direct" entry into the put procedures 4468 * the framework also has to prevent messages being drained from 4469 * the syncq or the outer perimeter. 4470 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only 4471 * mechanism to prevent qwriter(PERIM_OUTER) from running after 4472 * qprocsoff has returned. 4473 * Note that if a module/driver uses put(9F) on one of its own queues 4474 * it is up to the module/driver to ensure that the put() doesn't 4475 * get called when the queue is closing. 4476 * 4477 * 4478 * The framework aspects of the above "contract" are implemented by 4479 * qprocsoff, removeq, and strlock: 4480 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from 4481 * entering the service procedures. 4482 * - strlock acquires the sd_lock and sd_reflock to prevent putnext, 4483 * canputnext, backenable etc from dereferencing the q_next that will 4484 * soon change. 4485 * - strlock waits for sd_refcnt to drop to zero so that e.g. any canputnext 4486 * or other q_next walker that uses claimstr/releasestr can finish. 4487 * - optionally for every syncq in the stream strlock acquires all the 4488 * sq_lock's and waits for all sq_counts to drop to a value that indicates 4489 * that no thread executes in the put or service procedures and that no 4490 * thread is draining into the module/driver. This ensures that no 4491 * open, close, put, service, or qtimeout/qbufcall callback procedure is 4492 * currently executing hence no such thread can end up with the old stale 4493 * q_next value and no canput/backenable can have the old stale 4494 * q_nfsrv/q_next. 4495 * - qdetach (wait_svc) makes sure that any scheduled or running threads 4496 * have either finished or observed the QWCLOSE flag and gone away. 4497 */ 4498 4499 4500 /* 4501 * Get all the locks necessary to change q_next. 4502 * 4503 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the 4504 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that 4505 * the only threads inside the syncq are threads currently calling removeq(). 4506 * Since threads calling removeq() are in the process of removing their queues 4507 * from the stream, we do not need to worry about them accessing a stale q_next 4508 * pointer and thus we do not need to wait for them to exit (in fact, waiting 4509 * for them can cause deadlock). 4510 * 4511 * This routine is subject to starvation since it does not set any flag to 4512 * prevent threads from entering a module in the stream (i.e. sq_count can 4513 * increase on some syncq while it is waiting on some other syncq). 4514 * 4515 * Assumes that only one thread attempts to call strlock for a given 4516 * stream. If this is not the case the two threads would deadlock. 4517 * This assumption is guaranteed since strlock is only called by insertq 4518 * and removeq and streams plumbing changes are single-threaded for 4519 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
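 *
 * The shape of the protocol below (a sketch, not the exact code):
 *
 *	retry: acquire sd_lock(s) and sd_reflock(s);
 *	       wait until sd_refcnt is zero;
 *	       for each syncq on sqlist: wait until
 *		   sq_count + putcounts == sq_rmqcount,
 *		   dropping all locks and retrying on contention.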
4520 * 4521 * For pipes, it is not difficult to atomically designate a pair of streams 4522 * to be mated. Once mated atomically by the framework the twisted pair remain 4523 * configured that way until dismantled atomically by the framework. 4524 * When plumbing takes place on a twisted stream it is necessary to ensure that 4525 * this operation is done exclusively on the twisted stream since two such 4526 * operations, each initiated on different ends of the pipe, will deadlock 4527 * waiting for each other to complete. 4528 * 4529 * On entry, no locks should be held. 4530 * The locks acquired and held by strlock depend on a few factors. 4531 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired 4532 * and held on exit and all sq_count are at an acceptable level. 4533 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with 4534 * sd_refcnt being zero. 4535 */ 4536 4537 static void 4538 strlock(struct stdata *stp, sqlist_t *sqlist) 4539 { 4540 syncql_t *sql, *sql2; 4541 retry: 4542 /* 4543 * Wait for any claimstr to go away. 4544 */ 4545 if (STRMATED(stp)) { 4546 struct stdata *stp1, *stp2; 4547 4548 STRLOCKMATES(stp); 4549 /* 4550 * Note that the selection of locking order is not 4551 * important, just that they are always acquired in 4552 * the same order. To assure this, we choose this 4553 * order based on the value of the pointer, and since 4554 * the pointer will not change for the life of this 4555 * pair, we will always grab the locks in the same 4556 * order (and hence, prevent deadlocks). 4557 */ 4558 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) { 4559 stp1 = stp; 4560 stp2 = stp->sd_mate; 4561 } else { 4562 stp2 = stp; 4563 stp1 = stp->sd_mate; 4564 } 4565 mutex_enter(&stp1->sd_reflock); 4566 if (stp1->sd_refcnt > 0) { 4567 STRUNLOCKMATES(stp); 4568 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock); 4569 mutex_exit(&stp1->sd_reflock); 4570 goto retry; 4571 } 4572 mutex_enter(&stp2->sd_reflock); 4573 if (stp2->sd_refcnt > 0) { 4574 STRUNLOCKMATES(stp); 4575 mutex_exit(&stp1->sd_reflock); 4576 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock); 4577 mutex_exit(&stp2->sd_reflock); 4578 goto retry; 4579 } 4580 STREAM_PUTLOCKS_ENTER(stp1); 4581 STREAM_PUTLOCKS_ENTER(stp2); 4582 } else { 4583 mutex_enter(&stp->sd_lock); 4584 mutex_enter(&stp->sd_reflock); 4585 while (stp->sd_refcnt > 0) { 4586 mutex_exit(&stp->sd_lock); 4587 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock); 4588 if (mutex_tryenter(&stp->sd_lock) == 0) { 4589 mutex_exit(&stp->sd_reflock); 4590 mutex_enter(&stp->sd_lock); 4591 mutex_enter(&stp->sd_reflock); 4592 } 4593 } 4594 STREAM_PUTLOCKS_ENTER(stp); 4595 } 4596 4597 if (sqlist == NULL) 4598 return; 4599 4600 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4601 syncq_t *sq = sql->sql_sq; 4602 uint16_t count; 4603 4604 mutex_enter(SQLOCK(sq)); 4605 count = sq->sq_count; 4606 ASSERT(sq->sq_rmqcount <= count); 4607 SQ_PUTLOCKS_ENTER(sq); 4608 SUM_SQ_PUTCOUNTS(sq, count); 4609 if (count == sq->sq_rmqcount) 4610 continue; 4611 4612 /* Failed - drop all locks that we have acquired so far */ 4613 if (STRMATED(stp)) { 4614 STREAM_PUTLOCKS_EXIT(stp); 4615 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4616 STRUNLOCKMATES(stp); 4617 mutex_exit(&stp->sd_reflock); 4618 mutex_exit(&stp->sd_mate->sd_reflock); 4619 } else { 4620 STREAM_PUTLOCKS_EXIT(stp); 4621 mutex_exit(&stp->sd_lock); 4622 mutex_exit(&stp->sd_reflock); 4623 } 4624 for (sql2 = sqlist->sqlist_head; sql2 != sql; 4625 sql2 = sql2->sql_next) { 4626
SQ_PUTLOCKS_EXIT(sql2->sql_sq); 4627 mutex_exit(SQLOCK(sql2->sql_sq)); 4628 } 4629 4630 /* 4631 * The wait loop below may starve when there are many threads 4632 * claiming the syncq. This is especially a problem with permod 4633 * syncqs (IP). To lessen the impact of the problem we increment 4634 * sq_needexcl and clear fastbits so that putnexts will slow 4635 * down and call sqenable instead of draining right away. 4636 */ 4637 sq->sq_needexcl++; 4638 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4639 while (count > sq->sq_rmqcount) { 4640 sq->sq_flags |= SQ_WANTWAKEUP; 4641 SQ_PUTLOCKS_EXIT(sq); 4642 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4643 count = sq->sq_count; 4644 SQ_PUTLOCKS_ENTER(sq); 4645 SUM_SQ_PUTCOUNTS(sq, count); 4646 } 4647 sq->sq_needexcl--; 4648 if (sq->sq_needexcl == 0) 4649 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4650 SQ_PUTLOCKS_EXIT(sq); 4651 ASSERT(count == sq->sq_rmqcount); 4652 mutex_exit(SQLOCK(sq)); 4653 goto retry; 4654 } 4655 } 4656 4657 /* 4658 * Drop all the locks that strlock acquired. 4659 */ 4660 static void 4661 strunlock(struct stdata *stp, sqlist_t *sqlist) 4662 { 4663 syncql_t *sql; 4664 4665 if (STRMATED(stp)) { 4666 STREAM_PUTLOCKS_EXIT(stp); 4667 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4668 STRUNLOCKMATES(stp); 4669 mutex_exit(&stp->sd_reflock); 4670 mutex_exit(&stp->sd_mate->sd_reflock); 4671 } else { 4672 STREAM_PUTLOCKS_EXIT(stp); 4673 mutex_exit(&stp->sd_lock); 4674 mutex_exit(&stp->sd_reflock); 4675 } 4676 4677 if (sqlist == NULL) 4678 return; 4679 4680 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4681 SQ_PUTLOCKS_EXIT(sql->sql_sq); 4682 mutex_exit(SQLOCK(sql->sql_sq)); 4683 } 4684 } 4685 4686 /* 4687 * When the module has a service procedure, we need to check whether the 4688 * next module with a service procedure is in flow control, and if so 4689 * trigger the backenable. 4690 */ 4691 static void 4692 backenable_insertedq(queue_t *q) 4693 { 4694 qband_t *qbp; 4695 4696 claimstr(q); 4697 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) { 4698 if (q->q_next->q_nfsrv->q_flag & QWANTW) 4699 backenable(q, 0); 4700 4701 qbp = q->q_next->q_nfsrv->q_bandp; 4702 for (; qbp != NULL; qbp = qbp->qb_next) 4703 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL) 4704 backenable(q, qbp->qb_first->b_band); 4705 } 4706 releasestr(q); 4707 } 4708 4709 /* 4710 * Given two read queues, insert a new one after another. 4711 * 4712 * This routine acquires all the necessary locks in order to change 4713 * q_next and related pointer using strlock(). 4714 * It depends on the stream head ensuring that there are no concurrent 4715 * insertq or removeq on the same stream. The stream head ensures this 4716 * using the flags STWOPEN, STRCLOSE, and STRPLUMB. 4717 * 4718 * Note that no syncq locks are held during the q_next change. This is 4719 * applied to all streams since, unlike removeq, there is no problem of stale 4720 * pointers when adding a module to the stream. Thus drivers/modules that do a 4721 * canput(rq->q_next) would never get a closed/freed queue pointer even if we 4722 * applied this optimization to all streams.
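 * Put differently: an insertion only ever redirects q_next to a queue
 * that is staying in the stream, so a concurrent q_next walker sees
 * either the old or the new neighbour, both of which remain valid.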
4723 */ 4724 void 4725 insertq(struct stdata *stp, queue_t *new) 4726 { 4727 queue_t *after; 4728 queue_t *wafter; 4729 queue_t *wnew = _WR(new); 4730 boolean_t have_fifo = B_FALSE; 4731 4732 if (new->q_flag & _QINSERTING) { 4733 ASSERT(stp->sd_vnode->v_type != VFIFO); 4734 after = new->q_next; 4735 wafter = _WR(new->q_next); 4736 } else { 4737 after = _RD(stp->sd_wrq); 4738 wafter = stp->sd_wrq; 4739 } 4740 4741 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ, 4742 "insertq:%p, %p", after, new); 4743 ASSERT(after->q_flag & QREADR); 4744 ASSERT(new->q_flag & QREADR); 4745 4746 strlock(stp, NULL); 4747 4748 /* Do we have a FIFO? */ 4749 if (wafter->q_next == after) { 4750 have_fifo = B_TRUE; 4751 wnew->q_next = new; 4752 } else { 4753 wnew->q_next = wafter->q_next; 4754 } 4755 new->q_next = after; 4756 4757 set_nfsrv_ptr(new, wnew, after, wafter); 4758 /* 4759 * set_nfsrv_ptr() needs to know if this is an insertion or not, 4760 * so only reset this flag after calling it. 4761 */ 4762 new->q_flag &= ~_QINSERTING; 4763 4764 if (have_fifo) { 4765 wafter->q_next = wnew; 4766 } else { 4767 if (wafter->q_next) 4768 _OTHERQ(wafter->q_next)->q_next = new; 4769 wafter->q_next = wnew; 4770 } 4771 4772 set_qend(new); 4773 /* The QEND flag might have to be updated for the upstream guy */ 4774 set_qend(after); 4775 4776 ASSERT(_SAMESTR(new) == O_SAMESTR(new)); 4777 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew)); 4778 ASSERT(_SAMESTR(after) == O_SAMESTR(after)); 4779 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter)); 4780 strsetuio(stp); 4781 4782 /* 4783 * If this was a module insertion, bump the push count. 4784 */ 4785 if (!(new->q_flag & QISDRV)) 4786 stp->sd_pushcnt++; 4787 4788 strunlock(stp, NULL); 4789 4790 /* check if the write Q needs backenable */ 4791 backenable_insertedq(wnew); 4792 4793 /* check if the read Q needs backenable */ 4794 backenable_insertedq(new); 4795 } 4796 4797 /* 4798 * Given a read queue, unlink it from any neighbors. 4799 * 4800 * This routine acquires all the necessary locks in order to 4801 * change q_next and related pointers and also guard against 4802 * stale references (e.g. through q_next) to the queue that 4803 * is being removed. It also plays part of the role in ensuring 4804 * that the module's/driver's put procedure doesn't get called 4805 * after qprocsoff returns. 4806 * 4807 * Removeq depends on the stream head ensuring that there are 4808 * no concurrent insertq or removeq on the same stream. The 4809 * stream head ensures this using the flags STWOPEN, STRCLOSE and 4810 * STRPLUMB. 4811 * 4812 * The set of locks needed to remove the queue is different in 4813 * different cases: 4814 * 4815 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after 4816 * waiting for the syncq reference count to drop to 0 indicating that no 4817 * non-close threads are present anywhere in the stream. This ensures that any 4818 * module/driver can reference q_next in its open, close, put, or service 4819 * procedures. 4820 * 4821 * The sq_rmqcount counter tracks the number of threads inside removeq(). 4822 * strlock() ensures that there are either no threads executing inside the 4823 * perimeter or only a thread calling qprocsoff(). 4824 * 4825 * strlock() compares the value of sq_count with the number of threads inside 4826 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake 4827 * up any threads waiting in strlock() when the sq_rmqcount increases.
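 *
 * The wait in strlock() is thus roughly (a sketch, not the exact code):
 *
 *	while (sq_count + putcounts > sq_rmqcount)
 *		cv_wait(&sq->sq_wait, SQLOCK(sq));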
4828 */ 4829 4830 void 4831 removeq(queue_t *qp) 4832 { 4833 queue_t *wqp = _WR(qp); 4834 struct stdata *stp = STREAM(qp); 4835 sqlist_t *sqlist = NULL; 4836 boolean_t isdriver; 4837 int moved; 4838 syncq_t *sq = qp->q_syncq; 4839 syncq_t *wsq = wqp->q_syncq; 4840 4841 ASSERT(stp); 4842 4843 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4844 "removeq:%p %p", qp, wqp); 4845 ASSERT(qp->q_flag&QREADR); 4846 4847 /* 4848 * For queues using Synchronous streams, we must wait for all threads in 4849 * rwnext() to drain out before proceeding. 4850 */ 4851 if (qp->q_flag & QSYNCSTR) { 4852 /* First, we need to wake up any threads blocked in rwnext() */ 4853 mutex_enter(SQLOCK(sq)); 4854 if (sq->sq_flags & SQ_WANTWAKEUP) { 4855 sq->sq_flags &= ~SQ_WANTWAKEUP; 4856 cv_broadcast(&sq->sq_wait); 4857 } 4858 mutex_exit(SQLOCK(sq)); 4859 4860 if (wsq != sq) { 4861 mutex_enter(SQLOCK(wsq)); 4862 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4863 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4864 cv_broadcast(&wsq->sq_wait); 4865 } 4866 mutex_exit(SQLOCK(wsq)); 4867 } 4868 4869 mutex_enter(QLOCK(qp)); 4870 while (qp->q_rwcnt > 0) { 4871 qp->q_flag |= QWANTRMQSYNC; 4872 cv_wait(&qp->q_wait, QLOCK(qp)); 4873 } 4874 mutex_exit(QLOCK(qp)); 4875 4876 mutex_enter(QLOCK(wqp)); 4877 while (wqp->q_rwcnt > 0) { 4878 wqp->q_flag |= QWANTRMQSYNC; 4879 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4880 } 4881 mutex_exit(QLOCK(wqp)); 4882 } 4883 4884 mutex_enter(SQLOCK(sq)); 4885 sq->sq_rmqcount++; 4886 if (sq->sq_flags & SQ_WANTWAKEUP) { 4887 sq->sq_flags &= ~SQ_WANTWAKEUP; 4888 cv_broadcast(&sq->sq_wait); 4889 } 4890 mutex_exit(SQLOCK(sq)); 4891 4892 isdriver = (qp->q_flag & QISDRV); 4893 4894 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4895 strlock(stp, sqlist); 4896 4897 reset_nfsrv_ptr(qp, wqp); 4898 4899 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4900 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4901 /* Do we have a FIFO? */ 4902 if (wqp->q_next == qp) { 4903 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4904 } else { 4905 if (wqp->q_next) 4906 backq(qp)->q_next = qp->q_next; 4907 if (qp->q_next) 4908 backq(wqp)->q_next = wqp->q_next; 4909 } 4910 4911 /* The QEND flag might have to be updated for the upstream guy */ 4912 if (qp->q_next) 4913 set_qend(qp->q_next); 4914 4915 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4916 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4917 4918 /* 4919 * Move any messages destined for the put procedures to the next 4920 * syncq in line. Otherwise free them. 4921 */ 4922 moved = 0; 4923 /* 4924 * Quick check to see whether there are any messages or events. 4925 */ 4926 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4927 moved += propagate_syncq(qp); 4928 if (wqp->q_syncqmsgs != 0 || 4929 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4930 moved += propagate_syncq(wqp); 4931 4932 strsetuio(stp); 4933 4934 /* 4935 * If this was a module removal, decrement the push count. 4936 */ 4937 if (!isdriver) 4938 stp->sd_pushcnt--; 4939 4940 strunlock(stp, sqlist); 4941 sqlist_free(sqlist); 4942 4943 /* 4944 * Make sure any messages that were propagated are drained. 4945 * Also clear any QFULL bit caused by messages that were propagated.
4946 */ 4947 4948 if (qp->q_next != NULL) { 4949 clr_qfull(qp); 4950 /* 4951 * For the driver calling qprocsoff, propagate_syncq 4952 * frees all the messages instead of putting them in 4953 * the stream head. 4954 */ 4955 if (!isdriver && (moved > 0)) 4956 emptysq(qp->q_next->q_syncq); 4957 } 4958 if (wqp->q_next != NULL) { 4959 clr_qfull(wqp); 4960 /* 4961 * We come here for any pop of a module except for the 4962 * case of the driver being removed. We don't call emptysq 4963 * if we did not move any messages; this avoids holding 4964 * PERMOD syncq locks in emptysq. 4965 */ 4966 if (moved > 0) 4967 emptysq(wqp->q_next->q_syncq); 4968 } 4969 4970 mutex_enter(SQLOCK(sq)); 4971 sq->sq_rmqcount--; 4972 mutex_exit(SQLOCK(sq)); 4973 } 4974 4975 /* 4976 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4977 * SQ_WRITER) on a syncq. 4978 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the 4979 * sync queue and waits until sq_count reaches maxcnt. 4980 * 4981 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller 4982 * does not care about putnext threads that are in the middle of calling put 4983 * entry points. 4984 * 4985 * This routine is used for both inner and outer syncqs. 4986 */ 4987 static void 4988 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 4989 { 4990 uint16_t count = 0; 4991 4992 mutex_enter(SQLOCK(sq)); 4993 /* 4994 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 4995 * SQ_FROZEN will be set if there is a frozen stream that has a 4996 * queue which also refers to this "shared" syncq. 4997 * SQ_BLOCKED will be set if there is an "off" queue which also 4998 * refers to this "shared" syncq. 4999 */ 5000 if (maxcnt != -1) { 5001 count = sq->sq_count; 5002 SQ_PUTLOCKS_ENTER(sq); 5003 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5004 SUM_SQ_PUTCOUNTS(sq, count); 5005 } 5006 sq->sq_needexcl++; 5007 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5008 5009 while ((sq->sq_flags & flag) || 5010 (maxcnt != -1 && count > (unsigned)maxcnt)) { 5011 sq->sq_flags |= SQ_WANTWAKEUP; 5012 if (maxcnt != -1) { 5013 SQ_PUTLOCKS_EXIT(sq); 5014 } 5015 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5016 if (maxcnt != -1) { 5017 count = sq->sq_count; 5018 SQ_PUTLOCKS_ENTER(sq); 5019 SUM_SQ_PUTCOUNTS(sq, count); 5020 } 5021 } 5022 sq->sq_needexcl--; 5023 sq->sq_flags |= flag; 5024 ASSERT(maxcnt == -1 || count == maxcnt); 5025 if (maxcnt != -1) { 5026 if (sq->sq_needexcl == 0) { 5027 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5028 } 5029 SQ_PUTLOCKS_EXIT(sq); 5030 } else if (sq->sq_needexcl == 0) { 5031 SQ_PUTCOUNT_SETFAST(sq); 5032 } 5033 5034 mutex_exit(SQLOCK(sq)); 5035 } 5036 5037 /* 5038 * Reset a flag that was set with blocksq. 5039 * 5040 * Can not use this routine to reset SQ_WRITER. 5041 * 5042 * If "isouter" is set then the syncq is assumed to be an outer perimeter 5043 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 5044 * to handle the queued qwriter operations. 5045 * 5046 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5047 * sq_putlocks are used.
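 *
 * blocksq() and unblocksq() pair up; e.g. outer_insert() below brackets
 * its manipulation of the outer perimeter list with:
 *
 *	blocksq(outer, SQ_BLOCKED, 0);
 *	... link the new inner syncq into the outer perimeter ...
 *	unblocksq(outer, SQ_BLOCKED, 1);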
5048 */ 5049 static void 5050 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 5051 { 5052 uint16_t flags; 5053 5054 mutex_enter(SQLOCK(sq)); 5055 ASSERT(resetflag != SQ_WRITER); 5056 ASSERT(sq->sq_flags & resetflag); 5057 flags = sq->sq_flags & ~resetflag; 5058 sq->sq_flags = flags; 5059 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 5060 if (flags & SQ_WANTWAKEUP) { 5061 flags &= ~SQ_WANTWAKEUP; 5062 cv_broadcast(&sq->sq_wait); 5063 } 5064 sq->sq_flags = flags; 5065 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5066 if (!isouter) { 5067 /* drain_syncq drops SQLOCK */ 5068 drain_syncq(sq); 5069 return; 5070 } 5071 } 5072 } 5073 mutex_exit(SQLOCK(sq)); 5074 } 5075 5076 /* 5077 * Reset a flag that was set with blocksq. 5078 * Does not drain the syncq. Use emptysq() for that. 5079 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 5080 * 5081 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5082 * sq_putlocks are used. 5083 */ 5084 static int 5085 dropsq(syncq_t *sq, uint16_t resetflag) 5086 { 5087 uint16_t flags; 5088 5089 mutex_enter(SQLOCK(sq)); 5090 ASSERT(sq->sq_flags & resetflag); 5091 flags = sq->sq_flags & ~resetflag; 5092 if (flags & SQ_WANTWAKEUP) { 5093 flags &= ~SQ_WANTWAKEUP; 5094 cv_broadcast(&sq->sq_wait); 5095 } 5096 sq->sq_flags = flags; 5097 mutex_exit(SQLOCK(sq)); 5098 if (flags & SQ_QUEUED) 5099 return (1); 5100 return (0); 5101 } 5102 5103 /* 5104 * Empty all the messages on a syncq. 5105 * 5106 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5107 * sq_putlocks are used. 5108 */ 5109 static void 5110 emptysq(syncq_t *sq) 5111 { 5112 uint16_t flags; 5113 5114 mutex_enter(SQLOCK(sq)); 5115 flags = sq->sq_flags; 5116 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5117 /* 5118 * To prevent potential recursive invocation of drain_syncq we 5119 * do not call drain_syncq if count is non-zero. 5120 */ 5121 if (sq->sq_count == 0) { 5122 /* drain_syncq() drops SQLOCK */ 5123 drain_syncq(sq); 5124 return; 5125 } else 5126 sqenable(sq); 5127 } 5128 mutex_exit(SQLOCK(sq)); 5129 } 5130 5131 /* 5132 * Ordered insert while removing duplicates. 5133 */ 5134 static void 5135 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 5136 { 5137 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 5138 5139 prev_sqlpp = &sqlist->sqlist_head; 5140 while ((sqlp = *prev_sqlpp) != NULL) { 5141 if (sqlp->sql_sq >= sqp) { 5142 if (sqlp->sql_sq == sqp) /* duplicate */ 5143 return; 5144 break; 5145 } 5146 prev_sqlpp = &sqlp->sql_next; 5147 } 5148 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 5149 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 5150 new_sqlp->sql_next = sqlp; 5151 new_sqlp->sql_sq = sqp; 5152 *prev_sqlpp = new_sqlp; 5153 } 5154 5155 /* 5156 * Walk the write side queues until we hit either the driver 5157 * or a twist in the stream (_SAMESTR will return false in both 5158 * these cases) then turn around and walk the read side queues 5159 * back up to the stream head. 5160 */ 5161 static void 5162 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5163 { 5164 while (q != NULL) { 5165 sqlist_insert(sqlist, q->q_syncq); 5166 5167 if (_SAMESTR(q)) 5168 q = q->q_next; 5169 else if (!(q->q_flag & QREADR)) 5170 q = _RD(q); 5171 else 5172 q = NULL; 5173 } 5174 } 5175 5176 /* 5177 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5178 * associated with the "q" parameter. The resulting list is sorted in a 5179 * canonical order and is free of duplicates. 
5180 * Assumes the passed queue is a _RD(q). 5181 */ 5182 static sqlist_t * 5183 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5184 { 5185 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5186 5187 /* 5188 * start with the current queue/qpair 5189 */ 5190 ASSERT(q->q_flag & QREADR); 5191 5192 sqlist_insert(sqlist, q->q_syncq); 5193 sqlist_insert(sqlist, _WR(q)->q_syncq); 5194 5195 sqlist_insertall(sqlist, stp->sd_wrq); 5196 if (do_twist) 5197 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5198 5199 return (sqlist); 5200 } 5201 5202 static sqlist_t * 5203 sqlist_alloc(struct stdata *stp, int kmflag) 5204 { 5205 size_t sqlist_size; 5206 sqlist_t *sqlist; 5207 5208 /* 5209 * Allocate 2 syncql_t's for each pushed module. Note that 5210 * the sqlist_t structure already has 4 syncql_t's built in: 5211 * 2 for the stream head, and 2 for the driver/other stream head. 5212 */ 5213 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5214 sizeof (sqlist_t); 5215 if (STRMATED(stp)) 5216 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5217 sqlist = kmem_alloc(sqlist_size, kmflag); 5218 5219 sqlist->sqlist_head = NULL; 5220 sqlist->sqlist_size = sqlist_size; 5221 sqlist->sqlist_index = 0; 5222 5223 return (sqlist); 5224 } 5225 5226 /* 5227 * Free the list created by sqlist_alloc() 5228 */ 5229 static void 5230 sqlist_free(sqlist_t *sqlist) 5231 { 5232 kmem_free(sqlist, sqlist->sqlist_size); 5233 } 5234 5235 /* 5236 * Prevent any new entries into any syncq in this stream. 5237 * Used by freezestr. 5238 */ 5239 void 5240 strblock(queue_t *q) 5241 { 5242 struct stdata *stp; 5243 syncql_t *sql; 5244 sqlist_t *sqlist; 5245 5246 q = _RD(q); 5247 5248 stp = STREAM(q); 5249 ASSERT(stp != NULL); 5250 5251 /* 5252 * Get a sorted list with all the duplicates removed containing 5253 * all the syncqs referenced by this stream. 5254 */ 5255 sqlist = sqlist_build(q, stp, B_FALSE); 5256 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5257 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5258 sqlist_free(sqlist); 5259 } 5260 5261 /* 5262 * Release the block on new entries into this stream 5263 */ 5264 void 5265 strunblock(queue_t *q) 5266 { 5267 struct stdata *stp; 5268 syncql_t *sql; 5269 sqlist_t *sqlist; 5270 int drain_needed; 5271 5272 q = _RD(q); 5273 5274 /* 5275 * Get a sorted list with all the duplicates removed containing 5276 * all the syncqs referenced by this stream. 5277 * Have to drop the SQ_FROZEN flag on all the syncqs before 5278 * starting to drain them; otherwise the draining might 5279 * cause a freezestr in some module on the stream (which 5280 * would deadlock). 
5281 */ 5282 stp = STREAM(q); 5283 ASSERT(stp != NULL); 5284 sqlist = sqlist_build(q, stp, B_FALSE); 5285 drain_needed = 0; 5286 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5287 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5288 if (drain_needed) { 5289 for (sql = sqlist->sqlist_head; sql != NULL; 5290 sql = sql->sql_next) 5291 emptysq(sql->sql_sq); 5292 } 5293 sqlist_free(sqlist); 5294 } 5295 5296 #ifdef DEBUG 5297 static int 5298 qprocsareon(queue_t *rq) 5299 { 5300 if (rq->q_next == NULL) 5301 return (0); 5302 return (_WR(rq->q_next)->q_next == _WR(rq)); 5303 } 5304 5305 int 5306 qclaimed(queue_t *q) 5307 { 5308 uint_t count; 5309 5310 count = q->q_syncq->sq_count; 5311 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5312 return (count != 0); 5313 } 5314 5315 /* 5316 * Check if anyone has frozen this stream with freezestr 5317 */ 5318 int 5319 frozenstr(queue_t *q) 5320 { 5321 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5322 } 5323 #endif /* DEBUG */ 5324 5325 /* 5326 * Enter a queue. 5327 * Obsoleted interface. Should not be used. 5328 */ 5329 void 5330 enterq(queue_t *q) 5331 { 5332 entersq(q->q_syncq, SQ_CALLBACK); 5333 } 5334 5335 void 5336 leaveq(queue_t *q) 5337 { 5338 leavesq(q->q_syncq, SQ_CALLBACK); 5339 } 5340 5341 /* 5342 * Enter a perimeter. c_inner and c_outer specify which concurrency bits 5343 * to check. 5344 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5345 * calls and the running of open, close and service procedures. 5346 * 5347 * If the c_inner bit is set there is no need to grab sq_putlocks since we 5348 * don't care if other threads have entered or are entering the put entry point. 5349 * 5350 * If the c_inner bit is set it might have been possible to use 5351 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize the 5352 * open/close path for IP) but since the count may need to be decremented in 5353 * qwait() we wouldn't know which counter to decrement. Currently the counter 5354 * is selected by the current cpu_seqid and the current CPU can change at any 5355 * moment. XXX in the future we might use curthread id bits to select the 5356 * counter and this would stay constant across routine calls. 5357 */ 5358 void 5359 entersq(syncq_t *sq, int entrypoint) 5360 { 5361 uint16_t count = 0; 5362 uint16_t flags; 5363 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5364 uint16_t type; 5365 uint_t c_inner = entrypoint & SQ_CI; 5366 uint_t c_outer = entrypoint & SQ_CO; 5367 5368 /* 5369 * Increment ref count to keep closes out of this queue. 5370 */ 5371 ASSERT(sq); 5372 ASSERT(c_inner && c_outer); 5373 mutex_enter(SQLOCK(sq)); 5374 flags = sq->sq_flags; 5375 type = sq->sq_type; 5376 if (!(type & c_inner)) { 5377 /* Make sure all putcounts now use slowlock. */ 5378 count = sq->sq_count; 5379 SQ_PUTLOCKS_ENTER(sq); 5380 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5381 SUM_SQ_PUTCOUNTS(sq, count); 5382 sq->sq_needexcl++; 5383 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5384 waitflags |= SQ_MESSAGES; 5385 } 5386 /* 5387 * Wait until we can enter the inner perimeter. 5388 * If we want exclusive access we wait until sq_count is 0. 5389 * We have to do this before entering the outer perimeter in order 5390 * to preserve put/close message ordering.
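 *
 * A sketch of the condition the loop below waits on (for exclusive
 * entry, i.e. !(type & c_inner)):
 *
 *	(flags & (SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL | SQ_MESSAGES)) ||
 *	    (sq_count + putcounts) != 0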
5391 */ 5392 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5393 sq->sq_flags = flags | SQ_WANTWAKEUP; 5394 if (!(type & c_inner)) { 5395 SQ_PUTLOCKS_EXIT(sq); 5396 } 5397 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5398 if (!(type & c_inner)) { 5399 count = sq->sq_count; 5400 SQ_PUTLOCKS_ENTER(sq); 5401 SUM_SQ_PUTCOUNTS(sq, count); 5402 } 5403 flags = sq->sq_flags; 5404 } 5405 5406 if (!(type & c_inner)) { 5407 ASSERT(sq->sq_needexcl > 0); 5408 sq->sq_needexcl--; 5409 if (sq->sq_needexcl == 0) { 5410 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5411 } 5412 } 5413 5414 /* Check if we need to enter the outer perimeter */ 5415 if (!(type & c_outer)) { 5416 /* 5417 * We have to enter the outer perimeter exclusively before 5418 * we can increment sq_count to avoid deadlock. This implies 5419 * that we have to re-check sq_flags and sq_count. 5420 * 5421 * is it possible to have c_inner set when c_outer is not set? 5422 */ 5423 if (!(type & c_inner)) { 5424 SQ_PUTLOCKS_EXIT(sq); 5425 } 5426 mutex_exit(SQLOCK(sq)); 5427 outer_enter(sq->sq_outer, SQ_GOAWAY); 5428 mutex_enter(SQLOCK(sq)); 5429 flags = sq->sq_flags; 5430 /* 5431 * there should be no need to recheck sq_putcounts 5432 * because outer_enter() has already waited for them to clear 5433 * after setting SQ_WRITER. 5434 */ 5435 count = sq->sq_count; 5436 #ifdef DEBUG 5437 /* 5438 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5439 * of doing an ASSERT internally. Others should do 5440 * something like 5441 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5442 * without the need to #ifdef DEBUG it. 5443 */ 5444 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5445 #endif 5446 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5447 (!(type & c_inner) && count != 0)) { 5448 sq->sq_flags = flags | SQ_WANTWAKEUP; 5449 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5450 count = sq->sq_count; 5451 flags = sq->sq_flags; 5452 } 5453 } 5454 5455 sq->sq_count++; 5456 ASSERT(sq->sq_count != 0); /* Wraparound */ 5457 if (!(type & c_inner)) { 5458 /* Exclusive entry */ 5459 ASSERT(sq->sq_count == 1); 5460 sq->sq_flags |= SQ_EXCL; 5461 if (type & c_outer) { 5462 SQ_PUTLOCKS_EXIT(sq); 5463 } 5464 } 5465 mutex_exit(SQLOCK(sq)); 5466 } 5467 5468 /* 5469 * Leave a syncq. Announce to framework that closes may proceed. 5470 * c_inner and c_outer specify which concurrency bits to check. 5471 * 5472 * Must never be called from driver or module put entry point. 5473 * 5474 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5475 * sq_putlocks are used. 5476 */ 5477 void 5478 leavesq(syncq_t *sq, int entrypoint) 5479 { 5480 uint16_t flags; 5481 uint16_t type; 5482 uint_t c_outer = entrypoint & SQ_CO; 5483 #ifdef DEBUG 5484 uint_t c_inner = entrypoint & SQ_CI; 5485 #endif 5486 5487 /* 5488 * Decrement ref count, drain the syncq if possible, and wake up 5489 * any waiting close. 5490 */ 5491 ASSERT(sq); 5492 ASSERT(c_inner && c_outer); 5493 mutex_enter(SQLOCK(sq)); 5494 flags = sq->sq_flags; 5495 type = sq->sq_type; 5496 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5497 5498 if (flags & SQ_WANTWAKEUP) { 5499 flags &= ~SQ_WANTWAKEUP; 5500 cv_broadcast(&sq->sq_wait); 5501 } 5502 if (flags & SQ_WANTEXWAKEUP) { 5503 flags &= ~SQ_WANTEXWAKEUP; 5504 cv_broadcast(&sq->sq_exitwait); 5505 } 5506 5507 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5508 /* 5509 * The syncq needs to be drained. "Exit" the syncq 5510 * before calling drain_syncq. 
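 * drain_syncq() is entered with SQLOCK held and drops it before
 * returning (note the MUTEX_NOT_HELD assertion below), which is why
 * the claim must be released first.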
5511 */ 5512 ASSERT(sq->sq_count != 0); 5513 sq->sq_count--; 5514 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5515 sq->sq_flags = flags & ~SQ_EXCL; 5516 drain_syncq(sq); 5517 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5518 /* Check if we need to exit the outer perimeter */ 5519 /* XXX will this ever be true? */ 5520 if (!(type & c_outer)) 5521 outer_exit(sq->sq_outer); 5522 return; 5523 } 5524 } 5525 ASSERT(sq->sq_count != 0); 5526 sq->sq_count--; 5527 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5528 sq->sq_flags = flags & ~SQ_EXCL; 5529 mutex_exit(SQLOCK(sq)); 5530 5531 /* Check if we need to exit the outer perimeter */ 5532 if (!(sq->sq_type & c_outer)) 5533 outer_exit(sq->sq_outer); 5534 } 5535 5536 /* 5537 * Prevent q_next from changing in this stream by incrementing sq_count. 5538 * 5539 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5540 * sq_putlocks are used. 5541 */ 5542 void 5543 claimq(queue_t *qp) 5544 { 5545 syncq_t *sq = qp->q_syncq; 5546 5547 mutex_enter(SQLOCK(sq)); 5548 sq->sq_count++; 5549 ASSERT(sq->sq_count != 0); /* Wraparound */ 5550 mutex_exit(SQLOCK(sq)); 5551 } 5552 5553 /* 5554 * Undo claimq. 5555 * 5556 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5557 * sq_putlocks are used. 5558 */ 5559 void 5560 releaseq(queue_t *qp) 5561 { 5562 syncq_t *sq = qp->q_syncq; 5563 uint16_t flags; 5564 5565 mutex_enter(SQLOCK(sq)); 5566 ASSERT(sq->sq_count > 0); 5567 sq->sq_count--; 5568 5569 flags = sq->sq_flags; 5570 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5571 if (flags & SQ_WANTWAKEUP) { 5572 flags &= ~SQ_WANTWAKEUP; 5573 cv_broadcast(&sq->sq_wait); 5574 } 5575 sq->sq_flags = flags; 5576 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5577 /* 5578 * To prevent potential recursive invocation of 5579 * drain_syncq we do not call drain_syncq if count is 5580 * non-zero. 5581 */ 5582 if (sq->sq_count == 0) { 5583 drain_syncq(sq); 5584 return; 5585 } else 5586 sqenable(sq); 5587 } 5588 } 5589 mutex_exit(SQLOCK(sq)); 5590 } 5591 5592 /* 5593 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5594 */ 5595 void 5596 claimstr(queue_t *qp) 5597 { 5598 struct stdata *stp = STREAM(qp); 5599 5600 mutex_enter(&stp->sd_reflock); 5601 stp->sd_refcnt++; 5602 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5603 mutex_exit(&stp->sd_reflock); 5604 } 5605 5606 /* 5607 * Undo claimstr. 
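 * claimstr() and releasestr() bracket any walk of q_next; e.g.
 * backenable() above does (simplified):
 *
 *	claimstr(q);
 *	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq))
 *		;
 *	...
 *	releasestr(q);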
5608 */ 5609 void 5610 releasestr(queue_t *qp) 5611 { 5612 struct stdata *stp = STREAM(qp); 5613 5614 mutex_enter(&stp->sd_reflock); 5615 ASSERT(stp->sd_refcnt != 0); 5616 if (--stp->sd_refcnt == 0) 5617 cv_broadcast(&stp->sd_refmonitor); 5618 mutex_exit(&stp->sd_reflock); 5619 } 5620 5621 static syncq_t * 5622 new_syncq(void) 5623 { 5624 return (kmem_cache_alloc(syncq_cache, KM_SLEEP)); 5625 } 5626 5627 static void 5628 free_syncq(syncq_t *sq) 5629 { 5630 ASSERT(sq->sq_head == NULL); 5631 ASSERT(sq->sq_outer == NULL); 5632 ASSERT(sq->sq_callbpend == NULL); 5633 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) || 5634 (sq->sq_onext == sq && sq->sq_oprev == sq)); 5635 5636 if (sq->sq_ciputctrl != NULL) { 5637 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 5638 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 5639 sq->sq_nciputctrl, 0); 5640 ASSERT(ciputctrl_cache != NULL); 5641 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 5642 } 5643 5644 sq->sq_tail = NULL; 5645 sq->sq_evhead = NULL; 5646 sq->sq_evtail = NULL; 5647 sq->sq_ciputctrl = NULL; 5648 sq->sq_nciputctrl = 0; 5649 sq->sq_count = 0; 5650 sq->sq_rmqcount = 0; 5651 sq->sq_callbflags = 0; 5652 sq->sq_cancelid = 0; 5653 sq->sq_next = NULL; 5654 sq->sq_needexcl = 0; 5655 sq->sq_svcflags = 0; 5656 sq->sq_nqueues = 0; 5657 sq->sq_pri = 0; 5658 sq->sq_onext = NULL; 5659 sq->sq_oprev = NULL; 5660 sq->sq_flags = 0; 5661 sq->sq_type = 0; 5662 sq->sq_servcount = 0; 5663 5664 kmem_cache_free(syncq_cache, sq); 5665 } 5666 5667 /* Outer perimeter code */ 5668 5669 /* 5670 * The outer syncq uses the fields and flags in the syncq slightly 5671 * differently from the inner syncqs. 5672 * sq_count Incremented when there are pending or running 5673 * writers at the outer perimeter to prevent the set of 5674 * inner syncqs that belong to the outer perimeter from 5675 * changing. 5676 * sq_head/tail List of deferred qwriter(OUTER) operations. 5677 * 5678 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while 5679 * inner syncqs are added to or removed from the 5680 * outer perimeter. 5681 * SQ_QUEUED sq_head/tail has messages or events queued. 5682 * 5683 * SQ_WRITER A thread is currently traversing all the inner syncqs 5684 * setting the SQ_WRITER flag. 5685 */ 5686 5687 /* 5688 * Get write access at the outer perimeter. 5689 * Note that read access is done by entersq, putnext, and put by simply 5690 * incrementing sq_count in the inner syncq. 5691 * 5692 * Waits until "flags" is no longer set in the outer to prevent multiple 5693 * threads from having write access at the same time. SQ_WRITER has to be part 5694 * of "flags". 5695 * 5696 * Increases sq_count on the outer syncq to keep away outer_insert/remove 5697 * until the outer_exit is finished. 5698 * 5699 * outer_enter is vulnerable to starvation since it does not prevent new 5700 * threads from entering the inner syncqs while it is waiting for sq_count to 5701 * go to zero. 
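 *
 * A sketch of the protocol implemented below:
 *
 *	1. wait until none of "flags" is set on the outer; set SQ_WRITER
 *	   and bump sq_count on the outer
 *	2. set SQ_WRITER on every inner syncq under the outer SQLOCK
 *	3. wait for each inner sq_count/putcounts to drain to zero
 *	4. if any of "flags" appeared meanwhile, outer_exit() and retry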
5702 */ 5703 void 5704 outer_enter(syncq_t *outer, uint16_t flags) 5705 { 5706 syncq_t *sq; 5707 int wait_needed; 5708 uint16_t count; 5709 5710 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5711 outer->sq_oprev != NULL); 5712 ASSERT(flags & SQ_WRITER); 5713 5714 retry: 5715 mutex_enter(SQLOCK(outer)); 5716 while (outer->sq_flags & flags) { 5717 outer->sq_flags |= SQ_WANTWAKEUP; 5718 cv_wait(&outer->sq_wait, SQLOCK(outer)); 5719 } 5720 5721 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5722 outer->sq_flags |= SQ_WRITER; 5723 outer->sq_count++; 5724 ASSERT(outer->sq_count != 0); /* wraparound */ 5725 wait_needed = 0; 5726 /* 5727 * Set SQ_WRITER on all the inner syncqs while holding 5728 * the SQLOCK on the outer syncq. This ensures that the changing 5729 * of SQ_WRITER is atomic under the outer SQLOCK. 5730 */ 5731 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5732 mutex_enter(SQLOCK(sq)); 5733 count = sq->sq_count; 5734 SQ_PUTLOCKS_ENTER(sq); 5735 sq->sq_flags |= SQ_WRITER; 5736 SUM_SQ_PUTCOUNTS(sq, count); 5737 if (count != 0) 5738 wait_needed = 1; 5739 SQ_PUTLOCKS_EXIT(sq); 5740 mutex_exit(SQLOCK(sq)); 5741 } 5742 mutex_exit(SQLOCK(outer)); 5743 5744 /* 5745 * Get everybody out of the syncqs sequentially. 5746 * Note that we don't actually need to acquire the PUTLOCKS, since 5747 * we have already cleared the fastbit, and set SQ_WRITER. By 5748 * definition, the count can not increase since putnext will 5749 * take the slowlock path (and the purpose of acquiring the 5750 * putlocks was to make sure it didn't increase while we were 5751 * waiting). 5752 * 5753 * Note that we still acquire the PUTLOCKS to be safe. 5754 */ 5755 if (wait_needed) { 5756 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5757 mutex_enter(SQLOCK(sq)); 5758 count = sq->sq_count; 5759 SQ_PUTLOCKS_ENTER(sq); 5760 SUM_SQ_PUTCOUNTS(sq, count); 5761 while (count != 0) { 5762 sq->sq_flags |= SQ_WANTWAKEUP; 5763 SQ_PUTLOCKS_EXIT(sq); 5764 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5765 count = sq->sq_count; 5766 SQ_PUTLOCKS_ENTER(sq); 5767 SUM_SQ_PUTCOUNTS(sq, count); 5768 } 5769 SQ_PUTLOCKS_EXIT(sq); 5770 mutex_exit(SQLOCK(sq)); 5771 } 5772 /* 5773 * Verify that none of the flags got set while we 5774 * were waiting for the sq_counts to drop. 5775 * If this happens we exit and retry entering the 5776 * outer perimeter. 5777 */ 5778 mutex_enter(SQLOCK(outer)); 5779 if (outer->sq_flags & (flags & ~SQ_WRITER)) { 5780 mutex_exit(SQLOCK(outer)); 5781 outer_exit(outer); 5782 goto retry; 5783 } 5784 mutex_exit(SQLOCK(outer)); 5785 } 5786 } 5787 5788 /* 5789 * Drop the write access at the outer perimeter. 5790 * Read access is dropped implicitly (by putnext, put, and leavesq) by 5791 * decrementing sq_count. 5792 */ 5793 void 5794 outer_exit(syncq_t *outer) 5795 { 5796 syncq_t *sq; 5797 int drain_needed; 5798 uint16_t flags; 5799 5800 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5801 outer->sq_oprev != NULL); 5802 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer))); 5803 5804 /* 5805 * Atomically (from the perspective of threads calling become_writer) 5806 * drop the write access at the outer perimeter by holding 5807 * SQLOCK(outer) across all the dropsq calls and the resetting of 5808 * SQ_WRITER. 5809 * This defines a locking order between the outer perimeter 5810 * SQLOCK and the inner perimeter SQLOCKs.
5811 */ 5812 mutex_enter(SQLOCK(outer)); 5813 flags = outer->sq_flags; 5814 ASSERT(outer->sq_flags & SQ_WRITER); 5815 if (flags & SQ_QUEUED) { 5816 write_now(outer); 5817 flags = outer->sq_flags; 5818 } 5819 5820 /* 5821 * sq_onext is stable since sq_count has not yet been decreased. 5822 * Reset the SQ_WRITER flags in all syncqs. 5823 * After dropping SQ_WRITER on the outer syncq we empty all the 5824 * inner syncqs. 5825 */ 5826 drain_needed = 0; 5827 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5828 drain_needed += dropsq(sq, SQ_WRITER); 5829 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5830 flags &= ~SQ_WRITER; 5831 if (drain_needed) { 5832 outer->sq_flags = flags; 5833 mutex_exit(SQLOCK(outer)); 5834 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5835 emptysq(sq); 5836 mutex_enter(SQLOCK(outer)); 5837 flags = outer->sq_flags; 5838 } 5839 if (flags & SQ_WANTWAKEUP) { 5840 flags &= ~SQ_WANTWAKEUP; 5841 cv_broadcast(&outer->sq_wait); 5842 } 5843 outer->sq_flags = flags; 5844 ASSERT(outer->sq_count > 0); 5845 outer->sq_count--; 5846 mutex_exit(SQLOCK(outer)); 5847 } 5848 5849 /* 5850 * Add another syncq to an outer perimeter. 5851 * Block out all other access to the outer perimeter while it is being 5852 * changed using blocksq. 5853 * Assumes that the caller has *not* done an outer_enter. 5854 * 5855 * Vulnerable to starvation in blocksq. 5856 */ 5857 static void 5858 outer_insert(syncq_t *outer, syncq_t *sq) 5859 { 5860 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5861 outer->sq_oprev != NULL); 5862 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5863 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5864 5865 /* Get exclusive access to the outer perimeter list */ 5866 blocksq(outer, SQ_BLOCKED, 0); 5867 ASSERT(outer->sq_flags & SQ_BLOCKED); 5868 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5869 5870 mutex_enter(SQLOCK(sq)); 5871 sq->sq_outer = outer; 5872 outer->sq_onext->sq_oprev = sq; 5873 sq->sq_onext = outer->sq_onext; 5874 outer->sq_onext = sq; 5875 sq->sq_oprev = outer; 5876 mutex_exit(SQLOCK(sq)); 5877 unblocksq(outer, SQ_BLOCKED, 1); 5878 } 5879 5880 /* 5881 * Remove a syncq from an outer perimeter. 5882 * Block out all other access to the outer perimeter while it is being 5883 * changed using blocksq. 5884 * Assumes that the caller has *not* done an outer_enter. 5885 * 5886 * Vulnerable to starvation in blocksq. 5887 */ 5888 static void 5889 outer_remove(syncq_t *outer, syncq_t *sq) 5890 { 5891 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5892 outer->sq_oprev != NULL); 5893 ASSERT(sq->sq_outer == outer); 5894 5895 /* Get exclusive access to the outer perimeter list */ 5896 blocksq(outer, SQ_BLOCKED, 0); 5897 ASSERT(outer->sq_flags & SQ_BLOCKED); 5898 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5899 5900 mutex_enter(SQLOCK(sq)); 5901 sq->sq_outer = NULL; 5902 sq->sq_onext->sq_oprev = sq->sq_oprev; 5903 sq->sq_oprev->sq_onext = sq->sq_onext; 5904 sq->sq_oprev = sq->sq_onext = NULL; 5905 mutex_exit(SQLOCK(sq)); 5906 unblocksq(outer, SQ_BLOCKED, 1); 5907 } 5908 5909 /* 5910 * Queue a deferred qwriter(OUTER) callback for this outer perimeter. 5911 * If this is the first callback for this outer perimeter then add 5912 * this outer perimeter to the list of outer perimeters that 5913 * the qwriter_outer_thread will process. 5914 * 5915 * Increments sq_count in the outer syncq to prevent the membership 5916 * of the outer perimeter (in terms of inner syncqs) from changing while 5917 * the callback is pending.
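 *
 * The callback is encoded in the mblk itself: b_prev carries the
 * function pointer and b_queue the destination queue; write_now()
 * below unpacks and runs them.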
5918 */ 5919 static void 5920 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp) 5921 { 5922 ASSERT(MUTEX_HELD(SQLOCK(outer))); 5923 5924 mp->b_prev = (mblk_t *)func; 5925 mp->b_queue = q; 5926 mp->b_next = NULL; 5927 outer->sq_count++; /* Decremented when dequeued */ 5928 ASSERT(outer->sq_count != 0); /* Wraparound */ 5929 if (outer->sq_evhead == NULL) { 5930 /* First message. */ 5931 outer->sq_evhead = outer->sq_evtail = mp; 5932 outer->sq_flags |= SQ_EVENTS; 5933 mutex_exit(SQLOCK(outer)); 5934 STRSTAT(qwr_outer); 5935 (void) taskq_dispatch(streams_taskq, 5936 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP); 5937 } else { 5938 ASSERT(outer->sq_flags & SQ_EVENTS); 5939 outer->sq_evtail->b_next = mp; 5940 outer->sq_evtail = mp; 5941 mutex_exit(SQLOCK(outer)); 5942 } 5943 } 5944 5945 /* 5946 * Try and upgrade to write access at the outer perimeter. If this 5947 * cannot be done without blocking, then queue the callback to be run 5948 * by the qwriter_outer_thread. 5949 * 5950 * This routine can only be called from put or service procedures plus 5951 * asynchronous callback routines that have properly entered the queue (with 5952 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq 5953 * associated with q. 5954 */ 5955 void 5956 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)()) 5957 { 5958 syncq_t *osq, *sq, *outer; 5959 int failed; 5960 uint16_t flags; 5961 5962 osq = q->q_syncq; 5963 outer = osq->sq_outer; 5964 if (outer == NULL) 5965 panic("qwriter(PERIM_OUTER): no outer perimeter"); 5966 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5967 outer->sq_oprev != NULL); 5968 5969 mutex_enter(SQLOCK(outer)); 5970 flags = outer->sq_flags; 5971 /* 5972 * If some thread is traversing sq_next, or if we are blocked by 5973 * outer_insert or outer_remove, or if we already have queued 5974 * callbacks, then queue this callback for later processing. 5975 * 5976 * Also queue the qwriter for an interrupt thread in order 5977 * to reduce the time spent running at high IPL. 5979 */ 5980 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) { 5981 /* 5982 * Queue the become_writer request. 5983 * The queueing is atomic under SQLOCK(outer) in order 5984 * to synchronize with outer_exit. 5985 * queue_writer will drop the outer SQLOCK. 5986 */ 5987 if (flags & SQ_BLOCKED) { 5988 /* Must set SQ_WRITER on inner perimeter */ 5989 mutex_enter(SQLOCK(osq)); 5990 osq->sq_flags |= SQ_WRITER; 5991 mutex_exit(SQLOCK(osq)); 5992 } else { 5993 if (!(flags & SQ_WRITER)) { 5994 /* 5995 * The outer could have been SQ_BLOCKED thus 5996 * SQ_WRITER might not be set on the inner. 5997 */ 5998 mutex_enter(SQLOCK(osq)); 5999 osq->sq_flags |= SQ_WRITER; 6000 mutex_exit(SQLOCK(osq)); 6001 } 6002 ASSERT(osq->sq_flags & SQ_WRITER); 6003 } 6004 queue_writer(outer, func, q, mp); 6005 return; 6006 } 6007 /* 6008 * We are half-way to exclusive access to the outer perimeter. 6009 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove 6010 * while the inner syncqs are traversed. 6011 */ 6012 outer->sq_count++; 6013 ASSERT(outer->sq_count != 0); /* wraparound */ 6014 flags |= SQ_WRITER; 6015 /* 6016 * Check if we can run the function immediately. Mark all 6017 * syncqs with the writer flag to prevent new entries into 6018 * put and service procedures. 6019 * 6020 * Set SQ_WRITER on all the inner syncqs while holding 6021 * the SQLOCK on the outer syncq. This ensures that the changing 6022 * of SQ_WRITER is atomic under the outer SQLOCK.
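 *
 * (Module code normally reaches this routine through the DDI entry
 * point qwriter(9F) with PERIM_OUTER; a hedged sketch, where
 * set_opts is a hypothetical callback belonging to the module:
 *
 *	static void set_opts(queue_t *q, mblk_t *mp);
 *	...
 *	qwriter(q, mp, set_opts, PERIM_OUTER);
 *
 * The callback then runs with write access to the entire outer
 * perimeter, either immediately below or later via queue_writer().)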
6023 */ 6024 failed = 0; 6025 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 6026 uint16_t count; 6027 uint_t maxcnt = (sq == osq) ? 1 : 0; 6028 6029 mutex_enter(SQLOCK(sq)); 6030 count = sq->sq_count; 6031 SQ_PUTLOCKS_ENTER(sq); 6032 SUM_SQ_PUTCOUNTS(sq, count); 6033 if (sq->sq_count > maxcnt) 6034 failed = 1; 6035 sq->sq_flags |= SQ_WRITER; 6036 SQ_PUTLOCKS_EXIT(sq); 6037 mutex_exit(SQLOCK(sq)); 6038 } 6039 if (failed) { 6040 /* 6041 * Some other thread has a read claim on the outer perimeter. 6042 * Queue the callback for deferred processing. 6043 * 6044 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER 6045 * so that other qwriter(OUTER) calls will queue their 6046 * callbacks as well. queue_writer increments sq_count so we 6047 * decrement to compensate for our increment. 6048 * 6049 * Dropping SQ_WRITER enables the writer thread to work 6050 * on this outer perimeter. 6051 */ 6052 outer->sq_flags = flags; 6053 queue_writer(outer, func, q, mp); 6054 /* queue_writer dropped the lock */ 6055 mutex_enter(SQLOCK(outer)); 6056 ASSERT(outer->sq_count > 0); 6057 outer->sq_count--; 6058 ASSERT(outer->sq_flags & SQ_WRITER); 6059 flags = outer->sq_flags; 6060 flags &= ~SQ_WRITER; 6061 if (flags & SQ_WANTWAKEUP) { 6062 flags &= ~SQ_WANTWAKEUP; 6063 cv_broadcast(&outer->sq_wait); 6064 } 6065 outer->sq_flags = flags; 6066 mutex_exit(SQLOCK(outer)); 6067 return; 6068 } else { 6069 outer->sq_flags = flags; 6070 mutex_exit(SQLOCK(outer)); 6071 } 6072 6073 /* Can run it immediately */ 6074 (*func)(q, mp); 6075 6076 outer_exit(outer); 6077 } 6078 6079 /* 6080 * Dequeue all writer callbacks from the outer perimeter and run them. 6081 */ 6082 static void 6083 write_now(syncq_t *outer) 6084 { 6085 mblk_t *mp; 6086 queue_t *q; 6087 void (*func)(); 6088 6089 ASSERT(MUTEX_HELD(SQLOCK(outer))); 6090 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 6091 outer->sq_oprev != NULL); 6092 while ((mp = outer->sq_evhead) != NULL) { 6093 /* 6094 * queues cannot be placed on the queuelist on the outer 6095 * perimeter. 6096 */ 6097 ASSERT(!(outer->sq_flags & SQ_MESSAGES)); 6098 ASSERT((outer->sq_flags & SQ_EVENTS)); 6099 6100 outer->sq_evhead = mp->b_next; 6101 if (outer->sq_evhead == NULL) { 6102 outer->sq_evtail = NULL; 6103 outer->sq_flags &= ~SQ_EVENTS; 6104 } 6105 ASSERT(outer->sq_count != 0); 6106 outer->sq_count--; /* Incremented when enqueued. */ 6107 mutex_exit(SQLOCK(outer)); 6108 /* 6109 * Drop the message if the queue is closing. 6110 * Make sure that the queue is "claimed" when the callback 6111 * is run in order to satisfy various ASSERTs. 6112 */ 6113 q = mp->b_queue; 6114 func = (void (*)())mp->b_prev; 6115 ASSERT(func != NULL); 6116 mp->b_next = mp->b_prev = NULL; 6117 if (q->q_flag & QWCLOSE) { 6118 freemsg(mp); 6119 } else { 6120 claimq(q); 6121 (*func)(q, mp); 6122 releaseq(q); 6123 } 6124 mutex_enter(SQLOCK(outer)); 6125 } 6126 ASSERT(MUTEX_HELD(SQLOCK(outer))); 6127 } 6128 6129 /* 6130 * The list of messages on the inner syncq is effectively hashed 6131 * by destination queue. These destination queues are doubly 6132 * linked lists (hopefully) in priority order. Messages are then 6133 * put on the queue referenced by the q_sqhead/q_sqtail elements. 6134 * Additional messages are linked together by the b_next/b_prev 6135 * elements in the mblk, with (similar to putq()) the first message 6136 * having a NULL b_prev and the last message having a NULL b_next.
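 *
 * A full walk of the queued messages would therefore look roughly
 * like this (an illustrative sketch; the real code below
 * interleaves locking and flag checks):
 *
 *	for (qp = sq->sq_head; qp != NULL; qp = qp->q_sqnext)
 *		for (bp = qp->q_sqhead; bp != NULL; bp = bp->b_next)
 *			... deliver bp to qp's put procedure ...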
6137 * 6138 * Events, such as qwriter callbacks, are put onto a list in FIFO 6139 * order referenced by sq_evhead, and sq_evtail. This is a singly 6140 * linked list, and messages here MUST be processed in the order queued. 6141 */ 6142 6143 /* 6144 * Run the events on the syncq event list (sq_evhead). 6145 * Assumes there is only one claim on the syncq, it is 6146 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6147 * Messages here are processed in order, with the SQ_EXCL bit 6148 * held all the way through till the last message is processed. 6149 */ 6150 void 6151 sq_run_events(syncq_t *sq) 6152 { 6153 mblk_t *bp; 6154 queue_t *qp; 6155 uint16_t flags = sq->sq_flags; 6156 void (*func)(); 6157 6158 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6159 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6160 sq->sq_oprev == NULL) || 6161 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6162 sq->sq_oprev != NULL)); 6163 6164 ASSERT(flags & SQ_EXCL); 6165 ASSERT(sq->sq_count == 1); 6166 6167 /* 6168 * We need to process all of the events on this list. It 6169 * is possible that new events will be added while we are 6170 * away processing a callback, so on every loop, we start 6171 * back at the beginning of the list. 6172 */ 6173 /* 6174 * We have to reaccess sq_evhead since there is a 6175 * possibility of a new entry while we were running 6176 * the callback. 6177 */ 6178 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6179 ASSERT(bp->b_queue->q_syncq == sq); 6180 ASSERT(sq->sq_flags & SQ_EVENTS); 6181 6182 qp = bp->b_queue; 6183 func = (void (*)())bp->b_prev; 6184 ASSERT(func != NULL); 6185 6186 /* 6187 * Messages from the event queue must be taken off in 6188 * FIFO order. 6189 */ 6190 ASSERT(sq->sq_evhead == bp); 6191 sq->sq_evhead = bp->b_next; 6192 6193 if (bp->b_next == NULL) { 6194 /* Deleting last */ 6195 ASSERT(sq->sq_evtail == bp); 6196 sq->sq_evtail = NULL; 6197 sq->sq_flags &= ~SQ_EVENTS; 6198 } 6199 bp->b_prev = bp->b_next = NULL; 6200 ASSERT(bp->b_datap->db_ref != 0); 6201 6202 mutex_exit(SQLOCK(sq)); 6203 6204 (*func)(qp, bp); 6205 6206 mutex_enter(SQLOCK(sq)); 6207 /* 6208 * re-read the flags, since they could have changed. 6209 */ 6210 flags = sq->sq_flags; 6211 ASSERT(flags & SQ_EXCL); 6212 } 6213 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6214 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6215 6216 if (flags & SQ_WANTWAKEUP) { 6217 flags &= ~SQ_WANTWAKEUP; 6218 cv_broadcast(&sq->sq_wait); 6219 } 6220 if (flags & SQ_WANTEXWAKEUP) { 6221 flags &= ~SQ_WANTEXWAKEUP; 6222 cv_broadcast(&sq->sq_exitwait); 6223 } 6224 sq->sq_flags = flags; 6225 } 6226 6227 /* 6228 * Put messages on the event list. 6229 * If we can go exclusive now, do so and process the event list, otherwise 6230 * let the last claim service this list (or wake the sqthread). 6231 * This procedure assumes SQLOCK is held. To run the event list, it 6232 * must be called with no claims. 6233 */ 6234 static void 6235 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6236 { 6237 uint16_t count; 6238 6239 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6240 ASSERT(func != NULL); 6241 6242 /* 6243 * This is a callback. Add it to the list of callbacks 6244 * and see about upgrading. 
6245 */ 6246 mp->b_prev = (mblk_t *)func; 6247 mp->b_queue = q; 6248 mp->b_next = NULL; 6249 if (sq->sq_evhead == NULL) { 6250 sq->sq_evhead = sq->sq_evtail = mp; 6251 sq->sq_flags |= SQ_EVENTS; 6252 } else { 6253 ASSERT(sq->sq_evtail != NULL); 6254 ASSERT(sq->sq_evtail->b_next == NULL); 6255 ASSERT(sq->sq_flags & SQ_EVENTS); 6256 sq->sq_evtail->b_next = mp; 6257 sq->sq_evtail = mp; 6258 } 6259 /* 6260 * We have set SQ_EVENTS, so threads will have to 6261 * unwind out of the perimeter, and new entries will 6262 * not grab a putlock. But we still need to know 6263 * how many threads have already made a claim to the 6264 * syncq, so grab the putlocks, and sum the counts. 6265 * If there are no claims on the syncq, we can upgrade 6266 * to exclusive, and run the event list. 6267 * NOTE: We hold the SQLOCK, so we can just grab the 6268 * putlocks. 6269 */ 6270 count = sq->sq_count; 6271 SQ_PUTLOCKS_ENTER(sq); 6272 SUM_SQ_PUTCOUNTS(sq, count); 6273 /* 6274 * This thread has no claim on the syncq (at least not from this 6275 * entry), so check whether any other thread does. If so, we 6276 * cannot upgrade; the thread holding the last claim is then 6277 * responsible for draining the syncq. 6278 */ 6282 if (count > 0) { 6283 /* 6284 * Can't upgrade - other threads inside. 6285 */ 6286 SQ_PUTLOCKS_EXIT(sq); 6287 mutex_exit(SQLOCK(sq)); 6288 return; 6289 } 6290 /* 6291 * Need to set SQ_EXCL and make a claim on the syncq. 6292 */ 6293 ASSERT((sq->sq_flags & SQ_EXCL) == 0); 6294 sq->sq_flags |= SQ_EXCL; 6295 ASSERT(sq->sq_count == 0); 6296 sq->sq_count++; 6297 SQ_PUTLOCKS_EXIT(sq); 6298 6299 /* Process the events list */ 6300 sq_run_events(sq); 6301 6302 /* 6303 * Release our claim... 6304 */ 6305 sq->sq_count--; 6306 6307 /* 6308 * And release SQ_EXCL. 6309 * We don't need to acquire the putlocks to release 6310 * SQ_EXCL, since we are exclusive, and hold the SQLOCK. 6311 */ 6312 sq->sq_flags &= ~SQ_EXCL; 6313 6314 /* 6315 * Verify that SQ_EXCL is clear now that we have dropped it. 6316 */ 6317 ASSERT(!(sq->sq_flags & SQ_EXCL)); 6318 6319 /* 6320 * If anything happened while we were running the 6321 * events (or was there before), we need to process 6322 * it now. We shouldn't be exclusive since we 6323 * released the perimeter above (plus, we asserted 6324 * for it). 6325 */ 6326 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED)) 6327 drain_syncq(sq); 6328 else 6329 mutex_exit(SQLOCK(sq)); 6330 } 6331 6332 /* 6333 * Perform delayed processing. The caller has to make sure that it is safe 6334 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are 6335 * set). 6336 * 6337 * Assume that the caller has NO claims on the syncq. However, a claim 6338 * on the syncq does not indicate that a thread is draining the syncq. 6339 * There may be more claims on the syncq than there are threads draining 6340 * (i.e. #_threads_draining <= sq_count) 6341 * 6342 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set 6343 * in order to preserve qwriter(OUTER) ordering constraints. 6344 * 6345 * sq_putcount only needs to be checked when dispatching the queued 6346 * writer call for a CIPUT sync queue, but this is handled in sq_run_events.
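 *
 * A typical call site therefore looks like the pattern
 * sqfill_events() uses above:
 *
 *	mutex_enter(SQLOCK(sq));
 *	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
 *		drain_syncq(sq);	which drops SQLOCK
 *	else
 *		mutex_exit(SQLOCK(sq));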
6347 */ 6348 void 6349 drain_syncq(syncq_t *sq) 6350 { 6351 queue_t *qp; 6352 uint16_t count; 6353 uint16_t type = sq->sq_type; 6354 uint16_t flags = sq->sq_flags; 6355 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE; 6356 6357 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6358 "drain_syncq start:%p", sq); 6359 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6360 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6361 sq->sq_oprev == NULL) || 6362 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6363 sq->sq_oprev != NULL)); 6364 6365 /* 6366 * Drop SQ_SERVICE flag. 6367 */ 6368 if (bg_service) 6369 sq->sq_svcflags &= ~SQ_SERVICE; 6370 6371 /* 6372 * If SQ_EXCL is set, someone else is processing this syncq - let him 6373 * finish the job. 6374 */ 6375 if (flags & SQ_EXCL) { 6376 if (bg_service) { 6377 ASSERT(sq->sq_servcount != 0); 6378 sq->sq_servcount--; 6379 } 6380 mutex_exit(SQLOCK(sq)); 6381 return; 6382 } 6383 6384 /* 6385 * This routine can be called by a background thread if 6386 * it was scheduled by a hi-priority thread. So, if there are 6387 * no messages queued, return (remember, we have the SQLOCK, 6388 * and it cannot change until we release it). Also wake up any waiters. 6389 */ 6390 if (!(flags & SQ_QUEUED)) { 6391 if (flags & SQ_WANTWAKEUP) { 6392 flags &= ~SQ_WANTWAKEUP; 6393 cv_broadcast(&sq->sq_wait); 6394 } 6395 if (flags & SQ_WANTEXWAKEUP) { 6396 flags &= ~SQ_WANTEXWAKEUP; 6397 cv_broadcast(&sq->sq_exitwait); 6398 } 6399 sq->sq_flags = flags; 6400 if (bg_service) { 6401 ASSERT(sq->sq_servcount != 0); 6402 sq->sq_servcount--; 6403 } 6404 mutex_exit(SQLOCK(sq)); 6405 return; 6406 } 6407 6408 /* 6409 * If this is not a concurrent put perimeter, we need to 6410 * become exclusive to drain. Also, if not CIPUT, we would 6411 * not have acquired a putlock, so we don't need to check 6412 * the putcounts. If not entering with a claim, we test 6413 * for sq_count == 0. 6414 */ 6415 type = sq->sq_type; 6416 if (!(type & SQ_CIPUT)) { 6417 if (sq->sq_count > 1) { 6418 if (bg_service) { 6419 ASSERT(sq->sq_servcount != 0); 6420 sq->sq_servcount--; 6421 } 6422 mutex_exit(SQLOCK(sq)); 6423 return; 6424 } 6425 sq->sq_flags |= SQ_EXCL; 6426 } 6427 6428 /* 6429 * This is where we make a claim to the syncq. 6430 * This can either be done by incrementing a putlock, or 6431 * the sq_count. But since we already have the SQLOCK 6432 * here, we just bump the sq_count. 6433 * 6434 * Note that after we make a claim, we need to let the code 6435 * fall through to the end of this routine to clean itself 6436 * up. A return in the while loop will put the syncq in a 6437 * very bad state. 6438 */ 6439 sq->sq_count++; 6440 ASSERT(sq->sq_count != 0); /* wraparound */ 6441 6442 while ((flags = sq->sq_flags) & SQ_QUEUED) { 6443 /* 6444 * If we are told to stayaway or went exclusive, 6445 * we are done. 6446 */ 6447 if (flags & (SQ_STAYAWAY)) { 6448 break; 6449 } 6450 6451 /* 6452 * If there are events to run, do so. 6453 * We have one claim on the syncq, so if there is 6454 * more than one claim, other threads are running. 6455 */ 6456 if (sq->sq_evhead != NULL) { 6457 ASSERT(sq->sq_flags & SQ_EVENTS); 6458 6459 count = sq->sq_count; 6460 SQ_PUTLOCKS_ENTER(sq); 6461 SUM_SQ_PUTCOUNTS(sq, count); 6462 if (count > 1) { 6463 SQ_PUTLOCKS_EXIT(sq); 6464 /* Can't upgrade - other threads inside */ 6465 break; 6466 } 6467 ASSERT((flags & SQ_EXCL) == 0); 6468 sq->sq_flags = flags | SQ_EXCL; 6469 SQ_PUTLOCKS_EXIT(sq); 6470 /* 6471 * We have the only claim; run the events. SQ_EXCL is 6472 * dropped below (CIPUT) or at the end of this routine (non-CIPUT).
6473 */ 6474 sq_run_events(sq); 6475 6476 /* 6477 * If this is a CIPUT perimeter, we need 6478 * to drop the SQ_EXCL flag so we can properly 6479 * continue draining the syncq. 6480 */ 6481 if (type & SQ_CIPUT) { 6482 ASSERT(sq->sq_flags & SQ_EXCL); 6483 sq->sq_flags &= ~SQ_EXCL; 6484 } 6485 6486 /* 6487 * And go back to the beginning just in case 6488 * anything changed while we were away. 6489 */ 6490 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT)); 6491 continue; 6492 } 6493 6494 ASSERT(sq->sq_evhead == NULL); 6495 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6496 6497 /* 6498 * Find the queue that is not draining. 6499 * 6500 * q_draining is protected by QLOCK which we do not hold. 6501 * But if it was set, then a thread was draining, and if it gets 6502 * cleared, then it was because the thread has successfully 6503 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY 6504 * state to happen, a thread needs the SQLOCK which we hold, and 6505 * if there was such a flag, we would have already seen it. 6506 */ 6507 6508 for (qp = sq->sq_head; 6509 qp != NULL && (qp->q_draining || 6510 (qp->q_sqflags & Q_SQDRAINING)); 6511 qp = qp->q_sqnext) 6512 ; 6513 6514 if (qp == NULL) 6515 break; 6516 6517 /* 6518 * We have a queue to work on, and we hold the 6519 * SQLOCK and one claim, call qdrain_syncq. 6520 * This means we need to release the SQLOCK and 6521 * acquire the QLOCK (OK since we have a claim). 6522 * Note that qdrain_syncq will actually dequeue 6523 * this queue from the sq_head list when it is 6524 * convinced all the work is done and release 6525 * the QLOCK before returning. 6526 */ 6527 qp->q_sqflags |= Q_SQDRAINING; 6528 mutex_exit(SQLOCK(sq)); 6529 mutex_enter(QLOCK(qp)); 6530 qdrain_syncq(sq, qp); 6531 mutex_enter(SQLOCK(sq)); 6532 6533 /* The queue is drained */ 6534 ASSERT(qp->q_sqflags & Q_SQDRAINING); 6535 qp->q_sqflags &= ~Q_SQDRAINING; 6536 /* 6537 * NOTE: After this point qp should not be used since it may be 6538 * closed. 6539 */ 6540 } 6541 6542 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6543 flags = sq->sq_flags; 6544 6545 /* 6546 * sq->sq_head cannot change because we hold the 6547 * sqlock. However, a thread CAN decide that it is no longer 6548 * going to drain that queue. That should only happen because of 6549 * a GOAWAY state, and we would see it here. 6550 * 6551 * This loop is not very efficient. One solution may be adding a second 6552 * pointer to the "draining" queue, but it is difficult to do when 6553 * queues are inserted in the middle due to priority ordering. Another 6554 * possibility is to yank the queue out of the sq list and put it onto 6555 * the "draining list" and then put it back if it can't be drained. 6556 */ 6557 6558 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) || 6559 (type & SQ_CI) || sq->sq_head->q_draining); 6560 6561 /* Drop SQ_EXCL for non-CIPUT perimeters */ 6562 if (!(type & SQ_CIPUT)) 6563 flags &= ~SQ_EXCL; 6564 ASSERT((flags & SQ_EXCL) == 0); 6565 6566 /* Wake up any waiters. */ 6567 if (flags & SQ_WANTWAKEUP) { 6568 flags &= ~SQ_WANTWAKEUP; 6569 cv_broadcast(&sq->sq_wait); 6570 } 6571 if (flags & SQ_WANTEXWAKEUP) { 6572 flags &= ~SQ_WANTEXWAKEUP; 6573 cv_broadcast(&sq->sq_exitwait); 6574 } 6575 sq->sq_flags = flags; 6576 6577 ASSERT(sq->sq_count != 0); 6578 /* Release our claim.
*/ 6579 sq->sq_count--; 6580 6581 if (bg_service) { 6582 ASSERT(sq->sq_servcount != 0); 6583 sq->sq_servcount--; 6584 } 6585 6586 mutex_exit(SQLOCK(sq)); 6587 6588 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6589 "drain_syncq end:%p", sq); 6590 } 6591 6592 6593 /* 6594 * 6595 * qdrain_syncq can be called (currently) from only one of two places: 6596 * drain_syncq 6597 * putnext (or some variation of it). 6598 * and eventually 6599 * qwait(_sig) 6600 * 6601 * If called from drain_syncq, we found it in the list of queues needing 6602 * service, so there is work to be done (or it wouldn't be in the list). 6603 * 6604 * If called from some putnext variation, it was because the 6605 * perimeter is open, but messages are blocking a putnext and 6606 * there is not a thread working on it. Now a thread could start 6607 * working on it while we are getting ready to do so ourself, but 6608 * the thread would set the q_draining flag, and we can spin out. 6609 * 6610 * As for qwait(_sig), I think I shall let it continue to call 6611 * drain_syncq directly (after all, it will get here eventually). 6612 * 6613 * qdrain_syncq has to terminate when: 6614 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering 6615 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering 6616 * 6617 * ASSUMES: 6618 * One claim 6619 * QLOCK held 6620 * SQLOCK not held 6621 * Will release QLOCK before returning 6622 */ 6623 void 6624 qdrain_syncq(syncq_t *sq, queue_t *q) 6625 { 6626 mblk_t *bp; 6627 #ifdef DEBUG 6628 uint16_t count; 6629 #endif 6630 6631 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6632 "drain_syncq start:%p", sq); 6633 ASSERT(q->q_syncq == sq); 6634 ASSERT(MUTEX_HELD(QLOCK(q))); 6635 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6636 /* 6637 * For non-CIPUT perimeters, we should be called with the exclusive bit 6638 * set already. For CIPUT perimeters, we will be doing a concurrent 6639 * drain, so it better not be set. 6640 */ 6641 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT))); 6642 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL))); 6643 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL)); 6644 /* 6645 * All outer pointers are set, or none of them are 6646 */ 6647 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6648 sq->sq_oprev == NULL) || 6649 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6650 sq->sq_oprev != NULL)); 6651 #ifdef DEBUG 6652 count = sq->sq_count; 6653 /* 6654 * This is OK without the putlocks, because we have one 6655 * claim either from the sq_count, or a putcount. We could 6656 * get an erroneous value from other counts, but ours won't 6657 * change, so one way or another, we will have at least a 6658 * value of one. 6659 */ 6660 SUM_SQ_PUTCOUNTS(sq, count); 6661 ASSERT(count >= 1); 6662 #endif /* DEBUG */ 6663 6664 /* 6665 * The first thing to do is find out if a thread is already draining 6666 * this queue. If so, we are done, just return. 6667 */ 6668 if (q->q_draining) { 6669 mutex_exit(QLOCK(q)); 6670 return; 6671 } 6672 6673 /* 6674 * If the perimeter is exclusive, there is nothing we can do right now, 6675 * go away. Note that there is nothing to prevent this case from 6676 * changing right after this check, but the spin-out will catch it. 6677 */ 6678 6679 /* Tell other threads that we are draining this queue */ 6680 q->q_draining = 1; /* Protected by QLOCK */ 6681 6682 /* 6683 * If there is nothing to do, clear QFULL as necessary. This caters for 6684 * the case where an empty queue was enqueued onto the syncq. 
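 *
 * (clr_qfull() acquires QLOCK of the next queue with a service
 * procedure, which may be this very queue, hence the drop/re-enter
 * pattern used below:
 *
 *	mutex_exit(QLOCK(q));
 *	clr_qfull(q);
 *	mutex_enter(QLOCK(q));
 * )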
6685 */ 6686 if (q->q_sqhead == NULL) { 6687 ASSERT(q->q_syncqmsgs == 0); 6688 mutex_exit(QLOCK(q)); 6689 clr_qfull(q); 6690 mutex_enter(QLOCK(q)); 6691 } 6692 6693 /* 6694 * Note that q_sqhead must be re-checked here in case another message 6695 * was enqueued whilst QLOCK was dropped during the call to clr_qfull. 6696 */ 6697 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) { 6698 /* 6699 * Because we can enter this routine just because a putnext is 6700 * blocked, we need to spin out if the perimeter wants to go 6701 * exclusive as well as just blocked. We need to spin out also 6702 * if events are queued on the syncq. 6703 * Don't check for SQ_EXCL, because non-CIPUT perimeters would 6704 * set it, and it can't become exclusive while we hold a claim. 6705 */ 6706 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) { 6707 break; 6708 } 6709 6710 #ifdef DEBUG 6711 /* 6712 * Since we are in qdrain_syncq, we already know the queue, 6713 * but for sanity, we want to check this against the qp that 6714 * was passed in by bp->b_queue. 6715 */ 6716 6717 ASSERT(bp->b_queue == q); 6718 ASSERT(bp->b_queue->q_syncq == sq); 6719 bp->b_queue = NULL; 6720 6721 /* 6722 * We would have the following check in the DEBUG code: 6723 * 6724 * if (bp->b_prev != NULL) { 6725 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp); 6726 * } 6727 * 6728 * This can't be done, however, since IP modifies qinfo 6729 * structure at run-time (switching between IPv4 qinfo and IPv6 6730 * qinfo), invalidating the check. 6731 * So the assignment to func is left here, but the ASSERT itself 6732 * is removed until the whole issue is resolved. 6733 */ 6734 #endif 6735 ASSERT(q->q_sqhead == bp); 6736 q->q_sqhead = bp->b_next; 6737 bp->b_prev = bp->b_next = NULL; 6738 ASSERT(q->q_syncqmsgs > 0); 6739 mutex_exit(QLOCK(q)); 6740 6741 ASSERT(bp->b_datap->db_ref != 0); 6742 6743 (void) (*q->q_qinfo->qi_putp)(q, bp); 6744 6745 mutex_enter(QLOCK(q)); 6746 6747 /* 6748 * q_syncqmsgs should only be decremented after executing the 6749 * put procedure to avoid message re-ordering. This is due to an 6750 * optimisation in putnext() which can call the put procedure 6751 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED 6752 * being set). 6753 * 6754 * We also need to clear QFULL in the next service procedure 6755 * queue if this is the last message destined for that queue. 6756 * 6757 * It would make better sense to have some sort of tunable for 6758 * the low water mark, but these semantics are not yet defined. 6759 * So, alas, we use a constant. 6760 */ 6761 if (--q->q_syncqmsgs == 0) { 6762 mutex_exit(QLOCK(q)); 6763 clr_qfull(q); 6764 mutex_enter(QLOCK(q)); 6765 } 6766 6767 /* 6768 * Always clear SQ_EXCL when CIPUT in order to handle 6769 * qwriter(INNER). The putp() can call qwriter and get exclusive 6770 * access IFF this is the only claim. So, we need to test for 6771 * this possibility, acquire the mutex and clear the bit. 6772 */ 6773 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) { 6774 mutex_enter(SQLOCK(sq)); 6775 sq->sq_flags &= ~SQ_EXCL; 6776 mutex_exit(SQLOCK(sq)); 6777 } 6778 } 6779 6780 /* 6781 * We should either have no messages on this queue, or we were told to 6782 * goaway by a waiter (which we will wake up at the end of this 6783 * function). 6784 */ 6785 ASSERT((q->q_sqhead == NULL) || 6786 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS))); 6787 6788 ASSERT(MUTEX_HELD(QLOCK(q))); 6789 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6790 6791 /* Remove the q from the syncq list if all the messages are drained. 
*/ 6792 if (q->q_sqhead == NULL) { 6793 ASSERT(q->q_syncqmsgs == 0); 6794 mutex_enter(SQLOCK(sq)); 6795 if (q->q_sqflags & Q_SQQUEUED) 6796 SQRM_Q(sq, q); 6797 mutex_exit(SQLOCK(sq)); 6798 /* 6799 * Since the queue is removed from the list, reset its priority. 6800 */ 6801 q->q_spri = 0; 6802 } 6803 6804 /* 6805 * Remember, the q_draining flag is used to let another thread know 6806 * that there is a thread currently draining the messages for a queue. 6807 * Since we are now done with this queue (even if there may be messages 6808 * still there), we need to clear this flag so some thread will work on 6809 * it if needed. 6810 */ 6811 ASSERT(q->q_draining); 6812 q->q_draining = 0; 6813 6814 /* Called with a claim, so OK to drop all locks. */ 6815 mutex_exit(QLOCK(q)); 6816 6817 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6818 "drain_syncq end:%p", sq); 6819 } 6820 /* END OF QDRAIN_SYNCQ */ 6821 6822 6823 /* 6824 * This is the mate to qdrain_syncq, except that it is putting the message onto 6825 * the queue instead of draining. Since the message is destined for the queue 6826 * that is selected, there is no need to identify the function because the 6827 * message is intended for the put routine for the queue. For debug kernels, 6828 * this routine will do it anyway just in case. 6829 * 6830 * After the message is enqueued on the syncq, it calls putnext_tail() 6831 * which will schedule a background thread to actually process the message. 6832 * 6833 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and 6834 * SQLOCK(sq) and QLOCK(q) are not held. 6835 */ 6836 void 6837 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp) 6838 { 6839 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6840 ASSERT(MUTEX_NOT_HELD(QLOCK(q))); 6841 ASSERT(sq->sq_count > 0); 6842 ASSERT(q->q_syncq == sq); 6843 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6844 sq->sq_oprev == NULL) || 6845 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6846 sq->sq_oprev != NULL)); 6847 6848 mutex_enter(QLOCK(q)); 6849 6850 #ifdef DEBUG 6851 /* 6852 * This is used for debug in the qfill_syncq/qdrain_syncq case 6853 * to trace the queue that the message is intended for. Note 6854 * that the original use was to identify the queue and function 6855 * to call on the drain. In the new syncq, we have the context 6856 * of the queue that we are draining, so we call its putproc and 6857 * don't rely on the saved values. But for debug this is still 6858 * useful information. 6859 */ 6860 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp; 6861 mp->b_queue = q; 6862 mp->b_next = NULL; 6863 #endif 6864 ASSERT(q->q_syncq == sq); 6865 /* 6866 * Enqueue the message on the list. 6867 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to 6868 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP(). 6869 */ 6870 SQPUT_MP(q, mp); 6871 mutex_enter(SQLOCK(sq)); 6872 6873 /* 6874 * And queue on syncq for scheduling, if not already queued. 6875 * Note that we need the SQLOCK for this, and for testing flags 6876 * at the end to see if we will drain. So grab it now, and 6877 * release it before we call qdrain_syncq or return. 6878 */ 6879 if (!(q->q_sqflags & Q_SQQUEUED)) { 6880 q->q_spri = curthread->t_pri; 6881 SQPUT_Q(sq, q); 6882 } 6883 #ifdef DEBUG 6884 else { 6885 /* 6886 * All of these conditions MUST be true!
6887 */ 6888 ASSERT(sq->sq_tail != NULL); 6889 if (sq->sq_tail == sq->sq_head) { 6890 ASSERT((q->q_sqprev == NULL) && 6891 (q->q_sqnext == NULL)); 6892 } else { 6893 ASSERT((q->q_sqprev != NULL) || 6894 (q->q_sqnext != NULL)); 6895 } 6896 ASSERT(sq->sq_flags & SQ_QUEUED); 6897 ASSERT(q->q_syncqmsgs != 0); 6898 ASSERT(q->q_sqflags & Q_SQQUEUED); 6899 } 6900 #endif 6901 mutex_exit(QLOCK(q)); 6902 /* 6903 * SQLOCK is still held, so sq_count can be safely decremented. 6904 */ 6905 sq->sq_count--; 6906 6907 putnext_tail(sq, q, 0); 6908 /* Should not reference sq or q after this point. */ 6909 } 6910 6911 /* End of qfill_syncq */ 6912 6913 /* 6914 * Remove all messages from a syncq (if qp is NULL) or remove all messages 6915 * that would be put into qp by drain_syncq. 6916 * Used when deleting the syncq (qp == NULL) or when detaching 6917 * a queue (qp != NULL). 6918 * Return non-zero if one or more messages were freed. 6919 * 6920 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 6921 * sq_putlocks are used. 6922 * 6923 * NOTE: This function assumes that it is called from the close() context and 6924 * that all the queues in the syncq are going away. For this reason it doesn't 6925 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is 6926 * currently valid, but it would be worth rethinking this function so that it 6927 * behaves properly in other cases. 6928 */ 6929 int 6930 flush_syncq(syncq_t *sq, queue_t *qp) 6931 { 6932 mblk_t *bp, *mp_head, *mp_next, *mp_prev; 6933 queue_t *q; 6934 int ret = 0; 6935 6936 mutex_enter(SQLOCK(sq)); 6937 6938 /* 6939 * Before we leave, we need to make sure there are no 6940 * events listed for this queue. All events for this queue 6941 * will just be freed. 6942 */ 6943 if (qp != NULL && sq->sq_evhead != NULL) { 6944 ASSERT(sq->sq_flags & SQ_EVENTS); 6945 6946 mp_prev = NULL; 6947 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) { 6948 mp_next = bp->b_next; 6949 if (bp->b_queue == qp) { 6950 /* Delete this message */ 6951 if (mp_prev != NULL) { 6952 mp_prev->b_next = mp_next; 6953 /* 6954 * Update sq_evtail if the last element 6955 * is removed. 6956 */ 6957 if (bp == sq->sq_evtail) { 6958 ASSERT(mp_next == NULL); 6959 sq->sq_evtail = mp_prev; 6960 } 6961 } else 6962 sq->sq_evhead = mp_next; 6963 if (sq->sq_evhead == NULL) 6964 sq->sq_flags &= ~SQ_EVENTS; 6965 bp->b_prev = bp->b_next = NULL; 6966 freemsg(bp); 6967 ret++; 6968 } else { 6969 mp_prev = bp; 6970 } 6971 } 6972 } 6973 6974 /* 6975 * Walk sq_head and: 6976 * - match qp if qp is set, remove its messages 6977 * - all if qp is not set 6978 */ 6979 q = sq->sq_head; 6980 while (q != NULL) { 6981 ASSERT(q->q_syncq == sq); 6982 if ((qp == NULL) || (qp == q)) { 6983 /* 6984 * Yank the messages as a list off the queue 6985 */ 6986 mp_head = q->q_sqhead; 6987 /* 6988 * We do not have QLOCK(q) here (which is safe due to 6989 * assumptions mentioned above). To obtain the lock we 6990 * need to release SQLOCK which may allow lots of things 6991 * to change underneath us. This place requires more analysis. 6992 */ 6993 q->q_sqhead = q->q_sqtail = NULL; 6994 ASSERT(mp_head->b_queue && 6995 mp_head->b_queue->q_syncq == sq); 6996 6997 /* 6998 * Free each of the messages. 6999 */ 7000 for (bp = mp_head; bp != NULL; bp = mp_next) { 7001 mp_next = bp->b_next; 7002 bp->b_prev = bp->b_next = NULL; 7003 freemsg(bp); 7004 ret++; 7005 } 7006 /* 7007 * Now remove the queue from the syncq.
7008 */ 7009 ASSERT(q->q_sqflags & Q_SQQUEUED); 7010 SQRM_Q(sq, q); 7011 q->q_spri = 0; 7012 q->q_syncqmsgs = 0; 7013 7014 /* 7015 * If qp was specified, we are done with it and are 7016 * going to drop SQLOCK(sq) and return. We wakeup syncq 7017 * waiters while we still have the SQLOCK. 7018 */ 7019 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) { 7020 sq->sq_flags &= ~SQ_WANTWAKEUP; 7021 cv_broadcast(&sq->sq_wait); 7022 } 7023 /* Drop SQLOCK across clr_qfull */ 7024 mutex_exit(SQLOCK(sq)); 7025 7026 /* 7027 * We avoid doing the test that drain_syncq does and 7028 * unconditionally clear qfull for every flushed 7029 * message. Since flush_syncq is only called during 7030 * close this should not be a problem. 7031 */ 7032 clr_qfull(q); 7033 if (qp != NULL) { 7034 return (ret); 7035 } else { 7036 mutex_enter(SQLOCK(sq)); 7037 /* 7038 * The head was removed by SQRM_Q above. 7039 * Reread the new head and flush it. 7040 */ 7041 q = sq->sq_head; 7042 } 7043 } else { 7044 q = q->q_sqnext; 7045 } 7046 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7047 } 7048 7049 if (sq->sq_flags & SQ_WANTWAKEUP) { 7050 sq->sq_flags &= ~SQ_WANTWAKEUP; 7051 cv_broadcast(&sq->sq_wait); 7052 } 7053 7054 mutex_exit(SQLOCK(sq)); 7055 return (ret); 7056 } 7057 7058 /* 7059 * Propagate all messages from a syncq to the next syncq that are associated 7060 * with the specified queue. If the queue is attached to a driver or if the 7061 * messages have been added due to a qwriter(PERIM_INNER), free the messages. 7062 * 7063 * Assumes that the stream is strlock()'ed. We don't come here if there 7064 * are no messages to propagate. 7065 * 7066 * NOTE: If the queue is attached to a driver, all the messages are freed 7067 * as there is no point in propagating the messages from the driver syncq 7068 * to the closing stream head which will in turn get freed later. 7069 */ 7070 static int 7071 propagate_syncq(queue_t *qp) 7072 { 7073 mblk_t *bp, *head, *tail, *prev, *next; 7074 syncq_t *sq; 7075 queue_t *nqp; 7076 syncq_t *nsq; 7077 boolean_t isdriver; 7078 int moved = 0; 7079 uint16_t flags; 7080 pri_t priority = curthread->t_pri; 7081 #ifdef DEBUG 7082 void (*func)(); 7083 #endif 7084 7085 sq = qp->q_syncq; 7086 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7087 /* debug macro */ 7088 SQ_PUTLOCKS_HELD(sq); 7089 /* 7090 * As entersq() does not increment the sq_count for 7091 * the write side, check sq_count for non-QPERQ 7092 * perimeters alone. 7093 */ 7094 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1)); 7095 7096 /* 7097 * propagate_syncq() can be called because of either messages on the 7098 * queue syncq or events on the queue syncq. Do the actual 7099 * message propagation if there are any messages. 7100 */ 7101 if (qp->q_syncqmsgs) { 7102 isdriver = (qp->q_flag & QISDRV); 7103 7104 if (!isdriver) { 7105 nqp = qp->q_next; 7106 nsq = nqp->q_syncq; 7107 ASSERT(MUTEX_HELD(SQLOCK(nsq))); 7108 /* debug macro */ 7109 SQ_PUTLOCKS_HELD(nsq); 7110 #ifdef DEBUG 7111 func = (void (*)())nqp->q_qinfo->qi_putp; 7112 #endif 7113 } 7114 7115 SQRM_Q(sq, qp); 7116 priority = MAX(qp->q_spri, priority); 7117 qp->q_spri = 0; 7118 head = qp->q_sqhead; 7119 tail = qp->q_sqtail; 7120 qp->q_sqhead = qp->q_sqtail = NULL; 7121 qp->q_syncqmsgs = 0; 7122 7123 /* 7124 * Walk the list of messages, and free them if this is a driver, 7125 * otherwise reset the b_prev and b_queue value to the new putp. 7126 * Afterward, we will just add the head to the end of the next 7127 * syncq, and point the tail to the end of this one.
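 *
 * The splice itself then reduces to the following (as the code
 * below does for a non-empty destination list):
 *
 *	nqp->q_sqtail->b_next = head;
 *	nqp->q_sqtail = tail;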
7128 */ 7129 7130 for (bp = head; bp != NULL; bp = next) { 7131 next = bp->b_next; 7132 if (isdriver) { 7133 bp->b_prev = bp->b_next = NULL; 7134 freemsg(bp); 7135 continue; 7136 } 7137 /* Change the q values for this message */ 7138 bp->b_queue = nqp; 7139 #ifdef DEBUG 7140 bp->b_prev = (mblk_t *)func; 7141 #endif 7142 moved++; 7143 } 7144 /* 7145 * Attach list of messages to the end of the new queue (if there 7146 * is a list of messages). 7147 */ 7148 7149 if (!isdriver && head != NULL) { 7150 ASSERT(tail != NULL); 7151 if (nqp->q_sqhead == NULL) { 7152 nqp->q_sqhead = head; 7153 } else { 7154 ASSERT(nqp->q_sqtail != NULL); 7155 nqp->q_sqtail->b_next = head; 7156 } 7157 nqp->q_sqtail = tail; 7158 /* 7159 * When messages are moved from high priority queue to 7160 * another queue, the destination queue priority is 7161 * upgraded. 7162 */ 7163 7164 if (priority > nqp->q_spri) 7165 nqp->q_spri = priority; 7166 7167 SQPUT_Q(nsq, nqp); 7168 7169 nqp->q_syncqmsgs += moved; 7170 ASSERT(nqp->q_syncqmsgs != 0); 7171 } 7172 } 7173 7174 /* 7175 * Before we leave, we need to make sure there are no 7176 * events listed for this queue. All events for this queue 7177 * will just be freed. 7178 */ 7179 if (sq->sq_evhead != NULL) { 7180 ASSERT(sq->sq_flags & SQ_EVENTS); 7181 prev = NULL; 7182 for (bp = sq->sq_evhead; bp != NULL; bp = next) { 7183 next = bp->b_next; 7184 if (bp->b_queue == qp) { 7185 /* Delete this message */ 7186 if (prev != NULL) { 7187 prev->b_next = next; 7188 /* 7189 * Update sq_evtail if the last element 7190 * is removed. 7191 */ 7192 if (bp == sq->sq_evtail) { 7193 ASSERT(next == NULL); 7194 sq->sq_evtail = prev; 7195 } 7196 } else 7197 sq->sq_evhead = next; 7198 if (sq->sq_evhead == NULL) 7199 sq->sq_flags &= ~SQ_EVENTS; 7200 bp->b_prev = bp->b_next = NULL; 7201 freemsg(bp); 7202 } else { 7203 prev = bp; 7204 } 7205 } 7206 } 7207 7208 flags = sq->sq_flags; 7209 7210 /* Wake up any waiter before leaving. */ 7211 if (flags & SQ_WANTWAKEUP) { 7212 flags &= ~SQ_WANTWAKEUP; 7213 cv_broadcast(&sq->sq_wait); 7214 } 7215 sq->sq_flags = flags; 7216 7217 return (moved); 7218 } 7219 7220 /* 7221 * Try and upgrade to exclusive access at the inner perimeter. If this can 7222 * not be done without blocking then request will be queued on the syncq 7223 * and drain_syncq will run it later. 7224 * 7225 * This routine can only be called from put or service procedures plus 7226 * asynchronous callback routines that have properly entered the queue (with 7227 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq 7228 * associated with q. 7229 */ 7230 void 7231 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7232 { 7233 syncq_t *sq = q->q_syncq; 7234 uint16_t count; 7235 7236 mutex_enter(SQLOCK(sq)); 7237 count = sq->sq_count; 7238 SQ_PUTLOCKS_ENTER(sq); 7239 SUM_SQ_PUTCOUNTS(sq, count); 7240 ASSERT(count >= 1); 7241 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7242 7243 if (count == 1) { 7244 /* 7245 * Can upgrade. This case also handles nested qwriter calls 7246 * (when the qwriter callback function calls qwriter). In that 7247 * case SQ_EXCL is already set. 7248 */ 7249 sq->sq_flags |= SQ_EXCL; 7250 SQ_PUTLOCKS_EXIT(sq); 7251 mutex_exit(SQLOCK(sq)); 7252 (*func)(q, mp); 7253 /* 7254 * Assumes that leavesq, putnext, and drain_syncq will reset 7255 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7256 * until putnext, leavesq, or drain_syncq drops it. 
7257 * That way we handle nested qwriter(INNER) without dropping 7258 * SQ_EXCL until the outermost qwriter callback routine is 7259 * done. 7260 */ 7261 return; 7262 } 7263 SQ_PUTLOCKS_EXIT(sq); 7264 sqfill_events(sq, q, mp, func); 7265 } 7266 7267 /* 7268 * Synchronous callback support functions 7269 */ 7270 7271 /* 7272 * Allocate a callback parameter structure. 7273 * Assumes that caller initializes the flags and the id. 7274 * Acquires SQLOCK(sq) if non-NULL is returned. 7275 */ 7276 callbparams_t * 7277 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7278 { 7279 callbparams_t *cbp; 7280 size_t size = sizeof (callbparams_t); 7281 7282 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7283 7284 /* 7285 * Only try tryhard allocation if the caller is ready to panic. 7286 * Otherwise just fail. 7287 */ 7288 if (cbp == NULL) { 7289 if (kmflags & KM_PANIC) 7290 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7291 &size, kmflags); 7292 else 7293 return (NULL); 7294 } 7295 7296 ASSERT(size >= sizeof (callbparams_t)); 7297 cbp->cbp_size = size; 7298 cbp->cbp_sq = sq; 7299 cbp->cbp_func = func; 7300 cbp->cbp_arg = arg; 7301 mutex_enter(SQLOCK(sq)); 7302 cbp->cbp_next = sq->sq_callbpend; 7303 sq->sq_callbpend = cbp; 7304 return (cbp); 7305 } 7306 7307 void 7308 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7309 { 7310 callbparams_t **pp, *p; 7311 7312 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7313 7314 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7315 if (p == cbp) { 7316 *pp = p->cbp_next; 7317 kmem_free(p, p->cbp_size); 7318 return; 7319 } 7320 } 7321 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7322 "callbparams_free: not found\n")); 7323 } 7324 7325 void 7326 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7327 { 7328 callbparams_t **pp, *p; 7329 7330 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7331 7332 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7333 if (p->cbp_id == id && p->cbp_flags == flag) { 7334 *pp = p->cbp_next; 7335 kmem_free(p, p->cbp_size); 7336 return; 7337 } 7338 } 7339 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7340 "callbparams_free_id: not found\n")); 7341 } 7342 7343 /* 7344 * Callback wrapper function used by once-only callbacks that can be 7345 * cancelled (qtimeout and qbufcall) 7346 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7347 * cancelled by the qun* functions. 
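 *
 * A module arms such a callback through qtimeout(9F) or
 * qbufcall(9F) and cancels it with quntimeout(9F)/qunbufcall(9F);
 * e.g. (a sketch, my_timeout and my_arg being hypothetical module
 * code):
 *
 *	timeout_id_t tid;
 *
 *	tid = qtimeout(q, my_timeout, my_arg, drv_usectohz(1000000));
 *	...
 *	(void) quntimeout(q, tid);
 *
 * qcallbwrapper() below is what the timeout actually dispatches,
 * with a callbparams_t wrapping my_timeout/my_arg as its argument.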
7348 */ 7349 void 7350 qcallbwrapper(void *arg) 7351 { 7352 callbparams_t *cbp = arg; 7353 syncq_t *sq; 7354 uint16_t count = 0; 7355 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 7356 uint16_t type; 7357 7358 sq = cbp->cbp_sq; 7359 mutex_enter(SQLOCK(sq)); 7360 type = sq->sq_type; 7361 if (!(type & SQ_CICB)) { 7362 count = sq->sq_count; 7363 SQ_PUTLOCKS_ENTER(sq); 7364 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 7365 SUM_SQ_PUTCOUNTS(sq, count); 7366 sq->sq_needexcl++; 7367 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 7368 waitflags |= SQ_MESSAGES; 7369 } 7370 /* Cannot handle exclusive entry at outer perimeter */ 7371 ASSERT(type & SQ_COCB); 7372 7373 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) { 7374 if ((sq->sq_callbflags & cbp->cbp_flags) && 7375 (sq->sq_cancelid == cbp->cbp_id)) { 7376 /* timeout has been cancelled */ 7377 sq->sq_callbflags |= SQ_CALLB_BYPASSED; 7378 callbparams_free(sq, cbp); 7379 if (!(type & SQ_CICB)) { 7380 ASSERT(sq->sq_needexcl > 0); 7381 sq->sq_needexcl--; 7382 if (sq->sq_needexcl == 0) { 7383 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7384 } 7385 SQ_PUTLOCKS_EXIT(sq); 7386 } 7387 mutex_exit(SQLOCK(sq)); 7388 return; 7389 } 7390 sq->sq_flags |= SQ_WANTWAKEUP; 7391 if (!(type & SQ_CICB)) { 7392 SQ_PUTLOCKS_EXIT(sq); 7393 } 7394 cv_wait(&sq->sq_wait, SQLOCK(sq)); 7395 if (!(type & SQ_CICB)) { 7396 count = sq->sq_count; 7397 SQ_PUTLOCKS_ENTER(sq); 7398 SUM_SQ_PUTCOUNTS(sq, count); 7399 } 7400 } 7401 7402 sq->sq_count++; 7403 ASSERT(sq->sq_count != 0); /* Wraparound */ 7404 if (!(type & SQ_CICB)) { 7405 ASSERT(count == 0); 7406 sq->sq_flags |= SQ_EXCL; 7407 ASSERT(sq->sq_needexcl > 0); 7408 sq->sq_needexcl--; 7409 if (sq->sq_needexcl == 0) { 7410 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7411 } 7412 SQ_PUTLOCKS_EXIT(sq); 7413 } 7414 7415 mutex_exit(SQLOCK(sq)); 7416 7417 cbp->cbp_func(cbp->cbp_arg); 7418 7419 /* 7420 * We drop the lock only for leavesq to re-acquire it. 7421 * A possible optimization is to inline leavesq. 7422 */ 7423 mutex_enter(SQLOCK(sq)); 7424 callbparams_free(sq, cbp); 7425 mutex_exit(SQLOCK(sq)); 7426 leavesq(sq, SQ_CALLBACK); 7427 } 7428 7429 /* 7430 * No need to grab sq_putlocks here. See comment in strsubr.h that 7431 * explains when sq_putlocks are used. 7432 * 7433 * sq_count (or one of the sq_putcounts) has already been 7434 * decremented by the caller, and if SQ_QUEUED, we need to call 7435 * drain_syncq (the global syncq drain). 7436 * If putnext_tail is called with the SQ_EXCL bit set, we are in 7437 * one of two states: either this is a non-CIPUT perimeter (and we 7438 * need to clear the bit), or we went exclusive in the put procedure. 7439 * In any case, we want to clear the bit now, and it is probably 7440 * easier to do this at the beginning of this function (remember, we 7441 * hold the SQLOCK). Lastly, if there are other messages queued 7442 * on the syncq (and not for our destination), enable the syncq 7443 * for background work.
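 *
 * A caller therefore looks like qfill_syncq() above (sketch):
 *
 *	mutex_enter(SQLOCK(sq));
 *	...
 *	sq->sq_count--;			claim dropped by the caller
 *	putnext_tail(sq, q, 0);		drops SQLOCK before returning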
7444 */ 7445 7446 /* ARGSUSED */ 7447 void 7448 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7449 { 7450 uint16_t flags = sq->sq_flags; 7451 7452 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7453 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7454 7455 /* Clear SQ_EXCL if set in passflags */ 7456 if (passflags & SQ_EXCL) { 7457 flags &= ~SQ_EXCL; 7458 } 7459 if (flags & SQ_WANTWAKEUP) { 7460 flags &= ~SQ_WANTWAKEUP; 7461 cv_broadcast(&sq->sq_wait); 7462 } 7463 if (flags & SQ_WANTEXWAKEUP) { 7464 flags &= ~SQ_WANTEXWAKEUP; 7465 cv_broadcast(&sq->sq_exitwait); 7466 } 7467 sq->sq_flags = flags; 7468 7469 /* 7470 * We have cleared SQ_EXCL if we were asked to, and started 7471 * the wakeup process for waiters. If there are no writers 7472 * then we need to drain the syncq if we were told to, or 7473 * enable the background thread to do it. 7474 */ 7475 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7476 if ((passflags & SQ_QUEUED) || 7477 (sq->sq_svcflags & SQ_DISABLED)) { 7478 /* drain_syncq will take care of events in the list */ 7479 drain_syncq(sq); 7480 return; 7481 } else if (flags & SQ_QUEUED) { 7482 sqenable(sq); 7483 } 7484 } 7485 /* Drop the SQLOCK on exit */ 7486 mutex_exit(SQLOCK(sq)); 7487 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7488 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7489 } 7490 7491 void 7492 set_qend(queue_t *q) 7493 { 7494 mutex_enter(QLOCK(q)); 7495 if (!O_SAMESTR(q)) 7496 q->q_flag |= QEND; 7497 else 7498 q->q_flag &= ~QEND; 7499 mutex_exit(QLOCK(q)); 7500 q = _OTHERQ(q); 7501 mutex_enter(QLOCK(q)); 7502 if (!O_SAMESTR(q)) 7503 q->q_flag |= QEND; 7504 else 7505 q->q_flag &= ~QEND; 7506 mutex_exit(QLOCK(q)); 7507 } 7508 7509 /* 7510 * Set QFULL in next service procedure queue (that cares) if not already 7511 * set and if there are already more messages on the syncq than 7512 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7513 * any syncq. 7514 * 7515 * The fq here is the next queue with a service procedure. This is where 7516 * we would fail canputnext, so this is where we need to set QFULL. 7517 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7518 * 7519 * We already have QLOCK at this point. To avoid cross-locks with 7520 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7521 * SQLOCK and sd_reflock, we need to drop respective locks first. 7522 */ 7523 void 7524 set_qfull(queue_t *q) 7525 { 7526 queue_t *fq = NULL; 7527 7528 ASSERT(MUTEX_HELD(QLOCK(q))); 7529 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7530 (q->q_syncqmsgs > sq_max_size)) { 7531 if ((fq = q->q_nfsrv) == q) { 7532 fq->q_flag |= QFULL; 7533 } else { 7534 mutex_exit(QLOCK(q)); 7535 mutex_enter(QLOCK(fq)); 7536 fq->q_flag |= QFULL; 7537 mutex_exit(QLOCK(fq)); 7538 mutex_enter(QLOCK(q)); 7539 } 7540 } 7541 } 7542 7543 void 7544 clr_qfull(queue_t *q) 7545 { 7546 queue_t *oq = q; 7547 7548 q = q->q_nfsrv; 7549 /* Fast check if there is any work to do before getting the lock. */ 7550 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7551 return; 7552 } 7553 7554 /* 7555 * Do not reset QFULL (and backenable) if the q_count is the reason 7556 * for QFULL being set. 7557 */ 7558 mutex_enter(QLOCK(q)); 7559 /* 7560 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7561 * Hence clear the QFULL. 7562 * If both q_count and q_mblkcnt are less than the hiwat mark, 7563 * clear the QFULL. 
7564 */ 7565 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) && 7566 (q->q_mblkcnt < q->q_hiwat))) { 7567 q->q_flag &= ~QFULL; 7568 /* 7569 * A little more confusing, how about this way: 7570 * if someone wants to write, 7571 * AND 7572 * both counts are less than the lowat mark 7573 * OR 7574 * the lowat mark is zero 7575 * THEN 7576 * backenable 7577 */ 7578 if ((q->q_flag & QWANTW) && 7579 (((q->q_count < q->q_lowat) && 7580 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) { 7581 q->q_flag &= ~QWANTW; 7582 mutex_exit(QLOCK(q)); 7583 backenable(oq, 0); 7584 } else 7585 mutex_exit(QLOCK(q)); 7586 } else 7587 mutex_exit(QLOCK(q)); 7588 } 7589 7590 /* 7591 * Set the forward service procedure pointer. 7592 * 7593 * Called at insert-time to cache a queue's next forward service procedure in 7594 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted 7595 * has a service procedure then q_nfsrv points to itself. If the queue to be 7596 * inserted does not have a service procedure, then q_nfsrv points to the next 7597 * queue forward that has a service procedure. If the queue is at the logical 7598 * end of the stream (driver for write side, stream head for the read side) 7599 * and does not have a service procedure, then q_nfsrv also points to itself. 7600 */ 7601 void 7602 set_nfsrv_ptr( 7603 queue_t *rnew, /* read queue pointer to new module */ 7604 queue_t *wnew, /* write queue pointer to new module */ 7605 queue_t *prev_rq, /* read queue pointer to the module above */ 7606 queue_t *prev_wq) /* write queue pointer to the module above */ 7607 { 7608 queue_t *qp; 7609 7610 if (prev_wq->q_next == NULL) { 7611 /* 7612 * Insert the driver, initialize the driver and stream head. 7613 * In this case, prev_rq/prev_wq should be the stream head. 7614 * _I_INSERT does not allow inserting a driver. Make sure 7615 * that it is not an insertion. 7616 */ 7617 ASSERT(!(rnew->q_flag & _QINSERTING)); 7618 wnew->q_nfsrv = wnew; 7619 if (rnew->q_qinfo->qi_srvp) 7620 rnew->q_nfsrv = rnew; 7621 else 7622 rnew->q_nfsrv = prev_rq; 7623 prev_rq->q_nfsrv = prev_rq; 7624 prev_wq->q_nfsrv = prev_wq; 7625 } else { 7626 /* 7627 * set up read side q_nfsrv pointer. This MUST be done 7628 * before setting the write side, because the setting of 7629 * the write side for a fifo may depend on it. 7630 * 7631 * Suppose we have a fifo that only has pipemod pushed. 7632 * pipemod has no read or write service procedures, so 7633 * nfsrv for both pipemod queues points to prev_rq (the 7634 * stream read head). Now push bufmod (which has only a 7635 * read service procedure). Doing the write side first, 7636 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which 7637 * is WRONG; the next queue forward from wnew with a 7638 * service procedure will be rnew, not the stream read head. 7639 * Since the downstream queue (which in the case of a fifo 7640 * is the read queue rnew) can affect upstream queues, it 7641 * needs to be done first. Setting up the read side first 7642 * sets nfsrv for both pipemod queues to rnew and then 7643 * when the write side is set up, wnew-q_nfsrv will also 7644 * point to rnew. 7645 */ 7646 if (rnew->q_qinfo->qi_srvp) { 7647 /* 7648 * use _OTHERQ() because, if this is a pipe, next 7649 * module may have been pushed from other end and 7650 * q_next could be a read queue. 
7651 */ 7652 qp = _OTHERQ(prev_wq->q_next); 7653 while (qp && qp->q_nfsrv != qp) { 7654 qp->q_nfsrv = rnew; 7655 qp = backq(qp); 7656 } 7657 rnew->q_nfsrv = rnew; 7658 } else 7659 rnew->q_nfsrv = prev_rq->q_nfsrv; 7660 7661 /* set up write side q_nfsrv pointer */ 7662 if (wnew->q_qinfo->qi_srvp) { 7663 wnew->q_nfsrv = wnew; 7664 7665 /* 7666 * For insertion, need to update nfsrv of the modules 7667 * above which do not have a service routine. 7668 */ 7669 if (rnew->q_flag & _QINSERTING) { 7670 for (qp = prev_wq; 7671 qp != NULL && qp->q_nfsrv != qp; 7672 qp = backq(qp)) { 7673 qp->q_nfsrv = wnew->q_nfsrv; 7674 } 7675 } 7676 } else { 7677 if (prev_wq->q_next == prev_rq) 7678 /* 7679 * Since prev_wq/prev_rq are the middle of a 7680 * fifo, wnew/rnew will also be the middle of 7681 * a fifo and wnew's nfsrv is same as rnew's. 7682 */ 7683 wnew->q_nfsrv = rnew->q_nfsrv; 7684 else 7685 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv; 7686 } 7687 } 7688 } 7689 7690 /* 7691 * Reset the forward service procedure pointer; called at remove-time. 7692 */ 7693 void 7694 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp) 7695 { 7696 queue_t *tmp_qp; 7697 7698 /* Reset the write side q_nfsrv pointer for _I_REMOVE */ 7699 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) { 7700 for (tmp_qp = backq(wqp); 7701 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp; 7702 tmp_qp = backq(tmp_qp)) { 7703 tmp_qp->q_nfsrv = wqp->q_nfsrv; 7704 } 7705 } 7706 7707 /* reset the read side q_nfsrv pointer */ 7708 if (rqp->q_qinfo->qi_srvp) { 7709 if (wqp->q_next) { /* non-driver case */ 7710 tmp_qp = _OTHERQ(wqp->q_next); 7711 while (tmp_qp && tmp_qp->q_nfsrv == rqp) { 7712 /* Note that rqp->q_next cannot be NULL */ 7713 ASSERT(rqp->q_next != NULL); 7714 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv; 7715 tmp_qp = backq(tmp_qp); 7716 } 7717 } 7718 } 7719 } 7720 7721 /* 7722 * This routine should be called after all stream geometry changes to update 7723 * the stream head cached struio() rd/wr queue pointers. Note must be called 7724 * with the streamlock()ed. 7725 * 7726 * Note: only enables Synchronous STREAMS for a side of a Stream which has 7727 * an explicit synchronous barrier module queue. That is, a queue that 7728 * has specified a struio() type. 7729 */ 7730 static void 7731 strsetuio(stdata_t *stp) 7732 { 7733 queue_t *wrq; 7734 7735 if (stp->sd_flag & STPLEX) { 7736 /* 7737 * Not streamhead, but a mux, so no Synchronous STREAMS. 7738 */ 7739 stp->sd_struiowrq = NULL; 7740 stp->sd_struiordq = NULL; 7741 return; 7742 } 7743 /* 7744 * Scan the write queue(s) while synchronous 7745 * until we find a qinfo uio type specified. 7746 */ 7747 wrq = stp->sd_wrq->q_next; 7748 while (wrq) { 7749 if (wrq->q_struiot == STRUIOT_NONE) { 7750 wrq = 0; 7751 break; 7752 } 7753 if (wrq->q_struiot != STRUIOT_DONTCARE) 7754 break; 7755 if (! _SAMESTR(wrq)) { 7756 wrq = 0; 7757 break; 7758 } 7759 wrq = wrq->q_next; 7760 } 7761 stp->sd_struiowrq = wrq; 7762 /* 7763 * Scan the read queue(s) while synchronous 7764 * until we find a qinfo uio type specified. 7765 */ 7766 wrq = stp->sd_wrq->q_next; 7767 while (wrq) { 7768 if (_RD(wrq)->q_struiot == STRUIOT_NONE) { 7769 wrq = 0; 7770 break; 7771 } 7772 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE) 7773 break; 7774 if (! _SAMESTR(wrq)) { 7775 wrq = 0; 7776 break; 7777 } 7778 wrq = wrq->q_next; 7779 } 7780 stp->sd_struiordq = wrq ? 
_RD(wrq) : 0; 7781 } 7782 7783 /* 7784 * pass_wput() unblocks the passthru queues, so that 7785 * messages can arrive at the mux's lower read queue before 7786 * the I_LINK/I_UNLINK is acked/nacked. 7787 */ 7788 static void 7789 pass_wput(queue_t *q, mblk_t *mp) 7790 { 7791 syncq_t *sq; 7792 7793 sq = _RD(q)->q_syncq; 7794 if (sq->sq_flags & SQ_BLOCKED) 7795 unblocksq(sq, SQ_BLOCKED, 0); 7796 putnext(q, mp); 7797 } 7798 7799 /* 7800 * Set up queues for the link/unlink. 7801 * Create a new queue and block it and then insert it 7802 * below the stream head on the lower stream. 7803 * This prevents any messages from arriving during the setq 7804 * as well as while the mux is processing the I_LINK/I_UNLINK. 7805 * The blocked passq is unblocked once the I_LINK/I_UNLINK has 7806 * been acked or nacked, or if a message is generated and sent 7807 * down the mux's write put procedure. 7808 * See pass_wput(). 7809 * 7810 * After the new queue is inserted, all messages coming from below are 7811 * blocked. The call to strlock will ensure that all activity in the stream head 7812 * read queue syncq is stopped (sq_count drops to zero). 7813 */ 7814 static queue_t * 7815 link_addpassthru(stdata_t *stpdown) 7816 { 7817 queue_t *passq; 7818 sqlist_t sqlist; 7819 7820 passq = allocq(); 7821 STREAM(passq) = STREAM(_WR(passq)) = stpdown; 7822 /* setq might sleep in allocator - avoid holding locks. */ 7823 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ, 7824 SQ_CI|SQ_CO, B_FALSE); 7825 claimq(passq); 7826 blocksq(passq->q_syncq, SQ_BLOCKED, 1); 7827 insertq(STREAM(passq), passq); 7828 7829 /* 7830 * Use strlock() to wait for the stream head sq_count to drop to zero 7831 * since we are going to change q_ptr in the stream head. Note that 7832 * insertq() doesn't wait for any syncq counts to drop to zero. 7833 */ 7834 sqlist.sqlist_head = NULL; 7835 sqlist.sqlist_index = 0; 7836 sqlist.sqlist_size = sizeof (sqlist_t); 7837 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq); 7838 strlock(stpdown, &sqlist); 7839 strunlock(stpdown, &sqlist); 7840 7841 releaseq(passq); 7842 return (passq); 7843 } 7844 7845 /* 7846 * Let messages flow up into the mux by removing 7847 * the passq. 7848 */ 7849 static void 7850 link_rempassthru(queue_t *passq) 7851 { 7852 claimq(passq); 7853 removeq(passq); 7854 releaseq(passq); 7855 freeq(passq); 7856 } 7857 7858 /* 7859 * Wait for the condition variable pointed to by `cvp' to be signaled, 7860 * or for `tim' milliseconds to elapse, whichever comes first. If `tim' 7861 * is negative, then there is no time limit. If `nosigs' is non-zero, 7862 * then the wait will be non-interruptible. 7863 * 7864 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
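 *
 * Typical use, as in strwaitmark() below (condition_of_interest
 * standing in for whatever state the caller is waiting on):
 *
 *	mutex_enter(&stp->sd_lock);
 *	while (!condition_of_interest) {
 *		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1)
 *			break;		timed out
 *	}
 *	mutex_exit(&stp->sd_lock);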
7857 
7858 /*
7859  * Wait for the condition variable pointed to by `cvp' to be signaled,
7860  * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7861  * is negative, there is no time limit; if `tim' is zero, -1 is returned
7862  * immediately. If `nosigs' is non-zero, the wait is non-interruptible.
7863  *
7864  * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
7865  */
7866 clock_t
7867 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7868 {
7869 	clock_t ret, now, tick;
7870 
7871 	if (tim < 0) {
7872 		if (nosigs) {
7873 			cv_wait(cvp, mp);
7874 			ret = 1;
7875 		} else {
7876 			ret = cv_wait_sig(cvp, mp);
7877 		}
7878 	} else if (tim > 0) {
7879 		/*
7880 		 * Convert milliseconds to clock ticks.
7881 		 */
7882 		tick = MSEC_TO_TICK_ROUNDUP(tim);
7883 		time_to_wait(&now, tick);
7884 		if (nosigs) {
7885 			ret = cv_timedwait(cvp, mp, now);
7886 		} else {
7887 			ret = cv_timedwait_sig(cvp, mp, now);
7888 		}
7889 	} else {
7890 		ret = -1;
7891 	}
7892 	return (ret);
7893 }
7894 
7895 /*
7896  * Wait until the stream head can determine if it is at the mark, but
7897  * don't wait forever, to prevent a race between the "mark" state
7898  * in the stream head and any mark state in the caller/user of this routine.
7899  *
7900  * This is used by sockets; for a socket it would be incorrect
7901  * to return a failure for SIOCATMARK when there is no data in the receive
7902  * queue and the marked urgent data is traveling up the stream.
7903  *
7904  * This routine waits until the mark is known by waiting for one of these
7905  * three events:
7906  *	The stream head read queue becoming non-empty (including an EOF).
7907  *	The STRATMARK flag being set (due to a MSGMARKNEXT message).
7908  *	The STRNOTATMARK flag being set (which indicates that the transport
7909  *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
7910  *	the mark).
7911  *
7912  * The routine returns 1 if the stream is at the mark, and 0 if it can
7913  * be determined that the stream is not at the mark.
7914  * If the wait times out and it cannot be determined
7915  * whether or not the stream is at the mark, the routine returns -1.
7916  *
7917  * Note: This routine should only be used when a mark is pending, i.e.,
7918  * in the socket case, after the SIGURG has been posted.
7919  * Note2: This cannot wake up merely because synchronous streams indicate
7920  * that data is available, since it is not possible to use the synchronous
7921  * streams interfaces to determine the b_flag value for the data queued below
7922  * the stream head.
7923  */
7924 int
7925 strwaitmark(vnode_t *vp)
7926 {
7927 	struct stdata *stp = vp->v_stream;
7928 	queue_t *rq = _RD(stp->sd_wrq);
7929 	int mark;
7930 
7931 	mutex_enter(&stp->sd_lock);
7932 	while (rq->q_first == NULL &&
7933 	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7934 		stp->sd_flag |= RSLEEP;
7935 
7936 		/* Wait for 100 milliseconds for any state change. */
7937 		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7938 			mutex_exit(&stp->sd_lock);
7939 			return (-1);
7940 		}
7941 	}
7942 	if (stp->sd_flag & STRATMARK)
7943 		mark = 1;
7944 	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7945 		mark = 1;
7946 	else
7947 		mark = 0;
7948 
7949 	mutex_exit(&stp->sd_lock);
7950 	return (mark);
7951 }
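
/*
 * Editorial sketch, not part of the original source: how a SIOCATMARK
 * handler might consume strwaitmark(). The error chosen for the timeout
 * case is a hypothetical fallback, not a quote from the socket code.
 */
#if 0	/* illustrative only */
	switch (strwaitmark(vp)) {
	case 1:				/* at the mark */
		*rvalp = 1;
		break;
	case 0:				/* not at the mark */
		*rvalp = 0;
		break;
	default:			/* -1: still undetermined */
		return (EWOULDBLOCK);	/* hypothetical choice */
	}
#endif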
7952 
7953 /*
7954  * Set a read side error. If `persist' is set, change the socket error
7955  * to be persistent. If `errfunc' is set, install the function as the
7956  * exported error handler.
7957  */
7958 void
7959 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7960 {
7961 	struct stdata *stp = vp->v_stream;
7962 
7963 	mutex_enter(&stp->sd_lock);
7964 	stp->sd_rerror = error;
7965 	if (error == 0 && errfunc == NULL)
7966 		stp->sd_flag &= ~STRDERR;
7967 	else
7968 		stp->sd_flag |= STRDERR;
7969 	if (persist) {
7970 		stp->sd_flag &= ~STRDERRNONPERSIST;
7971 	} else {
7972 		stp->sd_flag |= STRDERRNONPERSIST;
7973 	}
7974 	stp->sd_rderrfunc = errfunc;
7975 	if (error != 0 || errfunc != NULL) {
7976 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
7977 		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
7978 		cv_broadcast(&stp->sd_monitor);			/* ioctllers */
7979 
7980 		mutex_exit(&stp->sd_lock);
7981 		pollwakeup(&stp->sd_pollist, POLLERR);
7982 		mutex_enter(&stp->sd_lock);
7983 
7984 		if (stp->sd_sigflags & S_ERROR)
7985 			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
7986 	}
7987 	mutex_exit(&stp->sd_lock);
7988 }
7989 
7990 /*
7991  * Set a write side error. If `persist' is set, change the socket error
7992  * to be persistent; if `errfunc' is set, install the exported error handler.
7993  */
7994 void
7995 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7996 {
7997 	struct stdata *stp = vp->v_stream;
7998 
7999 	mutex_enter(&stp->sd_lock);
8000 	stp->sd_werror = error;
8001 	if (error == 0 && errfunc == NULL)
8002 		stp->sd_flag &= ~STWRERR;
8003 	else
8004 		stp->sd_flag |= STWRERR;
8005 	if (persist) {
8006 		stp->sd_flag &= ~STWRERRNONPERSIST;
8007 	} else {
8008 		stp->sd_flag |= STWRERRNONPERSIST;
8009 	}
8010 	stp->sd_wrerrfunc = errfunc;
8011 	if (error != 0 || errfunc != NULL) {
8012 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
8013 		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
8014 		cv_broadcast(&stp->sd_monitor);			/* ioctllers */
8015 
8016 		mutex_exit(&stp->sd_lock);
8017 		pollwakeup(&stp->sd_pollist, POLLERR);
8018 		mutex_enter(&stp->sd_lock);
8019 
8020 		if (stp->sd_sigflags & S_ERROR)
8021 			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8022 	}
8023 	mutex_exit(&stp->sd_lock);
8024 }
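
/*
 * Editorial sketch, not part of the original source: a transport marking
 * both sides of a stream with a persistent ECONNRESET after a connection
 * reset, with no exported error handler installed.
 */
#if 0	/* illustrative only */
	strsetrerror(vp, ECONNRESET, 1, NULL);	/* persist = 1 */
	strsetwerror(vp, ECONNRESET, 1, NULL);
#endif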
8025 
8026 /*
8027  * Make the stream return 0 (EOF) when all data has been read.
8028  * This has no effect on the write side.
8029  */
8030 void
8031 strseteof(vnode_t *vp, int eof)
8032 {
8033 	struct stdata *stp = vp->v_stream;
8034 
8035 	mutex_enter(&stp->sd_lock);
8036 	if (!eof) {
8037 		stp->sd_flag &= ~STREOF;
8038 		mutex_exit(&stp->sd_lock);
8039 		return;
8040 	}
8041 	stp->sd_flag |= STREOF;
8042 	if (stp->sd_flag & RSLEEP) {
8043 		stp->sd_flag &= ~RSLEEP;
8044 		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
8045 	}
8046 
8047 	mutex_exit(&stp->sd_lock);
8048 	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
8049 	mutex_enter(&stp->sd_lock);
8050 
8051 	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
8052 		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
8053 	mutex_exit(&stp->sd_lock);
8054 }
8055 
8056 void
8057 strflushrq(vnode_t *vp, int flag)
8058 {
8059 	struct stdata *stp = vp->v_stream;
8060 
8061 	mutex_enter(&stp->sd_lock);
8062 	flushq(_RD(stp->sd_wrq), flag);
8063 	mutex_exit(&stp->sd_lock);
8064 }
8065 
8066 void
8067 strsetrputhooks(vnode_t *vp, uint_t flags,
8068     msgfunc_t protofunc, msgfunc_t miscfunc)
8069 {
8070 	struct stdata *stp = vp->v_stream;
8071 
8072 	mutex_enter(&stp->sd_lock);
8073 
8074 	if (protofunc == NULL)
8075 		stp->sd_rprotofunc = strrput_proto;
8076 	else
8077 		stp->sd_rprotofunc = protofunc;
8078 
8079 	if (miscfunc == NULL)
8080 		stp->sd_rmiscfunc = strrput_misc;
8081 	else
8082 		stp->sd_rmiscfunc = miscfunc;
8083 
8084 	if (flags & SH_CONSOL_DATA)
8085 		stp->sd_rput_opt |= SR_CONSOL_DATA;
8086 	else
8087 		stp->sd_rput_opt &= ~SR_CONSOL_DATA;
8088 
8089 	if (flags & SH_SIGALLDATA)
8090 		stp->sd_rput_opt |= SR_SIGALLDATA;
8091 	else
8092 		stp->sd_rput_opt &= ~SR_SIGALLDATA;
8093 
8094 	if (flags & SH_IGN_ZEROLEN)
8095 		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
8096 	else
8097 		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
8098 
8099 	mutex_exit(&stp->sd_lock);
8100 }
8101 
8102 void
8103 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
8104 {
8105 	struct stdata *stp = vp->v_stream;
8106 
8107 	mutex_enter(&stp->sd_lock);
8108 	stp->sd_closetime = closetime;
8109 
8110 	if (flags & SH_SIGPIPE)
8111 		stp->sd_wput_opt |= SW_SIGPIPE;
8112 	else
8113 		stp->sd_wput_opt &= ~SW_SIGPIPE;
8114 	if (flags & SH_RECHECK_ERR)
8115 		stp->sd_wput_opt |= SW_RECHECK_ERR;
8116 	else
8117 		stp->sd_wput_opt &= ~SW_RECHECK_ERR;
8118 
8119 	mutex_exit(&stp->sd_lock);
8120 }
8121 
8122 void
8123 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
8124 {
8125 	struct stdata *stp = vp->v_stream;
8126 
8127 	mutex_enter(&stp->sd_lock);
8128 
8129 	stp->sd_rputdatafunc = rdatafunc;
8130 	stp->sd_wputdatafunc = wdatafunc;
8131 
8132 	mutex_exit(&stp->sd_lock);
8133 }
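
/*
 * Editorial sketch, not part of the original source: a stream head consumer
 * installing hooks. Passing NULL functions keeps the strrput_proto/
 * strrput_misc defaults, per strsetrputhooks() above; the flag choice and
 * the zero close time are arbitrary.
 */
#if 0	/* illustrative only */
	strsetrputhooks(vp, SH_CONSOL_DATA | SH_IGN_ZEROLEN, NULL, NULL);
	strsetwputhooks(vp, SH_SIGPIPE | SH_RECHECK_ERR, 0);
#endif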
8134 
8135 /* Used within the framework when the queue is already locked */
8136 void
8137 qenable_locked(queue_t *q)
8138 {
8139 	stdata_t *stp = STREAM(q);
8140 
8141 	ASSERT(MUTEX_HELD(QLOCK(q)));
8142 
8143 	if (!q->q_qinfo->qi_srvp)
8144 		return;
8145 
8146 	/*
8147 	 * Do not place on the run queue if already enabled or closing.
8148 	 */
8149 	if (q->q_flag & (QWCLOSE|QENAB))
8150 		return;
8151 
8152 	/*
8153 	 * Mark the queue enabled and place it on the run list if it is not
8154 	 * already being serviced. If it is being serviced, runservice() will
8155 	 * detect that QENAB is set and call the service procedure before
8156 	 * clearing the QINSERVICE flag.
8157 	 */
8158 	q->q_flag |= QENAB;
8159 	if (q->q_flag & QINSERVICE)
8160 		return;
8161 
8162 	/* Record the time of qenable */
8163 	q->q_qtstamp = lbolt;
8164 
8165 	/*
8166 	 * Put the queue on the stp list and schedule it for background
8167 	 * processing unless it is already scheduled, or unless the stream
8168 	 * head intends to process it in the foreground later (indicated
8169 	 * by the STRS_WILLSERVICE flag).
8170 	 */
8171 	mutex_enter(&stp->sd_qlock);
8172 	/*
8173 	 * If there is already something on the list, the stp flags should
8174 	 * show an intention to drain it.
8175 	 */
8176 	IMPLY(STREAM_NEEDSERVICE(stp),
8177 	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));
8178 
8179 	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
8180 	stp->sd_nqueues++;
8181 
8182 	/*
8183 	 * If no one will drain this stream, we are the first producer and
8184 	 * need to schedule it for a background thread.
8185 	 */
8186 	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
8187 		/*
8188 		 * No one will service this stream later, so we have to
8189 		 * schedule it now.
8190 		 */
8191 		STRSTAT(stenables);
8192 		stp->sd_svcflags |= STRS_SCHEDULED;
8193 		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
8194 		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);
8195 
8196 		if (stp->sd_servid == NULL) {
8197 			/*
8198 			 * The task queue dispatch failed, so fail over to
8199 			 * the backup servicing thread.
8200 			 */
8201 			STRSTAT(taskqfails);
8202 			/*
8203 			 * It is safe to clear the STRS_SCHEDULED flag because
8204 			 * it was set by this thread above.
8205 			 */
8206 			stp->sd_svcflags &= ~STRS_SCHEDULED;
8207 
8208 			/*
8209 			 * Failover scheduling is protected by the
8210 			 * service_queue lock.
8211 			 */
8212 			mutex_enter(&service_queue);
8213 			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
8214 			ASSERT(q->q_link == NULL);
8215 			/*
8216 			 * Append the queue to the qhead/qtail list.
8217 			 */
8218 			if (qhead == NULL)
8219 				qhead = q;
8220 			else
8221 				qtail->q_link = q;
8222 			qtail = q;
8223 			/*
8224 			 * Clear the stp queue list.
8225 			 */
8226 			stp->sd_qhead = stp->sd_qtail = NULL;
8227 			stp->sd_nqueues = 0;
8228 			/*
8229 			 * Wake up the background queue-processing thread.
8230 			 */
8231 			cv_signal(&services_to_run);
8232 			mutex_exit(&service_queue);
8233 		}
8234 	}
8235 	mutex_exit(&stp->sd_qlock);
8236 }
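
/*
 * Editorial sketch, not part of the original source: the dispatch-with-
 * failover pattern used by qenable_locked(), distilled. TQ_NOSLEEP|
 * TQ_NOQUEUE makes the dispatch fail fast instead of blocking, and a
 * failure hands the work to the backup thread via the locked list.
 */
#if 0	/* illustrative only */
	if (taskq_dispatch(streams_taskq, (task_func_t *)stream_service,
	    stp, TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
		mutex_enter(&service_queue);
		/* ... append the work to the qhead/qtail backup list ... */
		cv_signal(&services_to_run);	/* wake the backup thread */
		mutex_exit(&service_queue);
	}
#endif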
8237 
8238 static void
8239 queue_service(queue_t *q)
8240 {
8241 	/*
8242 	 * A queue on the list should have the
8243 	 * QENAB flag set and should not have the
8244 	 * QINSERVICE flag set. QINSERVICE is
8245 	 * set when the queue is dequeued, and
8246 	 * qenable_locked() doesn't enqueue a
8247 	 * queue with QINSERVICE set.
8248 	 */
8249 
8250 	ASSERT(!(q->q_flag & QINSERVICE));
8251 	ASSERT((q->q_flag & QENAB));
8252 	mutex_enter(QLOCK(q));
8253 	q->q_flag &= ~QENAB;
8254 	q->q_flag |= QINSERVICE;
8255 	mutex_exit(QLOCK(q));
8256 	runservice(q);
8257 }
8258 
8259 static void
8260 syncq_service(syncq_t *sq)
8261 {
8262 	STRSTAT(syncqservice);
8263 	mutex_enter(SQLOCK(sq));
8264 	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
8265 	ASSERT(sq->sq_servcount != 0);
8266 	ASSERT(sq->sq_next == NULL);
8267 
8268 	/* if we came here from the background thread, clear the flag */
8269 	if (sq->sq_svcflags & SQ_BGTHREAD)
8270 		sq->sq_svcflags &= ~SQ_BGTHREAD;
8271 
8272 	/* let drain_syncq know that it's being called in the background */
8273 	sq->sq_svcflags |= SQ_SERVICE;
8274 	drain_syncq(sq);
8275 }
8276 
8277 static void
8278 qwriter_outer_service(syncq_t *outer)
8279 {
8280 	/*
8281 	 * Note that SQ_WRITER is used on the outer perimeter
8282 	 * to signal that a qwriter(OUTER) is either evaluating
8283 	 * whether it can run or is actually running a function.
8284 	 */
8285 	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);
8286 
8287 	/*
8288 	 * All inner syncqs are empty and have SQ_WRITER set
8289 	 * to block entering the outer perimeter.
8290 	 *
8291 	 * We do not need to call write_now() explicitly since
8292 	 * outer_exit() does it for us.
8293 	 */
8294 	outer_exit(outer);
8295 }
8296 
8297 static void
8298 mblk_free(mblk_t *mp)
8299 {
8300 	dblk_t *dbp = mp->b_datap;
8301 	frtn_t *frp = dbp->db_frtnp;
8302 
8303 	mp->b_next = NULL;
8304 	if (dbp->db_fthdr != NULL)
8305 		str_ftfree(dbp);
8306 
8307 	ASSERT(dbp->db_fthdr == NULL);
8308 	frp->free_func(frp->free_arg);
8309 	ASSERT(dbp->db_mblk == mp);
8310 
8311 	if (dbp->db_credp != NULL) {
8312 		crfree(dbp->db_credp);
8313 		dbp->db_credp = NULL;
8314 	}
8315 	dbp->db_cpid = -1;
8316 	dbp->db_struioflag = 0;
8317 	dbp->db_struioun.cksum.flags = 0;
8318 
8319 	kmem_cache_free(dbp->db_cache, dbp);
8320 }
8321 
8322 /*
8323  * Background processing of the stream queue list.
8324  */
8325 static void
8326 stream_service(stdata_t *stp)
8327 {
8328 	queue_t *q;
8329 
8330 	mutex_enter(&stp->sd_qlock);
8331 
8332 	STR_SERVICE(stp, q);
8333 
8334 	stp->sd_svcflags &= ~STRS_SCHEDULED;
8335 	stp->sd_servid = NULL;
8336 	cv_signal(&stp->sd_qcv);
8337 	mutex_exit(&stp->sd_qlock);
8338 }
8339 
8340 /*
8341  * Foreground processing of the stream queue list.
8342  */
8343 void
8344 stream_runservice(stdata_t *stp)
8345 {
8346 	queue_t *q;
8347 
8348 	mutex_enter(&stp->sd_qlock);
8349 	STRSTAT(rservice);
8350 	/*
8351 	 * We are going to drain this stream queue list, so qenable_locked()
8352 	 * will not schedule it until we finish.
8353 	 */
8354 	stp->sd_svcflags |= STRS_WILLSERVICE;
8355 
8356 	STR_SERVICE(stp, q);
8357 
8358 	stp->sd_svcflags &= ~STRS_WILLSERVICE;
8359 	mutex_exit(&stp->sd_qlock);
8360 	/*
8361 	 * Help the backup background thread drain the qhead/qtail list.
8362 	 */
8363 	while (qhead != NULL) {
8364 		STRSTAT(qhelps);
8365 		mutex_enter(&service_queue);
8366 		DQ(q, qhead, qtail, q_link);
8367 		mutex_exit(&service_queue);
8368 		if (q != NULL)
8369 			queue_service(q);
8370 	}
8371 }
8372 
8373 void
8374 stream_willservice(stdata_t *stp)
8375 {
8376 	mutex_enter(&stp->sd_qlock);
8377 	stp->sd_svcflags |= STRS_WILLSERVICE;
8378 	mutex_exit(&stp->sd_qlock);
8379 }
8380 
8381 /*
8382  * Replace the cred currently in the mblk with a different one.
8383  * Also update db_cpid.
8384  */
8385 void
8386 mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
8387 {
8388 	dblk_t *dbp = mp->b_datap;
8389 	cred_t *ocr = dbp->db_credp;
8390 
8391 	ASSERT(cr != NULL);
8392 
8393 	if (cr != ocr) {
8394 		crhold(dbp->db_credp = cr);
8395 		if (ocr != NULL)
8396 			crfree(ocr);
8397 	}
8398 	/* Don't overwrite with NOPID */
8399 	if (cpid != NOPID)
8400 		dbp->db_cpid = cpid;
8401 }
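
/*
 * Editorial sketch, not part of the original source: a driver stamping an
 * outbound message with the current credentials. CRED() and curproc are
 * the usual kernel sources for these values.
 */
#if 0	/* illustrative only */
	mblk_setcred(mp, CRED(), curproc->p_pid);
#endif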
8402 
8403 /*
8404  * If the src message has a cred, then replace the cred currently in the
8405  * mblk with it.
8406  * Also update db_cpid.
8407  */
8408 void
8409 mblk_copycred(mblk_t *mp, const mblk_t *src)
8410 {
8411 	dblk_t *dbp = mp->b_datap;
8412 	cred_t *cr, *ocr;
8413 	pid_t cpid;
8414 
8415 	cr = msg_getcred(src, &cpid);
8416 	if (cr == NULL)
8417 		return;
8418 
8419 	ocr = dbp->db_credp;
8420 	if (cr != ocr) {
8421 		crhold(dbp->db_credp = cr);
8422 		if (ocr != NULL)
8423 			crfree(ocr);
8424 	}
8425 	/* Don't overwrite with NOPID */
8426 	if (cpid != NOPID)
8427 		dbp->db_cpid = cpid;
8428 }
8429 
8430 int
8431 hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8432     uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
8433     uint32_t flags, int km_flags)
8434 {
8435 	int rc = 0;
8436 
8437 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8438 	if (mp->b_datap->db_type == M_DATA) {
8439 		/* Associate values for M_DATA type */
8440 		DB_CKSUMSTART(mp) = (intptr_t)start;
8441 		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
8442 		DB_CKSUMEND(mp) = (intptr_t)end;
8443 		DB_CKSUMFLAGS(mp) = flags;
8444 		DB_CKSUM16(mp) = (uint16_t)value;
8445 
8446 	} else {
8447 		pattrinfo_t pa_info;
8448 
8449 		ASSERT(mmd != NULL);
8450 
8451 		pa_info.type = PATTR_HCKSUM;
8452 		pa_info.len = sizeof (pattr_hcksum_t);
8453 
8454 		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
8455 			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;
8456 
8457 			hck->hcksum_start_offset = start;
8458 			hck->hcksum_stuff_offset = stuff;
8459 			hck->hcksum_end_offset = end;
8460 			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
8461 			hck->hcksum_flags = flags;
8462 		} else {
8463 			rc = -1;
8464 		}
8465 	}
8466 	return (rc);
8467 }
8468 
8469 void
8470 hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8471     uint32_t *start, uint32_t *stuff, uint32_t *end,
8472     uint32_t *value, uint32_t *flags)
8473 {
8474 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8475 	if (mp->b_datap->db_type == M_DATA) {
8476 		if (flags != NULL) {
8477 			*flags = DB_CKSUMFLAGS(mp) & (HCK_IPV4_HDRCKSUM |
8478 			    HCK_PARTIALCKSUM | HCK_FULLCKSUM |
8479 			    HCK_FULLCKSUM_OK);
8480 			if ((*flags & (HCK_PARTIALCKSUM |
8481 			    HCK_FULLCKSUM)) != 0) {
8482 				if (value != NULL)
8483 					*value = (uint32_t)DB_CKSUM16(mp);
8484 				if ((*flags & HCK_PARTIALCKSUM) != 0) {
8485 					if (start != NULL)
8486 						*start =
8487 						    (uint32_t)DB_CKSUMSTART(mp);
8488 					if (stuff != NULL)
8489 						*stuff =
8490 						    (uint32_t)DB_CKSUMSTUFF(mp);
8491 					if (end != NULL)
8492 						*end =
8493 						    (uint32_t)DB_CKSUMEND(mp);
8494 				}
8495 			}
8496 		}
8497 	} else {
8498 		pattrinfo_t hck_attr = {PATTR_HCKSUM};
8499 
8500 		ASSERT(mmd != NULL);
8501 
8502 		/* get the hardware checksum attribute */
8503 		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
8504 			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;
8505 
8506 			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
8507 			if (flags != NULL)
8508 				*flags = hck->hcksum_flags;
8509 			if (start != NULL)
8510 				*start = hck->hcksum_start_offset;
8511 			if (stuff != NULL)
8512 				*stuff = hck->hcksum_stuff_offset;
8513 			if (end != NULL)
8514 				*end = hck->hcksum_end_offset;
8515 			if (value != NULL)
8516 				*value = (uint32_t)
8517 				    hck->hcksum_cksum_val.inet_cksum;
8518 		}
8519 	}
8520 }
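
/*
 * Editorial sketch, not part of the original source: a driver pulling the
 * checksum-offload parameters off an M_DATA message at transmit time. For
 * M_DATA the mmd/pd arguments are unused, per hcksum_retrieve() above.
 */
#if 0	/* illustrative only */
	uint32_t start, stuff, end, value, flags;

	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end, &value, &flags);
	if (flags & HCK_PARTIALCKSUM) {
		/* checksum from `start', store the result at `stuff' */
	}
#endif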
8521 
8522 void
8523 lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
8524 {
8525 	ASSERT(DB_TYPE(mp) == M_DATA);
8526 
8527 	/* Set the flags */
8528 	DB_LSOFLAGS(mp) |= flags;
8529 	DB_LSOMSS(mp) = mss;
8530 }
8531 
8532 void
8533 lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
8534 {
8535 	ASSERT(DB_TYPE(mp) == M_DATA);
8536 
8537 	if (flags != NULL) {
8538 		*flags = DB_CKSUMFLAGS(mp) & HW_LSO;
8539 		if ((*flags != 0) && (mss != NULL))
8540 			*mss = (uint32_t)DB_LSOMSS(mp);
8541 	}
8542 }
8543 
8544 /*
8545  * Checksum `len' bytes of the buffer at `bp', folding in the partial
8546  * checksum `psum' (0 if none), and return the 16-bit partial checksum.
8547  */
8548 unsigned
8549 bcksum(uchar_t *bp, int len, unsigned int psum)
8550 {
8551 	int odd = len & 1;
8552 	extern unsigned int ip_ocsum();
8553 
8554 	if (((intptr_t)bp & 1) == 0 && !odd) {
8555 		/*
8556 		 * bp is 16-bit aligned and len is a multiple of 16-bit words.
8557 		 */
8558 		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
8559 	}
8560 	if (((intptr_t)bp & 1) != 0) {
8561 		/*
8562 		 * bp isn't 16-bit aligned.
8563 		 */
8564 		unsigned int tsum;
8565 
8566 #ifdef _LITTLE_ENDIAN
8567 		psum += *bp;
8568 #else
8569 		psum += *bp << 8;
8570 #endif
8571 		len--;
8572 		bp++;
8573 		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
8574 		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
8575 		if (len & 1) {
8576 			bp += len - 1;
8577 #ifdef _LITTLE_ENDIAN
8578 			psum += *bp << 8;
8579 #else
8580 			psum += *bp;
8581 #endif
8582 		}
8583 	} else {
8584 		/*
8585 		 * bp is 16-bit aligned.
8586 		 */
8587 		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
8588 		if (odd) {
8589 			bp += len - 1;
8590 #ifdef _LITTLE_ENDIAN
8591 			psum += *bp;
8592 #else
8593 			psum += *bp << 8;
8594 #endif
8595 		}
8596 	}
8597 	/*
8598 	 * Normalize psum to 16 bits before returning the new partial
8599 	 * checksum. The max psum value before normalization is 0x3FDFE.
8600 	 */
8601 	return ((psum >> 16) + (psum & 0xFFFF));
8602 }
8603 
8604 boolean_t
8605 is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
8606 {
8607 	boolean_t rc;
8608 
8609 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8610 	if (DB_TYPE(mp) == M_DATA) {
8611 		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
8612 	} else {
8613 		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};
8614 
8615 		ASSERT(mmd != NULL);
8616 		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
8617 	}
8618 	return (rc);
8619 }
8620 
8621 void
8622 freemsgchain(mblk_t *mp)
8623 {
8624 	mblk_t *next;
8625 
8626 	while (mp != NULL) {
8627 		next = mp->b_next;
8628 		mp->b_next = NULL;
8629 
8630 		freemsg(mp);
8631 		mp = next;
8632 	}
8633 }
8634 
8635 mblk_t *
8636 copymsgchain(mblk_t *mp)
8637 {
8638 	mblk_t *nmp = NULL;
8639 	mblk_t **nmpp = &nmp;
8640 
8641 	for (; mp != NULL; mp = mp->b_next) {
8642 		if ((*nmpp = copymsg(mp)) == NULL) {
8643 			freemsgchain(nmp);
8644 			return (NULL);
8645 		}
8646 
8647 		nmpp = &((*nmpp)->b_next);
8648 	}
8649 
8650 	return (nmp);
8651 }
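
/*
 * Editorial sketch, not part of the original source: duplicating a b_next
 * chain. On an allocation failure copymsgchain() frees the partial copy
 * itself, so only the original chain remains to be freed.
 */
#if 0	/* illustrative only */
	mblk_t *copy;

	if ((copy = copymsgchain(chain)) == NULL)
		return (ENOMEM);
	/* ... hand off `copy'; eventually freemsgchain(copy) ... */
#endif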
8652 
8653 /* NOTE: Do not add code after this point. */
8654 #undef QLOCK
8655 
8656 /*
8657  * Replacement for the QLOCK macro for those that can't use it.
8658  */
8659 kmutex_t *
8660 QLOCK(queue_t *q)
8661 {
8662 	return (&(q)->q_lock);
8663 }
8664 
8665 /*
8666  * Dummy runqueues/queuerun functions for backwards compatibility.
8667  */
8668 #undef runqueues
8669 void
8670 runqueues(void)
8671 {
8672 }
8673 
8674 #undef queuerun
8675 void
8676 queuerun(void)
8677 {
8678 }
8679 
8680 /*
8681  * Initialize the STR stack instance, which tracks autopush and persistent
8682  * links.
8683  */
8684 /* ARGSUSED */
8685 static void *
8686 str_stack_init(netstackid_t stackid, netstack_t *ns)
8687 {
8688 	str_stack_t *ss;
8689 	int i;
8690 
8691 	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
8692 	ss->ss_netstack = ns;
8693 
8694 	/*
8695 	 * set up autopush
8696 	 */
8697 	sad_initspace(ss);
8698 
8699 	/*
8700 	 * set up mux_node structures.
8701 	 */
8702 	ss->ss_devcnt = devcnt;	/* In case it should change before free */
8703 	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
8704 	    ss->ss_devcnt), KM_SLEEP);
8705 	for (i = 0; i < ss->ss_devcnt; i++)
8706 		ss->ss_mux_nodes[i].mn_imaj = i;
8707 	return (ss);
8708 }
8709 
8710 /*
8711  * Note: this runs at zone shutdown rather than destroy, so that the PLINKs
8712  * are gone by the time other cleanup happens from the destroy callbacks.
8713  */
8714 static void
8715 str_stack_shutdown(netstackid_t stackid, void *arg)
8716 {
8717 	str_stack_t *ss = (str_stack_t *)arg;
8718 	int i;
8719 	cred_t *cr;
8720 
8721 	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
8722 	ASSERT(cr != NULL);
8723 
8724 	/* Undo all the I_PLINKs for this zone */
8725 	for (i = 0; i < ss->ss_devcnt; i++) {
8726 		struct mux_edge		*ep;
8727 		ldi_handle_t		lh;
8728 		ldi_ident_t		li;
8729 		int			ret;
8730 		int			rval;
8731 		dev_t			rdev;
8732 
8733 		ep = ss->ss_mux_nodes[i].mn_outp;
8734 		if (ep == NULL)
8735 			continue;
8736 		ret = ldi_ident_from_major((major_t)i, &li);
8737 		if (ret != 0) {
8738 			continue;
8739 		}
8740 		rdev = ep->me_dev;
8741 		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
8742 		    cr, &lh, li);
8743 		if (ret != 0) {
8744 			ldi_ident_release(li);
8745 			continue;
8746 		}
8747 
8748 		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
8749 		    cr, &rval);
8750 		if (ret) {
8751 			(void) ldi_close(lh, FREAD|FWRITE, cr);
8752 			ldi_ident_release(li);
8753 			continue;
8754 		}
8755 		(void) ldi_close(lh, FREAD|FWRITE, cr);
8756 
8757 		/* Release the layered driver identifier */
8758 		ldi_ident_release(li);
8759 	}
8760 	crfree(cr);
8761 
8762 	sad_freespace(ss);
8763 
8764 	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
8765 	ss->ss_mux_nodes = NULL;
8766 }
8767 
8768 /*
8769  * Free the structure; str_stack_shutdown did the other cleanup work.
8770  */
8771 /* ARGSUSED */
8772 static void
8773 str_stack_fini(netstackid_t stackid, void *arg)
8774 {
8775 	str_stack_t *ss = (str_stack_t *)arg;
8776 
8777 	kmem_free(ss, sizeof (*ss));
8778 }
8779 
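
/*
 * Editorial sketch, not part of the original source: the three netstack
 * callbacks above are registered together through the netstack framework,
 * roughly as below (the registration itself lives elsewhere in STREAMS
 * initialization).
 */
#if 0	/* illustrative only */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
#endif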