/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is no longer used, but is kept here in case some module still wants to
 * access it via the qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the STREAMS scheduling is done via task queues. Task queues may
 * fail for non-sleep dispatches, so there are two backup threads servicing
 * failed requests for queues and syncqs. Both of these threads also service
 * freebs requests whose dispatch failed. Queues are put in the list specified
 * by the `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail'
 * pointers, and freebs requests are put into `freebs_list', which has no tail
 * pointer. All three lists are protected by a single `service_queue' lock and
 * use the `services_to_run' condition variable for signaling the background
 * threads. Use of a single lock should not be a problem because it is only
 * used under heavy loads when task queues start to fail, and at that time it
 * may be a good idea to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only
 * one thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

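/*
 * Illustrative sketch (not code from this file): when a non-sleeping taskq
 * dispatch fails, the fallback is to link the work onto one of the
 * background lists declared below and signal the corresponding thread,
 * roughly:
 *
 *	mutex_enter(&service_queue);
 *	ENQUEUE(q, qhead, qtail, q_link);
 *	cv_signal(&services_to_run);
 *	mutex_exit(&service_queue);
 */
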
/*
 * List of queues scheduled for background processing due to lack of
 * resources in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;

/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait 'till executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
static void str_stack_shutdown(netstackid_t stackid, void *arg);
static void str_stack_fini(netstackid_t stackid, void *arg);

extern void time_to_wait(clock_t *, clock_t);

/*
 * run_queues is no longer used, but is kept in case some third-party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer to choose a large number
 * as the default value. For a potential performance gain, this value is
 * tunable in /etc/system.
 */
int sq_max_size = 10000;

/*
 * The number of ciputctrl structures per syncq and stream that we create
 * when needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl, don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_sq == syncq_1
 *                dm_ref
 *                dm_next --> dm_str == streamtab_2
 *                            dm_sq == syncq_2
 *                            dm_ref
 *                            dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                  ^                 ^                 ^            ^
 *                  |  ______________/                  |            |
 *                  | /                                 |            |
 * dev_impl:    ...|x|y|...          module A          module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* global esballoc throttling queue */
static esb_queue_t system_esbq;

/*
 * esballoc tunable parameters.
 */
int	esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t	esbq_timeout = 0x8;	/* timeout to process esb queue */

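/*
 * As ordinary kernel globals these are, like sq_max_size above, settable
 * from /etc/system, e.g. (hypothetical values):
 *
 *	set esbq_max_qlen = 0x40
 *	set esbq_timeout = 0x10
 */
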
/*
 * Routines to handle esballoc queueing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)())pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Special form of assertion: verify that X implies Y, i.e. when X is true,
 * Y should also be true.
 */
#define	IMPLY(X, Y)	ASSERT(!(X) || (Y))

/*
 * Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
 */
#define	EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }

/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {				\
	EQUIV(head, tail);					\
	IMPLY(tail != NULL, tail->link == NULL);		\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail', using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {				\
	ASSERT(el->link == NULL);				\
	LISTCHECK(head, tail, link);				\
	if (head == NULL)					\
		head = el;					\
	else							\
		tail->link = el;				\
	tail = el;						\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail'
 * pointers using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {				\
	LISTCHECK(head, tail, link);				\
	el = head;						\
	if (head != NULL) {					\
		head = head->link;				\
		if (head == NULL)				\
			tail = NULL;				\
		el->link = NULL;				\
	}							\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return
 * the result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
	LISTCHECK(head, tail, link);				\
	chase = NULL;						\
	succeed = 0;						\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;					\
	if (curr != NULL) {					\
		succeed = 1;					\
		ASSERT(curr == el);				\
		if (chase != NULL)				\
			chase->link = curr->link;		\
		else						\
			head = curr->link;			\
		curr->link = NULL;				\
		if (curr == tail)				\
			tail = chase;				\
	}							\
	LISTCHECK(head, tail, link);				\
}

/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

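/*
 * Example usage of the list macros above (cf. remove_runlist() later in
 * this file, which does exactly this):
 *
 *	queue_t *q_chase, *q_curr;
 *	int removed;
 *
 *	mutex_enter(&service_queue);
 *	RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
 *	mutex_exit(&service_queue);
 */
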
/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
		/* The queue should not be linked anywhere */		\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);			\
		/* Queue may only be enqueued on its syncq */		\
		ASSERT(sq == qp->q_syncq);				\
		/* Check the correctness of SQ_MESSAGES flag */		\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
		/*							\
		 * Sanity check of priority field: an empty queue	\
		 * should have zero priority and nqueues equal to zero. \
		 */							\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
		/* Sanity check of sq_nqueues field */			\
		EQUIV(sq->sq_head, sq->sq_nqueues);			\
		if (sq->sq_head == NULL) {				\
			sq->sq_head = sq->sq_tail = qp;			\
			sq->sq_flags |= SQ_MESSAGES;			\
		} else if (qp->q_spri == 0) {				\
			qp->q_sqprev = sq->sq_tail;			\
			sq->sq_tail->q_sqnext = qp;			\
			sq->sq_tail = qp;				\
		} else {						\
			/*						\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.		\
			 */						\
			queue_t **qpp = &sq->sq_tail;			\
			queue_t *qnext = NULL;				\
									\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;				\
				qpp = &(*qpp)->q_sqprev;		\
			}						\
			qp->q_sqnext = qnext;				\
			qp->q_sqprev = *qpp;				\
			if (*qpp != NULL) {				\
				(*qpp)->q_sqnext = qp;			\
			} else {					\
				sq->sq_head = qp;			\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}						\
			*qpp = qp;					\
		}							\
		qp->q_sqflags |= Q_SQQUEUED;				\
		qp->q_sqtstamp = lbolt;					\
		sq->sq_nqueues++;					\
	}								\
}

/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)							\
	{								\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);				\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);		\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);			\
	/* Check that the queue is actually in the list */		\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);		\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);		\
	ASSERT(sq->sq_nqueues != 0);					\
	if (qp->q_sqprev == NULL) {					\
		/* First queue on list, make head q_sqnext */		\
		sq->sq_head = qp->q_sqnext;				\
	} else {							\
		/* Make prev->next == next */				\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;			\
	}								\
	if (qp->q_sqnext == NULL) {					\
		/* Last queue on list, make tail sqprev */		\
		sq->sq_tail = qp->q_sqprev;				\
	} else {							\
		/* Make next->prev == prev */				\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;			\
	}								\
	/* clear out references on this queue */			\
	qp->q_sqprev = qp->q_sqnext = NULL;				\
	qp->q_sqflags &= ~Q_SQQUEUED;					\
	/* If there is nothing queued, clear SQ_MESSAGES */		\
	if (sq->sq_head != NULL) {					\
		sq->sq_pri = sq->sq_head->q_spri;			\
	} else {							\
		sq->sq_flags &= ~SQ_MESSAGES;				\
		sq->sq_pri = 0;						\
	}								\
	sq->sq_nqueues--;						\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||		\
	    (sq->sq_flags & SQ_QUEUED) == 0);				\
}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue's syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)						\
	{								\
	ASSERT(MUTEX_HELD(QLOCK(qp)));					\
	ASSERT(qp->q_sqhead == NULL ||					\
	    (qp->q_sqtail != NULL &&					\
	    qp->q_sqtail->b_next == NULL));				\
	qp->q_syncqmsgs++;						\
	ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */		\
	if (qp->q_sqhead == NULL) {					\
		qp->q_sqhead = qp->q_sqtail = mp;			\
	} else {							\
		qp->q_sqtail->b_next = mp;				\
		qp->q_sqtail = mp;					\
	}								\
	ASSERT(qp->q_syncqmsgs > 0);					\
	set_qfull(qp);							\
}

#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count |= SQ_FASTPUT;		\
		}							\
	}								\
}


#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count &= ~SQ_FASTPUT;		\
		}							\
	}								\
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {						\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
	while (stp->sd_qhead != NULL) {					\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
		ASSERT(stp->sd_nqueues > 0);				\
		stp->sd_nqueues--;					\
		ASSERT(!(q->q_flag & QINSERVICE));			\
		mutex_exit(&stp->sd_qlock);				\
		queue_service(q);					\
		mutex_enter(&stp->sd_qlock);				\
	}								\
	ASSERT(stp->sd_nqueues == 0);					\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}

/*
 * constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

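/*
 * These constructor/destructor pairs are registered with kmem caches in
 * strinit() below; the pattern (repeated here only for illustration) is:
 *
 *	cache = kmem_cache_create("stream_head_cache", sizeof (stdata_t),
 *	    0, stream_head_constructor, stream_head_destructor,
 *	    NULL, NULL, NULL, 0);
 */
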
/*
 * constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t	*sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t	*sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Handle to have autopush and persistent link information per
	 * zone.
	 * Note: uses shutdown hook instead of destroy hook so that the
	 * persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}

/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}

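/*
 * For reference (illustrative, user-level): a process registers for the
 * events handled above with the I_SETSIG ioctl, e.g.
 *
 *	(void) ioctl(fd, I_SETSIG, S_INPUT | S_ERROR);
 *
 * after which the selected events are delivered as SIGPOLL (or SIGURG for
 * urgent band data when S_BANDURG is set).
 */
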
/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t  *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}

		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}

/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t			major;
	cdevsw_impl_t		*dp;
	struct streamtab	*str;
	queue_t			*rq;
	queue_t			*wrq;
	uint32_t		qflag;
	uint32_t		sqtype;
	perdm_t			*dmp;
	int			error;
	int			sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * Stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter, get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int	error;
	dev_t	dummydev;
	queue_t	*wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * A successful open should have done qprocson().
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * that QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done)
 * Thus we cannot reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}


/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait until the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be freed
	 * when the last module/driver is unloaded.
	 * If for PERMOD perimeters the queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of these
	 * functions remove the queue from its syncq list, so sqthread will not
	 * try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t  count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr)) ==
	    NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t  n;
	int	error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}

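/*
 * Sketch of the typical call sequence (illustrative, not code from this
 * file): an M_IOCTL's data is pulled in from userland with putiocd() and
 * pushed back out on M_IOCACK/M_COPYOUT with getiocd():
 *
 *	if ((error = putiocd(bp, arg, U_TO_K, cr)) != 0)
 *		return (error);
 *	...
 *	error = getiocd(bp, arg, U_TO_K);
 */
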
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * If the lower stream is a pipe/FIFO, return, since link
	 * cycles cannot happen on pipes/FIFOs.
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 *
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}

/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}

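/*
 * Note on the pattern above: sq_count alone is not the full picture when
 * per-CPU put counters are in use (SQ_CIPUT); SUM_SQ_PUTCOUNTS folds the
 * ciputctrl counters in, which is why the loop re-samples both under
 * SQLOCK and the put locks on every iteration.
 */
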
1712 */ 1713 static void 1714 wait_q_syncq(queue_t *q) 1715 { 1716 if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) { 1717 syncq_t *sq = q->q_syncq; 1718 1719 mutex_enter(SQLOCK(sq)); 1720 while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) { 1721 sq->sq_flags |= SQ_WANTWAKEUP; 1722 cv_wait(&sq->sq_wait, SQLOCK(sq)); 1723 } 1724 mutex_exit(SQLOCK(sq)); 1725 } 1726 } 1727 1728 1729 int 1730 mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp, 1731 int lhlink) 1732 { 1733 struct stdata *stp; 1734 struct strioctl strioc; 1735 struct linkinfo *linkp; 1736 struct stdata *stpdown; 1737 struct streamtab *str; 1738 queue_t *passq; 1739 syncq_t *passyncq; 1740 queue_t *rq; 1741 cdevsw_impl_t *dp; 1742 uint32_t qflag; 1743 uint32_t sqtype; 1744 perdm_t *dmp; 1745 int error = 0; 1746 netstack_t *ns; 1747 str_stack_t *ss; 1748 1749 stp = vp->v_stream; 1750 TRACE_1(TR_FAC_STREAMS_FR, 1751 TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp); 1752 /* 1753 * Test for invalid upper stream 1754 */ 1755 if (stp->sd_flag & STRHUP) { 1756 return (ENXIO); 1757 } 1758 if (vp->v_type == VFIFO) { 1759 return (EINVAL); 1760 } 1761 if (stp->sd_strtab == NULL) { 1762 return (EINVAL); 1763 } 1764 if (!stp->sd_strtab->st_muxwinit) { 1765 return (EINVAL); 1766 } 1767 if (fpdown == NULL) { 1768 return (EBADF); 1769 } 1770 ns = netstack_find_by_cred(crp); 1771 ASSERT(ns != NULL); 1772 ss = ns->netstack_str; 1773 ASSERT(ss != NULL); 1774 1775 if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) { 1776 netstack_rele(ss->ss_netstack); 1777 return (EINVAL); 1778 } 1779 mutex_enter(&muxifier); 1780 if (stp->sd_flag & STPLEX) { 1781 mutex_exit(&muxifier); 1782 netstack_rele(ss->ss_netstack); 1783 return (ENXIO); 1784 } 1785 1786 /* 1787 * Test for invalid lower stream. 1788 * The check for the v_type != VFIFO and having a major 1789 * number not >= devcnt is done to avoid problems with 1790 * adding mux_node entry past the end of mux_nodes[]. 1791 * For FIFO's we don't add an entry so this isn't a 1792 * problem. 1793 */ 1794 if (((stpdown = fpdown->f_vnode->v_stream) == NULL) || 1795 (stpdown == stp) || (stpdown->sd_flag & 1796 (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) || 1797 ((stpdown->sd_vnode->v_type != VFIFO) && 1798 (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) || 1799 linkcycle(stp, stpdown, ss)) { 1800 mutex_exit(&muxifier); 1801 netstack_rele(ss->ss_netstack); 1802 return (EINVAL); 1803 } 1804 TRACE_1(TR_FAC_STREAMS_FR, 1805 TR_STPDOWN, "stpdown:%p", stpdown); 1806 rq = getendq(stp->sd_wrq); 1807 if (cmd == I_PLINK) 1808 rq = NULL; 1809 1810 linkp = alloclink(rq, stpdown->sd_wrq, fpdown); 1811 1812 strioc.ic_cmd = cmd; 1813 strioc.ic_timout = INFTIM; 1814 strioc.ic_len = sizeof (struct linkblk); 1815 strioc.ic_dp = (char *)&linkp->li_lblk; 1816 1817 /* 1818 * STRPLUMB protects plumbing changes and should be set before 1819 * link_addpassthru()/link_rempassthru() are called, so it is set here 1820 * and cleared in the end of mlink when passthru queue is removed. 1821 * Setting of STRPLUMB prevents reopens of the stream while passthru 1822 * queue is in-place (it is not a proper module and doesn't have open 1823 * entry point). 1824 * 1825 * STPLEX prevents any threads from entering the stream from above. It 1826 * can't be set before the call to link_addpassthru() because putnext 1827 * from below may cause stream head I/O routines to be called and these 1828 * routines assert that STPLEX is not set. 
After link_addpassthru() 1829 * nothing may come from below since the pass queue syncq is blocked. 1830 * Note also that STPLEX should be cleared before the call to 1831 * link_remmpassthru() since when messages start flowing to the stream 1832 * head (e.g. because of message propagation from the pass queue) stream 1833 * head I/O routines may be called with STPLEX flag set. 1834 * 1835 * When STPLEX is set, nothing may come into the stream from above and 1836 * it is safe to do a setq which will change stream head. So, the 1837 * correct sequence of actions is: 1838 * 1839 * 1) Set STRPLUMB 1840 * 2) Call link_addpassthru() 1841 * 3) Set STPLEX 1842 * 4) Call setq and update the stream state 1843 * 5) Clear STPLEX 1844 * 6) Call link_rempassthru() 1845 * 7) Clear STRPLUMB 1846 * 1847 * The same sequence applies to munlink() code. 1848 */ 1849 mutex_enter(&stpdown->sd_lock); 1850 stpdown->sd_flag |= STRPLUMB; 1851 mutex_exit(&stpdown->sd_lock); 1852 /* 1853 * Add passthru queue below lower mux. This will block 1854 * syncqs of lower muxs read queue during I_LINK/I_UNLINK. 1855 */ 1856 passq = link_addpassthru(stpdown); 1857 1858 mutex_enter(&stpdown->sd_lock); 1859 stpdown->sd_flag |= STPLEX; 1860 mutex_exit(&stpdown->sd_lock); 1861 1862 rq = _RD(stpdown->sd_wrq); 1863 /* 1864 * There may be messages in the streamhead's syncq due to messages 1865 * that arrived before link_addpassthru() was done. To avoid 1866 * background processing of the syncq happening simultaneous with 1867 * setq processing, we disable the streamhead syncq and wait until 1868 * existing background thread finishes working on it. 1869 */ 1870 wait_sq_svc(rq->q_syncq); 1871 passyncq = passq->q_syncq; 1872 if (!(passyncq->sq_flags & SQ_BLOCKED)) 1873 blocksq(passyncq, SQ_BLOCKED, 0); 1874 1875 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 1876 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 1877 rq->q_ptr = _WR(rq)->q_ptr = NULL; 1878 1879 /* setq might sleep in allocator - avoid holding locks. */ 1880 /* Note: we are holding muxifier here. */ 1881 1882 str = stp->sd_strtab; 1883 dp = &devimpl[getmajor(vp->v_rdev)]; 1884 ASSERT(dp->d_str == str); 1885 1886 qflag = dp->d_qflag; 1887 sqtype = dp->d_sqtype; 1888 1889 /* create perdm_t if needed */ 1890 if (NEED_DM(dp->d_dmp, qflag)) 1891 dp->d_dmp = hold_dm(str, qflag, sqtype); 1892 1893 dmp = dp->d_dmp; 1894 1895 setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype, 1896 B_TRUE); 1897 1898 /* 1899 * XXX Remove any "odd" messages from the queue. 1900 * Keep only M_DATA, M_PROTO, M_PCPROTO. 1901 */ 1902 error = strdoioctl(stp, &strioc, FNATIVE, 1903 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 1904 if (error != 0) { 1905 lbfree(linkp); 1906 1907 if (!(passyncq->sq_flags & SQ_BLOCKED)) 1908 blocksq(passyncq, SQ_BLOCKED, 0); 1909 /* 1910 * Restore the stream head queue and then remove 1911 * the passq. Turn off STPLEX before we turn on 1912 * the stream by removing the passq. 1913 */ 1914 rq->q_ptr = _WR(rq)->q_ptr = stpdown; 1915 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, 1916 B_TRUE); 1917 1918 mutex_enter(&stpdown->sd_lock); 1919 stpdown->sd_flag &= ~STPLEX; 1920 mutex_exit(&stpdown->sd_lock); 1921 1922 link_rempassthru(passq); 1923 1924 mutex_enter(&stpdown->sd_lock); 1925 stpdown->sd_flag &= ~STRPLUMB; 1926 /* Wakeup anyone waiting for STRPLUMB to clear. 
*/ 1927 cv_broadcast(&stpdown->sd_monitor); 1928 mutex_exit(&stpdown->sd_lock); 1929 1930 mutex_exit(&muxifier); 1931 netstack_rele(ss->ss_netstack); 1932 return (error); 1933 } 1934 mutex_enter(&fpdown->f_tlock); 1935 fpdown->f_count++; 1936 mutex_exit(&fpdown->f_tlock); 1937 1938 /* 1939 * If we've made it here, the linkage is all set up, so we should also 1940 * set up the layered driver linkages. 1941 */ 1942 1943 ASSERT((cmd == I_LINK) || (cmd == I_PLINK)); 1944 if (cmd == I_LINK) { 1945 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL); 1946 } else { 1947 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST); 1948 } 1949 1950 link_rempassthru(passq); 1951 1952 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss); 1953 1954 /* 1955 * Mark the upper stream as having dependent links 1956 * so that strclose can clean it up. 1957 */ 1958 if (cmd == I_LINK) { 1959 mutex_enter(&stp->sd_lock); 1960 stp->sd_flag |= STRHASLINKS; 1961 mutex_exit(&stp->sd_lock); 1962 } 1963 /* 1964 * Wake up any other processes that may have been 1965 * waiting on the lower stream. These will all 1966 * error out. 1967 */ 1968 mutex_enter(&stpdown->sd_lock); 1969 /* The passthru module is removed so we may release STRPLUMB */ 1970 stpdown->sd_flag &= ~STRPLUMB; 1971 cv_broadcast(&rq->q_wait); 1972 cv_broadcast(&_WR(rq)->q_wait); 1973 cv_broadcast(&stpdown->sd_monitor); 1974 mutex_exit(&stpdown->sd_lock); 1975 mutex_exit(&muxifier); 1976 *rvalp = linkp->li_lblk.l_index; 1977 netstack_rele(ss->ss_netstack); 1978 return (0); 1979 } 1980 1981 int 1982 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink) 1983 { 1984 int ret; 1985 struct file *fpdown; 1986 1987 fpdown = getf(arg); 1988 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink); 1989 if (fpdown != NULL) 1990 releasef(arg); 1991 return (ret); 1992 } 1993 1994 /* 1995 * Unlink a multiplexor link. Stp is the controlling stream for the 1996 * link, and linkp points to the link's entry in the linkinfo list. 1997 * The muxifier lock must be held on entry and is dropped on exit. 1998 * 1999 * NOTE: Currently it is assumed that the mux will process all the messages 2000 * sitting on its queue before ACKing the UNLINK. It is the responsibility 2001 * of the mux to handle all the messages that arrive before UNLINK. 2002 * If the mux has to send down messages on its lower stream before 2003 * ACKing I_UNLINK, then it *should* know to handle messages even 2004 * after the UNLINK is acked (actually it should be able to handle them until 2005 * we re-block the read side of the pass queue here). If the mux does not 2006 * open up the lower stream, any messages that arrive during UNLINK 2007 * will be put in the stream head. In the case of the lower stream opening 2008 * up, some messages might land in the stream head depending on when 2009 * the message arrived and when the read side of the pass queue was 2010 * re-blocked. 2011 */ 2012 int 2013 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp, 2014 str_stack_t *ss) 2015 { 2016 struct strioctl strioc; 2017 struct stdata *stpdown; 2018 queue_t *rq, *wrq; 2019 queue_t *passq; 2020 syncq_t *passyncq; 2021 int error = 0; 2022 file_t *fpdown; 2023 2024 ASSERT(MUTEX_HELD(&muxifier)); 2025 2026 stpdown = linkp->li_fpdown->f_vnode->v_stream; 2027 2028 /* 2029 * See the comment in mlink() concerning the STRPLUMB/STPLEX flags. 2030 */ 2031 mutex_enter(&stpdown->sd_lock); 2032 stpdown->sd_flag |= STRPLUMB; 2033 mutex_exit(&stpdown->sd_lock); 2034 2035 /* 2036 * Add a passthru queue below the lower mux.
This will block the 2037 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK. 2038 */ 2039 passq = link_addpassthru(stpdown); 2040 2041 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2042 strioc.ic_cmd = I_UNLINK; 2043 else 2044 strioc.ic_cmd = I_PUNLINK; 2045 strioc.ic_timout = INFTIM; 2046 strioc.ic_len = sizeof (struct linkblk); 2047 strioc.ic_dp = (char *)&linkp->li_lblk; 2048 2049 error = strdoioctl(stp, &strioc, FNATIVE, 2050 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2051 2052 /* 2053 * If there was an error and this is not called via strclose, 2054 * return to the user. Otherwise, pretend there was no error 2055 * and close the link. 2056 */ 2057 if (error) { 2058 if (flag & LINKCLOSE) { 2059 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2060 "unlink ioctl, closing anyway (%d)\n", error); 2061 } else { 2062 link_rempassthru(passq); 2063 mutex_enter(&stpdown->sd_lock); 2064 stpdown->sd_flag &= ~STRPLUMB; 2065 cv_broadcast(&stpdown->sd_monitor); 2066 mutex_exit(&stpdown->sd_lock); 2067 mutex_exit(&muxifier); 2068 return (error); 2069 } 2070 } 2071 2072 mux_rmvedge(stp, linkp->li_lblk.l_index, ss); 2073 fpdown = linkp->li_fpdown; 2074 lbfree(linkp); 2075 2076 /* 2077 * We go ahead and drop muxifier here--it's a nasty global lock that 2078 * can slow others down. It's okay to do so since attempts to mlink() this 2079 * stream will be stopped because STPLEX is still set in the stdata 2080 * structure, and munlink() is stopped because mux_rmvedge() and 2081 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2082 * respectively. Note that we defer the closef() of fpdown until 2083 * after we drop muxifier since strclose() can call munlinkall(). 2084 */ 2085 mutex_exit(&muxifier); 2086 2087 wrq = stpdown->sd_wrq; 2088 rq = _RD(wrq); 2089 2090 /* 2091 * Get rid of outstanding service procedure runs before we make 2092 * it a stream head, since a stream head doesn't have any service 2093 * procedure. 2094 */ 2095 disable_svc(rq); 2096 wait_svc(rq); 2097 2098 /* 2099 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2100 * is queued up to be finished. The mux should take care that nothing is 2101 * sent down to this queue. We should do it now as we're going to block 2102 * passyncq if it was unblocked. 2103 */ 2104 if (wrq->q_flag & QPERMOD) { 2105 syncq_t *sq = wrq->q_syncq; 2106 2107 mutex_enter(SQLOCK(sq)); 2108 while (wrq->q_sqflags & Q_SQQUEUED) { 2109 sq->sq_flags |= SQ_WANTWAKEUP; 2110 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2111 } 2112 mutex_exit(SQLOCK(sq)); 2113 } 2114 passyncq = passq->q_syncq; 2115 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2116 2117 syncq_t *sq, *outer; 2118 2119 /* 2120 * Messages could be flowing from underneath. We will 2121 * block the read side of the passq. This would be 2122 * sufficient for QPAIR and QPERQ muxes to ensure 2123 * that no data is flowing up into this queue 2124 * and hence no thread is active in this instance of the 2125 * lower mux. But for QPERMOD and QMTOUTPERIM there 2126 * could be messages on the inner and outer/inner 2127 * syncqs respectively. We will wait for them to drain. 2128 * Because passq is blocked, messages end up in the syncq, 2129 * and qfill_syncq could possibly end up setting QFULL, 2130 * which will access rq->q_flag. Hence, we have to 2131 * acquire the QLOCK in setq. 2132 * 2133 * XXX Messages can also flow from the top into this 2134 * queue even though the unlink is over (e.g. some instance 2135 * in putnext() called from the top that has still not 2136 * accessed this queue. And also putq(lowerq) ?).
2137 * Solution: How about blocking the l_qtop queue? 2138 * Do we really care about such pure D_MP muxes? 2139 */ 2140 2141 blocksq(passyncq, SQ_BLOCKED, 0); 2142 2143 sq = rq->q_syncq; 2144 if ((outer = sq->sq_outer) != NULL) { 2145 2146 /* 2147 * We just have to wait for the outer sq_count 2148 * to drop to zero. As this does not prevent new 2149 * messages from entering the outer perimeter, this 2150 * is subject to starvation. 2151 * 2152 * NOTE: Because of the blocksq above, messages could 2153 * be in the inner syncq only because of some 2154 * thread holding the outer perimeter exclusively. 2155 * Hence it would be sufficient to wait for the 2156 * exclusive holder of the outer perimeter to drain 2157 * the inner and outer syncqs. But we will not depend 2158 * on this feature and hence check the inner syncqs 2159 * separately. 2160 */ 2161 wait_syncq(outer); 2162 } 2163 2164 2165 /* 2166 * There could be messages destined for 2167 * this queue. Let the exclusive holder 2168 * drain it. 2169 */ 2170 2171 wait_syncq(sq); 2172 ASSERT((rq->q_flag & QPERMOD) || 2173 ((rq->q_syncq->sq_head == NULL) && 2174 (_WR(rq)->q_syncq->sq_head == NULL))); 2175 } 2176 2177 /* 2178 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special 2179 * case as we don't disable its syncq or remove it from the syncq 2180 * service list. 2181 */ 2182 if (rq->q_flag & QPERMOD) { 2183 syncq_t *sq = rq->q_syncq; 2184 2185 mutex_enter(SQLOCK(sq)); 2186 while (rq->q_sqflags & Q_SQQUEUED) { 2187 sq->sq_flags |= SQ_WANTWAKEUP; 2188 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2189 } 2190 mutex_exit(SQLOCK(sq)); 2191 } 2192 2193 /* 2194 * flush_syncq changes state only when there are messages to 2195 * free, i.e. when it returns a non-zero value. 2196 */ 2197 ASSERT(flush_syncq(rq->q_syncq, rq) == 0); 2198 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0); 2199 2200 /* 2201 * Nobody else should know about this queue now. 2202 * If the mux did not process the messages before 2203 * acking the I_UNLINK, free them now. 2204 */ 2205 2206 flushq(rq, FLUSHALL); 2207 flushq(_WR(rq), FLUSHALL); 2208 2209 /* 2210 * Convert the mux lower queue into a stream head queue. 2211 * Turn off STPLEX before we turn on the stream by removing the passq. 2212 */ 2213 rq->q_ptr = wrq->q_ptr = stpdown; 2214 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE); 2215 2216 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 2217 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 2218 2219 enable_svc(rq); 2220 2221 /* 2222 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still 2223 * needs to be set to prevent a reopen() of the stream - such a reopen may 2224 * try to call the non-existent pass queue open routine and panic. 2225 */ 2226 mutex_enter(&stpdown->sd_lock); 2227 stpdown->sd_flag &= ~STPLEX; 2228 mutex_exit(&stpdown->sd_lock); 2229 2230 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) || 2231 ((flag & LINKTYPEMASK) == LINKPERSIST)); 2232 2233 /* clean up the layered driver linkages */ 2234 if ((flag & LINKTYPEMASK) == LINKNORMAL) { 2235 ldi_munlink_fp(stp, fpdown, LINKNORMAL); 2236 } else { 2237 ldi_munlink_fp(stp, fpdown, LINKPERSIST); 2238 } 2239 2240 link_rempassthru(passq); 2241 2242 /* 2243 * Now all plumbing changes are finished and STRPLUMB is no 2244 * longer needed.
2245 */ 2246 mutex_enter(&stpdown->sd_lock); 2247 stpdown->sd_flag &= ~STRPLUMB; 2248 cv_broadcast(&stpdown->sd_monitor); 2249 mutex_exit(&stpdown->sd_lock); 2250 2251 (void) closef(fpdown); 2252 return (0); 2253 } 2254 2255 /* 2256 * Unlink all multiplexor links for which stp is the controlling stream. 2257 * Return 0, or a non-zero errno on failure. 2258 */ 2259 int 2260 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2261 { 2262 linkinfo_t *linkp; 2263 int error = 0; 2264 2265 mutex_enter(&muxifier); 2266 while (linkp = findlinks(stp, 0, flag, ss)) { 2267 /* 2268 * munlink() releases the muxifier lock. 2269 */ 2270 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2271 return (error); 2272 mutex_enter(&muxifier); 2273 } 2274 mutex_exit(&muxifier); 2275 return (0); 2276 } 2277 2278 /* 2279 * A multiplexor link has been made. Add an 2280 * edge to the directed graph. 2281 */ 2282 void 2283 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2284 { 2285 struct mux_node *np; 2286 struct mux_edge *ep; 2287 major_t upmaj; 2288 major_t lomaj; 2289 2290 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2291 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2292 np = &ss->ss_mux_nodes[upmaj]; 2293 if (np->mn_outp) { 2294 ep = np->mn_outp; 2295 while (ep->me_nextp) 2296 ep = ep->me_nextp; 2297 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2298 ep = ep->me_nextp; 2299 } else { 2300 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2301 ep = np->mn_outp; 2302 } 2303 ep->me_nextp = NULL; 2304 ep->me_muxid = muxid; 2305 /* 2306 * Save the dev_t for the purposes of str_stack_shutdown. 2307 * str_stack_shutdown assumes that the device allows reopen, since 2308 * this dev_t is the one after any cloning by xx_open(). 2309 * Would prefer finding the dev_t from before any cloning, 2310 * but specfs doesn't retain that. 2311 */ 2312 ep->me_dev = upstp->sd_vnode->v_rdev; 2313 if (lostp->sd_vnode->v_type == VFIFO) 2314 ep->me_nodep = NULL; 2315 else 2316 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2317 } 2318 2319 /* 2320 * A multiplexor link has been removed. Remove the 2321 * edge in the directed graph. 2322 */ 2323 void 2324 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2325 { 2326 struct mux_node *np; 2327 struct mux_edge *ep; 2328 struct mux_edge *pep = NULL; 2329 major_t upmaj; 2330 2331 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2332 np = &ss->ss_mux_nodes[upmaj]; 2333 ASSERT(np->mn_outp != NULL); 2334 ep = np->mn_outp; 2335 while (ep) { 2336 if (ep->me_muxid == muxid) { 2337 if (pep) 2338 pep->me_nextp = ep->me_nextp; 2339 else 2340 np->mn_outp = ep->me_nextp; 2341 kmem_free(ep, sizeof (struct mux_edge)); 2342 return; 2343 } 2344 pep = ep; 2345 ep = ep->me_nextp; 2346 } 2347 ASSERT(0); /* should not reach here */ 2348 } 2349 2350 /* 2351 * Translate the device flags (from conf.h) to the corresponding 2352 * qflag and sq_flag (type) values. 
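 *
 * As a worked example (a sketch based on the mapping below, not taken
 * from any particular driver): a driver declaring
 * devflag = D_MP | D_MTQPAIR | D_MTOUTPERIM maps to
 * qflag = QPAIR | QMTOUTPERIM and sqtype = SQ_CO, i.e. just the default
 * outer-perimeter concurrency bits, since D_MTQPAIR, unlike plain D_MP,
 * does not add SQ_CI.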
2353 */ 2354 int 2355 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2356 uint32_t *sqtypep) 2357 { 2358 uint32_t qflag = 0; 2359 uint32_t sqtype = 0; 2360 2361 if (devflag & _D_OLD) 2362 goto bad; 2363 2364 /* Inner perimeter presence and scope */ 2365 switch (devflag & D_MTINNER_MASK) { 2366 case D_MP: 2367 qflag |= QMTSAFE; 2368 sqtype |= SQ_CI; 2369 break; 2370 case D_MTPERQ|D_MP: 2371 qflag |= QPERQ; 2372 break; 2373 case D_MTQPAIR|D_MP: 2374 qflag |= QPAIR; 2375 break; 2376 case D_MTPERMOD|D_MP: 2377 qflag |= QPERMOD; 2378 break; 2379 default: 2380 goto bad; 2381 } 2382 2383 /* Outer perimeter */ 2384 if (devflag & D_MTOUTPERIM) { 2385 switch (devflag & D_MTINNER_MASK) { 2386 case D_MP: 2387 case D_MTPERQ|D_MP: 2388 case D_MTQPAIR|D_MP: 2389 break; 2390 default: 2391 goto bad; 2392 } 2393 qflag |= QMTOUTPERIM; 2394 } 2395 2396 /* Inner perimeter modifiers */ 2397 if (devflag & D_MTINNER_MOD) { 2398 switch (devflag & D_MTINNER_MASK) { 2399 case D_MP: 2400 goto bad; 2401 default: 2402 break; 2403 } 2404 if (devflag & D_MTPUTSHARED) 2405 sqtype |= SQ_CIPUT; 2406 if (devflag & _D_MTOCSHARED) { 2407 /* 2408 * The code in putnext assumes that it has the 2409 * highest concurrency by not checking sq_count. 2410 * Thus _D_MTOCSHARED can only be supported when 2411 * D_MTPUTSHARED is set. 2412 */ 2413 if (!(devflag & D_MTPUTSHARED)) 2414 goto bad; 2415 sqtype |= SQ_CIOC; 2416 } 2417 if (devflag & _D_MTCBSHARED) { 2418 /* 2419 * The code in putnext assumes that it has the 2420 * highest concurrency by not checking sq_count. 2421 * Thus _D_MTCBSHARED can only be supported when 2422 * D_MTPUTSHARED is set. 2423 */ 2424 if (!(devflag & D_MTPUTSHARED)) 2425 goto bad; 2426 sqtype |= SQ_CICB; 2427 } 2428 if (devflag & _D_MTSVCSHARED) { 2429 /* 2430 * The code in putnext assumes that it has the 2431 * highest concurrency by not checking sq_count. 2432 * Thus _D_MTSVCSHARED can only be supported when 2433 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2434 * supported only for QPERMOD. 2435 */ 2436 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2437 goto bad; 2438 sqtype |= SQ_CISVC; 2439 } 2440 } 2441 2442 /* Default outer perimeter concurrency */ 2443 sqtype |= SQ_CO; 2444 2445 /* Outer perimeter modifiers */ 2446 if (devflag & D_MTOCEXCL) { 2447 if (!(devflag & D_MTOUTPERIM)) { 2448 /* No outer perimeter */ 2449 goto bad; 2450 } 2451 sqtype &= ~SQ_COOC; 2452 } 2453 2454 /* Synchronous Streams extended qinit structure */ 2455 if (devflag & D_SYNCSTR) 2456 qflag |= QSYNCSTR; 2457 2458 /* 2459 * Private flag used by a transport module to indicate 2460 * to sockfs that it supports direct-access mode without 2461 * having to go through STREAMS, or that the transport can use 2462 * sodirect_t sharing to bypass STREAMS for receive-side 2463 * M_DATA processing. 2464 */ 2465 if (devflag & (_D_DIRECT|_D_SODIRECT)) { 2466 /* Reject unless the module is fully-MT (no perimeter) */ 2467 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2468 goto bad; 2469 if (devflag & _D_DIRECT) 2470 qflag |= _QDIRECT; 2471 if (devflag & _D_SODIRECT) 2472 qflag |= _QSODIRECT; 2473 } 2474 2475 *qflagp = qflag; 2476 *sqtypep = sqtype; 2477 return (0); 2478 2479 bad: 2480 cmn_err(CE_WARN, 2481 "stropen: bad MT flags (0x%x) in driver '%s'", 2482 (int)(qflag & D_MTSAFETY_MASK), 2483 stp->st_rdinit->qi_minfo->mi_idname); 2484 2485 return (EINVAL); 2486 } 2487 2488 /* 2489 * Set the interface values for a pair of queues (qinit structure, 2490 * packet sizes, water marks).
2491 * setq assumes that the caller does not have a claim (entersq or claimq) 2492 * on the queue. 2493 */ 2494 void 2495 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2496 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2497 { 2498 queue_t *wq; 2499 syncq_t *sq, *outer; 2500 2501 ASSERT(rq->q_flag & QREADR); 2502 ASSERT((qflag & QMT_TYPEMASK) != 0); 2503 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2504 2505 wq = _WR(rq); 2506 rq->q_qinfo = rinit; 2507 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2508 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2509 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2510 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2511 wq->q_qinfo = winit; 2512 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2513 wq->q_lowat = winit->qi_minfo->mi_lowat; 2514 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2515 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2516 2517 /* Remove old syncqs */ 2518 sq = rq->q_syncq; 2519 outer = sq->sq_outer; 2520 if (outer != NULL) { 2521 ASSERT(wq->q_syncq->sq_outer == outer); 2522 outer_remove(outer, rq->q_syncq); 2523 if (wq->q_syncq != rq->q_syncq) 2524 outer_remove(outer, wq->q_syncq); 2525 } 2526 ASSERT(sq->sq_outer == NULL); 2527 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2528 2529 if (sq != SQ(rq)) { 2530 if (!(rq->q_flag & QPERMOD)) 2531 free_syncq(sq); 2532 if (wq->q_syncq == rq->q_syncq) 2533 wq->q_syncq = NULL; 2534 rq->q_syncq = NULL; 2535 } 2536 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2537 wq->q_syncq != SQ(rq)) { 2538 free_syncq(wq->q_syncq); 2539 wq->q_syncq = NULL; 2540 } 2541 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2542 rq->q_syncq->sq_tail == NULL)); 2543 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2544 wq->q_syncq->sq_tail == NULL)); 2545 2546 if (!(rq->q_flag & QPERMOD) && 2547 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2548 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2549 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2550 rq->q_syncq->sq_nciputctrl, 0); 2551 ASSERT(ciputctrl_cache != NULL); 2552 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2553 rq->q_syncq->sq_ciputctrl = NULL; 2554 rq->q_syncq->sq_nciputctrl = 0; 2555 } 2556 2557 if (!(wq->q_flag & QPERMOD) && 2558 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2559 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2560 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2561 wq->q_syncq->sq_nciputctrl, 0); 2562 ASSERT(ciputctrl_cache != NULL); 2563 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2564 wq->q_syncq->sq_ciputctrl = NULL; 2565 wq->q_syncq->sq_nciputctrl = 0; 2566 } 2567 2568 sq = SQ(rq); 2569 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2570 ASSERT(sq->sq_outer == NULL); 2571 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2572 2573 /* 2574 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2575 * bits in sq_flag based on the sqtype. 2576 */ 2577 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2578 2579 rq->q_syncq = wq->q_syncq = sq; 2580 sq->sq_type = sqtype; 2581 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2582 2583 /* 2584 * We are making sq_svcflags zero, 2585 * resetting SQ_DISABLED in case it was set by 2586 * wait_svc() in the munlink path. 
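 * If SQ_DISABLED were left set, sqenable() would refuse to schedule
 * this syncq again (it returns early when SQ_DISABLED is set), and the
 * reused stream head would never get background service.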
2587 * 2588 */ 2589 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2590 sq->sq_svcflags = 0; 2591 2592 /* 2593 * We need to acquire the lock here for the mlink and munlink case, 2594 * where canputnext, backenable, etc can access the q_flag. 2595 */ 2596 if (lock_needed) { 2597 mutex_enter(QLOCK(rq)); 2598 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2599 mutex_exit(QLOCK(rq)); 2600 mutex_enter(QLOCK(wq)); 2601 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2602 mutex_exit(QLOCK(wq)); 2603 } else { 2604 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2605 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2606 } 2607 2608 if (qflag & QPERQ) { 2609 /* Allocate a separate syncq for the write side */ 2610 sq = new_syncq(); 2611 sq->sq_type = rq->q_syncq->sq_type; 2612 sq->sq_flags = rq->q_syncq->sq_flags; 2613 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2614 sq->sq_oprev == NULL); 2615 wq->q_syncq = sq; 2616 } 2617 if (qflag & QPERMOD) { 2618 sq = dmp->dm_sq; 2619 2620 /* 2621 * Assert that we do have an inner perimeter syncq and that it 2622 * does not have an outer perimeter associated with it. 2623 */ 2624 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2625 sq->sq_oprev == NULL); 2626 rq->q_syncq = wq->q_syncq = sq; 2627 } 2628 if (qflag & QMTOUTPERIM) { 2629 outer = dmp->dm_sq; 2630 2631 ASSERT(outer->sq_outer == NULL); 2632 outer_insert(outer, rq->q_syncq); 2633 if (wq->q_syncq != rq->q_syncq) 2634 outer_insert(outer, wq->q_syncq); 2635 } 2636 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2637 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2638 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2639 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2640 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2641 2642 /* 2643 * Initialize struio() types. 2644 */ 2645 rq->q_struiot = 2646 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2647 wq->q_struiot = 2648 (wq->q_flag & QSYNCSTR) ? 
winit->qi_struiot : STRUIOT_NONE; 2649 } 2650 2651 perdm_t * 2652 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2653 { 2654 syncq_t *sq; 2655 perdm_t **pp; 2656 perdm_t *p; 2657 perdm_t *dmp; 2658 2659 ASSERT(str != NULL); 2660 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2661 2662 rw_enter(&perdm_rwlock, RW_READER); 2663 for (p = perdm_list; p != NULL; p = p->dm_next) { 2664 if (p->dm_str == str) { /* found one */ 2665 atomic_add_32(&(p->dm_ref), 1); 2666 rw_exit(&perdm_rwlock); 2667 return (p); 2668 } 2669 } 2670 rw_exit(&perdm_rwlock); 2671 2672 sq = new_syncq(); 2673 if (qflag & QPERMOD) { 2674 sq->sq_type = sqtype | SQ_PERMOD; 2675 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2676 } else { 2677 ASSERT(qflag & QMTOUTPERIM); 2678 sq->sq_onext = sq->sq_oprev = sq; 2679 } 2680 2681 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2682 dmp->dm_sq = sq; 2683 dmp->dm_str = str; 2684 dmp->dm_ref = 1; 2685 dmp->dm_next = NULL; 2686 2687 rw_enter(&perdm_rwlock, RW_WRITER); 2688 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2689 if (p->dm_str == str) { /* already present */ 2690 p->dm_ref++; 2691 rw_exit(&perdm_rwlock); 2692 free_syncq(sq); 2693 kmem_free(dmp, sizeof (perdm_t)); 2694 return (p); 2695 } 2696 } 2697 2698 *pp = dmp; 2699 rw_exit(&perdm_rwlock); 2700 return (dmp); 2701 } 2702 2703 void 2704 rele_dm(perdm_t *dmp) 2705 { 2706 perdm_t **pp; 2707 perdm_t *p; 2708 2709 rw_enter(&perdm_rwlock, RW_WRITER); 2710 ASSERT(dmp->dm_ref > 0); 2711 2712 if (--dmp->dm_ref > 0) { 2713 rw_exit(&perdm_rwlock); 2714 return; 2715 } 2716 2717 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2718 if (p == dmp) 2719 break; 2720 ASSERT(p == dmp); 2721 *pp = p->dm_next; 2722 rw_exit(&perdm_rwlock); 2723 2724 /* 2725 * Wait for any background processing that relies on the 2726 * syncq to complete before it is freed. 2727 */ 2728 wait_sq_svc(p->dm_sq); 2729 free_syncq(p->dm_sq); 2730 kmem_free(p, sizeof (perdm_t)); 2731 } 2732 2733 /* 2734 * Make a protocol message given control and data buffers. 2735 * n.b., this can block; be careful of what locks you hold when calling it. 2736 * 2737 * If sd_maxblk is less than *iosize this routine can fail part way through 2738 * (due to an allocation failure). In this case on return *iosize will contain 2739 * the amount that was consumed. Otherwise *iosize will not be modified 2740 * i.e. it will contain the amount that was consumed. 2741 */ 2742 int 2743 strmakemsg( 2744 struct strbuf *mctl, 2745 ssize_t *iosize, 2746 struct uio *uiop, 2747 stdata_t *stp, 2748 int32_t flag, 2749 mblk_t **mpp) 2750 { 2751 mblk_t *mpctl = NULL; 2752 mblk_t *mpdata = NULL; 2753 int error; 2754 2755 ASSERT(uiop != NULL); 2756 2757 *mpp = NULL; 2758 /* Create control part, if any */ 2759 if ((mctl != NULL) && (mctl->len >= 0)) { 2760 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2761 if (error) 2762 return (error); 2763 } 2764 /* Create data part, if any */ 2765 if (*iosize >= 0) { 2766 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2767 if (error) { 2768 freemsg(mpctl); 2769 return (error); 2770 } 2771 } 2772 if (mpctl != NULL) { 2773 if (mpdata != NULL) 2774 linkb(mpctl, mpdata); 2775 *mpp = mpctl; 2776 } else { 2777 *mpp = mpdata; 2778 } 2779 return (0); 2780 } 2781 2782 /* 2783 * Make the control part of a protocol message given a control buffer. 2784 * n.b., this can block; be careful of what locks you hold when calling it. 
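 *
 * For illustration, callers normally reach this through strmakemsg()
 * above rather than calling it directly; a sketch (the iosize setup is
 * illustrative, not from this file):
 *
 *	mblk_t *mp;
 *	ssize_t iosize = uiop->uio_resid;
 *	error = strmakemsg(&mctl, &iosize, uiop, stp, flag, &mp);
 *
 * strmakemsg() builds the M_PROTO/M_PCPROTO part here and the M_DATA
 * part in strmakedata(), linking the two with linkb().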
2785 */ 2786 int 2787 strmakectl( 2788 struct strbuf *mctl, 2789 int32_t flag, 2790 int32_t fflag, 2791 mblk_t **mpp) 2792 { 2793 mblk_t *bp = NULL; 2794 unsigned char msgtype; 2795 int error = 0; 2796 2797 *mpp = NULL; 2798 /* 2799 * Create control part of message, if any. 2800 */ 2801 if ((mctl != NULL) && (mctl->len >= 0)) { 2802 caddr_t base; 2803 int ctlcount; 2804 int allocsz; 2805 2806 if (flag & RS_HIPRI) 2807 msgtype = M_PCPROTO; 2808 else 2809 msgtype = M_PROTO; 2810 2811 ctlcount = mctl->len; 2812 base = mctl->buf; 2813 2814 /* 2815 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2816 * blocks by increasing the size to something more usable. 2817 */ 2818 allocsz = MAX(ctlcount, 64); 2819 2820 /* 2821 * Range checking has already been done; simply try 2822 * to allocate a message block for the ctl part. 2823 */ 2824 while (!(bp = allocb(allocsz, BPRI_MED))) { 2825 if (fflag & (FNDELAY|FNONBLOCK)) 2826 return (EAGAIN); 2827 if (error = strwaitbuf(allocsz, BPRI_MED)) 2828 return (error); 2829 } 2830 2831 bp->b_datap->db_type = msgtype; 2832 if (copyin(base, bp->b_wptr, ctlcount)) { 2833 freeb(bp); 2834 return (EFAULT); 2835 } 2836 bp->b_wptr += ctlcount; 2837 } 2838 *mpp = bp; 2839 return (0); 2840 } 2841 2842 /* 2843 * Make a protocol message given data buffers. 2844 * n.b., this can block; be careful of what locks you hold when calling it. 2845 * 2846 * If sd_maxblk is less than *iosize this routine can fail part way through 2847 * (due to an allocation failure). In this case on return *iosize will contain 2848 * the amount that was consumed. Otherwise *iosize will not be modified 2849 * i.e. it will contain the amount that was consumed. 2850 */ 2851 int 2852 strmakedata( 2853 ssize_t *iosize, 2854 struct uio *uiop, 2855 stdata_t *stp, 2856 int32_t flag, 2857 mblk_t **mpp) 2858 { 2859 mblk_t *mp = NULL; 2860 mblk_t *bp; 2861 int wroff = (int)stp->sd_wroff; 2862 int tail_len = (int)stp->sd_tail; 2863 int extra = wroff + tail_len; 2864 int error = 0; 2865 ssize_t maxblk; 2866 ssize_t count = *iosize; 2867 cred_t *cr = CRED(); 2868 2869 *mpp = NULL; 2870 if (count < 0) 2871 return (0); 2872 2873 maxblk = stp->sd_maxblk; 2874 if (maxblk == INFPSZ) 2875 maxblk = count; 2876 2877 /* 2878 * Create data part of message, if any. 2879 */ 2880 do { 2881 ssize_t size; 2882 dblk_t *dp; 2883 2884 ASSERT(uiop); 2885 2886 size = MIN(count, maxblk); 2887 2888 while ((bp = allocb_cred(size + extra, cr)) == NULL) { 2889 error = EAGAIN; 2890 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2891 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2892 if (count == *iosize) { 2893 freemsg(mp); 2894 return (error); 2895 } else { 2896 *iosize -= count; 2897 *mpp = mp; 2898 return (0); 2899 } 2900 } 2901 } 2902 dp = bp->b_datap; 2903 dp->db_cpid = curproc->p_pid; 2904 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2905 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2906 2907 if (flag & STRUIO_POSTPONE) { 2908 /* 2909 * Setup the stream uio portion of the 2910 * dblk for subsequent use by struioget(). 
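 * The copy from the uio is deferred: the dblk is tagged STRUIO_SPEC and
 * b_wptr is advanced past the as-yet-unwritten bytes, leaving it to a
 * later struioget() to actually move the data.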
2911 */ 2912 dp->db_struioflag = STRUIO_SPEC; 2913 dp->db_cksumstart = 0; 2914 dp->db_cksumstuff = 0; 2915 dp->db_cksumend = size; 2916 *(long long *)dp->db_struioun.data = 0ll; 2917 bp->b_wptr += size; 2918 } else { 2919 if (stp->sd_copyflag & STRCOPYCACHED) 2920 uiop->uio_extflg |= UIO_COPY_CACHED; 2921 2922 if (size != 0) { 2923 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2924 uiop); 2925 if (error != 0) { 2926 freeb(bp); 2927 freemsg(mp); 2928 return (error); 2929 } 2930 } 2931 bp->b_wptr += size; 2932 2933 if (stp->sd_wputdatafunc != NULL) { 2934 mblk_t *newbp; 2935 2936 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2937 bp, NULL, NULL, NULL, NULL); 2938 if (newbp == NULL) { 2939 freeb(bp); 2940 freemsg(mp); 2941 return (ECOMM); 2942 } 2943 bp = newbp; 2944 } 2945 } 2946 2947 count -= size; 2948 2949 if (mp == NULL) 2950 mp = bp; 2951 else 2952 linkb(mp, bp); 2953 } while (count > 0); 2954 2955 *mpp = mp; 2956 return (0); 2957 } 2958 2959 /* 2960 * Wait for a buffer to become available. Return non-zero errno 2961 * if not able to wait, 0 if buffer is probably there. 2962 */ 2963 int 2964 strwaitbuf(size_t size, int pri) 2965 { 2966 bufcall_id_t id; 2967 2968 mutex_enter(&bcall_monitor); 2969 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2970 &ttoproc(curthread)->p_flag_cv)) == 0) { 2971 mutex_exit(&bcall_monitor); 2972 return (ENOSR); 2973 } 2974 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2975 unbufcall(id); 2976 mutex_exit(&bcall_monitor); 2977 return (EINTR); 2978 } 2979 unbufcall(id); 2980 mutex_exit(&bcall_monitor); 2981 return (0); 2982 } 2983 2984 /* 2985 * This function waits for a read or write event to happen on a stream. 2986 * fmode can specify FNDELAY and/or FNONBLOCK. 2987 * The timeout is in ms with -1 meaning infinite. 2988 * The flag values work as follows: 2989 * READWAIT Check for read side errors, send M_READ 2990 * GETWAIT Check for read side errors, no M_READ 2991 * WRITEWAIT Check for write side errors. 2992 * NOINTR Do not return error if nonblocking or timeout. 2993 * STR_NOERROR Ignore all errors except STPLEX. 2994 * STR_NOSIG Ignore/hold signals during the duration of the call. 2995 * STR_PEEK Pass through the strgeterr(). 2996 */ 2997 int 2998 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 2999 int *done) 3000 { 3001 int slpflg, errs; 3002 int error; 3003 kcondvar_t *sleepon; 3004 mblk_t *mp; 3005 ssize_t *rd_count; 3006 clock_t rval; 3007 3008 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3009 if ((flag & READWAIT) || (flag & GETWAIT)) { 3010 slpflg = RSLEEP; 3011 sleepon = &_RD(stp->sd_wrq)->q_wait; 3012 errs = STRDERR|STPLEX; 3013 } else { 3014 slpflg = WSLEEP; 3015 sleepon = &stp->sd_wrq->q_wait; 3016 errs = STWRERR|STRHUP|STPLEX; 3017 } 3018 if (flag & STR_NOERROR) 3019 errs = STPLEX; 3020 3021 if (stp->sd_wakeq & slpflg) { 3022 /* 3023 * A strwakeq() is pending, no need to sleep. 3024 */ 3025 stp->sd_wakeq &= ~slpflg; 3026 *done = 0; 3027 return (0); 3028 } 3029 3030 if (stp->sd_flag & errs) { 3031 /* 3032 * Check for errors before going to sleep since the 3033 * caller might not have checked this while holding 3034 * sd_lock. 3035 */ 3036 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3037 if (error != 0) { 3038 *done = 1; 3039 return (error); 3040 } 3041 } 3042 3043 /* 3044 * If any module downstream has requested read notification 3045 * by setting SNDMREAD flag using M_SETOPTS, send a message 3046 * down stream. 
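 * The M_READ message carries the requested byte count as its payload;
 * a downstream module that asked for SNDMREAD could pick it up as, for
 * example (a sketch only):
 *
 *	ssize_t nbytes = *(ssize_t *)mp->b_rptr;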
3047 */ 3048 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3049 mutex_exit(&stp->sd_lock); 3050 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3051 (flag & STR_NOSIG), &error))) { 3052 mutex_enter(&stp->sd_lock); 3053 *done = 1; 3054 return (error); 3055 } 3056 mp->b_datap->db_type = M_READ; 3057 rd_count = (ssize_t *)mp->b_wptr; 3058 *rd_count = count; 3059 mp->b_wptr += sizeof (ssize_t); 3060 /* 3061 * Send the number of bytes requested by the 3062 * read as the argument to M_READ. 3063 */ 3064 stream_willservice(stp); 3065 putnext(stp->sd_wrq, mp); 3066 stream_runservice(stp); 3067 mutex_enter(&stp->sd_lock); 3068 3069 /* 3070 * If any data arrived due to inline processing 3071 * of putnext(), don't sleep. 3072 */ 3073 if (_RD(stp->sd_wrq)->q_first != NULL) { 3074 *done = 0; 3075 return (0); 3076 } 3077 } 3078 3079 if (fmode & (FNDELAY|FNONBLOCK)) { 3080 if (!(flag & NOINTR)) 3081 error = EAGAIN; 3082 else 3083 error = 0; 3084 *done = 1; 3085 return (error); 3086 } 3087 3088 stp->sd_flag |= slpflg; 3089 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3090 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3091 stp, flag, count, fmode, done); 3092 3093 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3094 if (rval > 0) { 3095 /* EMPTY */ 3096 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3097 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3098 stp, flag, count, fmode, done); 3099 } else if (rval == 0) { 3100 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3101 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3102 stp, flag, count, fmode, done); 3103 stp->sd_flag &= ~slpflg; 3104 cv_broadcast(sleepon); 3105 if (!(flag & NOINTR)) 3106 error = EINTR; 3107 else 3108 error = 0; 3109 *done = 1; 3110 return (error); 3111 } else { 3112 /* timeout */ 3113 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3114 "strwaitq timeout:%p, %X, %lX, %X, %p", 3115 stp, flag, count, fmode, done); 3116 *done = 1; 3117 if (!(flag & NOINTR)) 3118 return (ETIME); 3119 else 3120 return (0); 3121 } 3122 /* 3123 * If the caller implements delayed errors (i.e. queued after data) 3124 * we can not check for errors here since data as well as an 3125 * error might have arrived at the stream head. We return to 3126 * have the caller check the read queue before checking for errors. 3127 */ 3128 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3129 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3130 if (error != 0) { 3131 *done = 1; 3132 return (error); 3133 } 3134 } 3135 *done = 0; 3136 return (0); 3137 } 3138 3139 /* 3140 * Perform job control discipline access checks. 3141 * Return 0 for success and the errno for failure. 3142 */ 3143 3144 #define cantsend(p, t, sig) \ 3145 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3146 3147 int 3148 straccess(struct stdata *stp, enum jcaccess mode) 3149 { 3150 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3151 kthread_t *t = curthread; 3152 proc_t *p = ttoproc(t); 3153 sess_t *sp; 3154 3155 ASSERT(mutex_owned(&stp->sd_lock)); 3156 3157 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3158 return (0); 3159 3160 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3161 3162 for (;;) { 3163 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3164 sp = p->p_sessp; 3165 mutex_enter(&sp->s_lock); /* protects sp->* */ 3166 3167 /* 3168 * If this is not the calling process's controlling terminal 3169 * or if the calling process is already in the foreground 3170 * then allow access. 
3171 */ 3172 if (sp->s_dev != stp->sd_vnode->v_rdev || 3173 p->p_pgidp == stp->sd_pgidp) { 3174 mutex_exit(&sp->s_lock); 3175 mutex_exit(&p->p_splock); 3176 mutex_exit(&p->p_lock); 3177 return (0); 3178 } 3179 3180 /* 3181 * Check to see if controlling terminal has been deallocated. 3182 */ 3183 if (sp->s_vp == NULL) { 3184 if (!cantsend(p, t, SIGHUP)) 3185 sigtoproc(p, t, SIGHUP); 3186 mutex_exit(&sp->s_lock); 3187 mutex_exit(&p->p_splock); 3188 mutex_exit(&p->p_lock); 3189 return (EIO); 3190 } 3191 3192 mutex_exit(&sp->s_lock); 3193 mutex_exit(&p->p_splock); 3194 3195 if (mode == JCGETP) { 3196 mutex_exit(&p->p_lock); 3197 return (0); 3198 } 3199 3200 if (mode == JCREAD) { 3201 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3202 mutex_exit(&p->p_lock); 3203 return (EIO); 3204 } 3205 mutex_exit(&p->p_lock); 3206 mutex_exit(&stp->sd_lock); 3207 pgsignal(p->p_pgidp, SIGTTIN); 3208 mutex_enter(&stp->sd_lock); 3209 mutex_enter(&p->p_lock); 3210 } else { /* mode == JCWRITE or JCSETP */ 3211 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3212 cantsend(p, t, SIGTTOU)) { 3213 mutex_exit(&p->p_lock); 3214 return (0); 3215 } 3216 if (p->p_detached) { 3217 mutex_exit(&p->p_lock); 3218 return (EIO); 3219 } 3220 mutex_exit(&p->p_lock); 3221 mutex_exit(&stp->sd_lock); 3222 pgsignal(p->p_pgidp, SIGTTOU); 3223 mutex_enter(&stp->sd_lock); 3224 mutex_enter(&p->p_lock); 3225 } 3226 3227 /* 3228 * We call cv_wait_sig_swap() to cause the appropriate 3229 * action for the jobcontrol signal to take place. 3230 * If the signal is being caught, we will take the 3231 * EINTR error return. Otherwise, the default action 3232 * of causing the process to stop will take place. 3233 * In this case, we rely on the periodic cv_broadcast() on 3234 * &lbolt_cv to wake us up to loop around and test again. 3235 * We can't get here if the signal is ignored or 3236 * if the current thread is blocking the signal. 3237 */ 3238 mutex_exit(&stp->sd_lock); 3239 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3240 mutex_exit(&p->p_lock); 3241 mutex_enter(&stp->sd_lock); 3242 return (EINTR); 3243 } 3244 mutex_exit(&p->p_lock); 3245 mutex_enter(&stp->sd_lock); 3246 mutex_enter(&p->p_lock); 3247 } 3248 } 3249 3250 /* 3251 * Return size of message of block type (bp->b_datap->db_type) 3252 */ 3253 size_t 3254 xmsgsize(mblk_t *bp) 3255 { 3256 unsigned char type; 3257 size_t count = 0; 3258 3259 type = bp->b_datap->db_type; 3260 3261 for (; bp; bp = bp->b_cont) { 3262 if (type != bp->b_datap->db_type) 3263 break; 3264 ASSERT(bp->b_wptr >= bp->b_rptr); 3265 count += bp->b_wptr - bp->b_rptr; 3266 } 3267 return (count); 3268 } 3269 3270 /* 3271 * Allocate a stream head. 
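 *
 * The allocation is KM_SLEEP and so cannot fail. A typical pairing
 * (a sketch of the usual pattern, not a fixed contract) is:
 *
 *	qp = allocq();
 *	stp = shalloc(qp);
 *	...
 *	shfree(stp);
 *	freeq(qp);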
3272 */ 3273 struct stdata * 3274 shalloc(queue_t *qp) 3275 { 3276 stdata_t *stp; 3277 3278 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3279 3280 stp->sd_wrq = _WR(qp); 3281 stp->sd_strtab = NULL; 3282 stp->sd_iocid = 0; 3283 stp->sd_mate = NULL; 3284 stp->sd_freezer = NULL; 3285 stp->sd_refcnt = 0; 3286 stp->sd_wakeq = 0; 3287 stp->sd_anchor = 0; 3288 stp->sd_struiowrq = NULL; 3289 stp->sd_struiordq = NULL; 3290 stp->sd_struiodnak = 0; 3291 stp->sd_struionak = NULL; 3292 stp->sd_t_audit_data = NULL; 3293 stp->sd_rput_opt = 0; 3294 stp->sd_wput_opt = 0; 3295 stp->sd_read_opt = 0; 3296 stp->sd_rprotofunc = strrput_proto; 3297 stp->sd_rmiscfunc = strrput_misc; 3298 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3299 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3300 stp->sd_ciputctrl = NULL; 3301 stp->sd_nciputctrl = 0; 3302 stp->sd_qhead = NULL; 3303 stp->sd_qtail = NULL; 3304 stp->sd_servid = NULL; 3305 stp->sd_nqueues = 0; 3306 stp->sd_svcflags = 0; 3307 stp->sd_copyflag = 0; 3308 3309 return (stp); 3310 } 3311 3312 /* 3313 * Free a stream head. 3314 */ 3315 void 3316 shfree(stdata_t *stp) 3317 { 3318 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3319 3320 stp->sd_wrq = NULL; 3321 3322 mutex_enter(&stp->sd_qlock); 3323 while (stp->sd_svcflags & STRS_SCHEDULED) { 3324 STRSTAT(strwaits); 3325 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3326 } 3327 mutex_exit(&stp->sd_qlock); 3328 3329 if (stp->sd_ciputctrl != NULL) { 3330 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3331 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3332 stp->sd_nciputctrl, 0); 3333 ASSERT(ciputctrl_cache != NULL); 3334 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3335 stp->sd_ciputctrl = NULL; 3336 stp->sd_nciputctrl = 0; 3337 } 3338 ASSERT(stp->sd_qhead == NULL); 3339 ASSERT(stp->sd_qtail == NULL); 3340 ASSERT(stp->sd_nqueues == 0); 3341 kmem_cache_free(stream_head_cache, stp); 3342 } 3343 3344 /* 3345 * Allocate a pair of queues and a syncq for the pair 3346 */ 3347 queue_t * 3348 allocq(void) 3349 { 3350 queinfo_t *qip; 3351 queue_t *qp, *wqp; 3352 syncq_t *sq; 3353 3354 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3355 3356 qp = &qip->qu_rqueue; 3357 wqp = &qip->qu_wqueue; 3358 sq = &qip->qu_syncq; 3359 3360 qp->q_last = NULL; 3361 qp->q_next = NULL; 3362 qp->q_ptr = NULL; 3363 qp->q_flag = QUSE | QREADR; 3364 qp->q_bandp = NULL; 3365 qp->q_stream = NULL; 3366 qp->q_syncq = sq; 3367 qp->q_nband = 0; 3368 qp->q_nfsrv = NULL; 3369 qp->q_draining = 0; 3370 qp->q_syncqmsgs = 0; 3371 qp->q_spri = 0; 3372 qp->q_qtstamp = 0; 3373 qp->q_sqtstamp = 0; 3374 qp->q_fp = NULL; 3375 3376 wqp->q_last = NULL; 3377 wqp->q_next = NULL; 3378 wqp->q_ptr = NULL; 3379 wqp->q_flag = QUSE; 3380 wqp->q_bandp = NULL; 3381 wqp->q_stream = NULL; 3382 wqp->q_syncq = sq; 3383 wqp->q_nband = 0; 3384 wqp->q_nfsrv = NULL; 3385 wqp->q_draining = 0; 3386 wqp->q_syncqmsgs = 0; 3387 wqp->q_qtstamp = 0; 3388 wqp->q_sqtstamp = 0; 3389 wqp->q_spri = 0; 3390 3391 sq->sq_count = 0; 3392 sq->sq_rmqcount = 0; 3393 sq->sq_flags = 0; 3394 sq->sq_type = 0; 3395 sq->sq_callbflags = 0; 3396 sq->sq_cancelid = 0; 3397 sq->sq_ciputctrl = NULL; 3398 sq->sq_nciputctrl = 0; 3399 sq->sq_needexcl = 0; 3400 sq->sq_svcflags = 0; 3401 3402 return (qp); 3403 } 3404 3405 /* 3406 * Free a pair of queues and the "attached" syncq. 3407 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3408 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 
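 *
 * This is the inverse of allocq(): the read queue, the write queue and
 * the "attached" syncq live in one queinfo_t from queue_cache, so they
 * are returned to the cache together.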
3409 */ 3410 void 3411 freeq(queue_t *qp) 3412 { 3413 qband_t *qbp, *nqbp; 3414 syncq_t *sq, *outer; 3415 queue_t *wqp = _WR(qp); 3416 3417 ASSERT(qp->q_flag & QREADR); 3418 3419 /* 3420 * If a previously dispatched taskq job is scheduled to run 3421 * sync_service() or a service routine is scheduled for the 3422 * queues about to be freed, wait here until all service is 3423 * done on the queue and all associated queues and syncqs. 3424 */ 3425 wait_svc(qp); 3426 3427 (void) flush_syncq(qp->q_syncq, qp); 3428 (void) flush_syncq(wqp->q_syncq, wqp); 3429 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3430 3431 /* 3432 * Flush the queues before q_next is set to NULL. This is needed 3433 * in order to backenable any downstream queue before we go away. 3434 * Note: we are already removed from the stream so that the 3435 * backenabling will not cause any messages to be delivered to our 3436 * put procedures. 3437 */ 3438 flushq(qp, FLUSHALL); 3439 flushq(wqp, FLUSHALL); 3440 3441 /* Tidy up - removeq only does a half-remove from the stream */ 3442 qp->q_next = wqp->q_next = NULL; 3443 ASSERT(!(qp->q_flag & QENAB)); 3444 ASSERT(!(wqp->q_flag & QENAB)); 3445 3446 outer = qp->q_syncq->sq_outer; 3447 if (outer != NULL) { 3448 outer_remove(outer, qp->q_syncq); 3449 if (wqp->q_syncq != qp->q_syncq) 3450 outer_remove(outer, wqp->q_syncq); 3451 } 3452 /* 3453 * Free any syncqs that are outside what allocq returned. 3454 */ 3455 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3456 free_syncq(qp->q_syncq); 3457 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3458 free_syncq(wqp->q_syncq); 3459 3460 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3461 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3462 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3463 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3464 sq = SQ(qp); 3465 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3466 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3467 ASSERT(sq->sq_outer == NULL); 3468 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3469 ASSERT(sq->sq_callbpend == NULL); 3470 ASSERT(sq->sq_needexcl == 0); 3471 3472 if (sq->sq_ciputctrl != NULL) { 3473 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3474 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3475 sq->sq_nciputctrl, 0); 3476 ASSERT(ciputctrl_cache != NULL); 3477 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3478 sq->sq_ciputctrl = NULL; 3479 sq->sq_nciputctrl = 0; 3480 } 3481 3482 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3483 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3484 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3485 3486 qp->q_flag &= ~QUSE; 3487 wqp->q_flag &= ~QUSE; 3488 3489 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3490 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3491 3492 qbp = qp->q_bandp; 3493 while (qbp) { 3494 nqbp = qbp->qb_next; 3495 freeband(qbp); 3496 qbp = nqbp; 3497 } 3498 qbp = wqp->q_bandp; 3499 while (qbp) { 3500 nqbp = qbp->qb_next; 3501 freeband(qbp); 3502 qbp = nqbp; 3503 } 3504 kmem_cache_free(queue_cache, qp); 3505 } 3506 3507 /* 3508 * Allocate a qband structure.
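 *
 * The allocation is KM_NOSLEEP, so this may return NULL; callers such
 * as setqback() below must cope with that.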
3509 */ 3510 qband_t * 3511 allocband(void) 3512 { 3513 qband_t *qbp; 3514 3515 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3516 if (qbp == NULL) 3517 return (NULL); 3518 3519 qbp->qb_next = NULL; 3520 qbp->qb_count = 0; 3521 qbp->qb_mblkcnt = 0; 3522 qbp->qb_first = NULL; 3523 qbp->qb_last = NULL; 3524 qbp->qb_flag = 0; 3525 3526 return (qbp); 3527 } 3528 3529 /* 3530 * Free a qband structure. 3531 */ 3532 void 3533 freeband(qband_t *qbp) 3534 { 3535 kmem_cache_free(qband_cache, qbp); 3536 } 3537 3538 /* 3539 * Just like putnextctl(9F), except that allocb_wait() is used. 3540 * 3541 * Consolidation Private, and of course only callable from the stream head or 3542 * routines that may block. 3543 */ 3544 int 3545 putnextctl_wait(queue_t *q, int type) 3546 { 3547 mblk_t *bp; 3548 int error; 3549 3550 if ((datamsg(type) && (type != M_DELAY)) || 3551 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3552 return (0); 3553 3554 bp->b_datap->db_type = (unsigned char)type; 3555 putnext(q, bp); 3556 return (1); 3557 } 3558 3559 /* 3560 * run any possible bufcalls. 3561 */ 3562 void 3563 runbufcalls(void) 3564 { 3565 strbufcall_t *bcp; 3566 3567 mutex_enter(&bcall_monitor); 3568 mutex_enter(&strbcall_lock); 3569 3570 if (strbcalls.bc_head) { 3571 size_t count; 3572 int nevent; 3573 3574 /* 3575 * count how many events are on the list 3576 * now so we can check to avoid looping 3577 * in low memory situations 3578 */ 3579 nevent = 0; 3580 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3581 nevent++; 3582 3583 /* 3584 * get estimate of available memory from kmem_avail(). 3585 * awake all bufcall functions waiting for 3586 * memory whose request could be satisfied 3587 * by 'count' memory and let 'em fight for it. 3588 */ 3589 count = kmem_avail(); 3590 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3591 STRSTAT(bufcalls); 3592 --nevent; 3593 if (bcp->bc_size <= count) { 3594 bcp->bc_executor = curthread; 3595 mutex_exit(&strbcall_lock); 3596 (*bcp->bc_func)(bcp->bc_arg); 3597 mutex_enter(&strbcall_lock); 3598 bcp->bc_executor = NULL; 3599 cv_broadcast(&bcall_cv); 3600 strbcalls.bc_head = bcp->bc_next; 3601 kmem_free(bcp, sizeof (strbufcall_t)); 3602 } else { 3603 /* 3604 * too big, try again later - note 3605 * that nevent was decremented above 3606 * so we won't retry this one on this 3607 * iteration of the loop 3608 */ 3609 if (bcp->bc_next != NULL) { 3610 strbcalls.bc_head = bcp->bc_next; 3611 bcp->bc_next = NULL; 3612 strbcalls.bc_tail->bc_next = bcp; 3613 strbcalls.bc_tail = bcp; 3614 } 3615 } 3616 } 3617 if (strbcalls.bc_head == NULL) 3618 strbcalls.bc_tail = NULL; 3619 } 3620 3621 mutex_exit(&strbcall_lock); 3622 mutex_exit(&bcall_monitor); 3623 } 3624 3625 3626 /* 3627 * actually run queue's service routine. 
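 *
 * The service procedure is re-run for as long as qenable() sets QENAB
 * again while it is running; only once the queue stays quiescent are
 * QINSERVICE and the backenable flags cleared and waiters woken up.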
3628 */ 3629 static void 3630 runservice(queue_t *q) 3631 { 3632 qband_t *qbp; 3633 3634 ASSERT(q->q_qinfo->qi_srvp); 3635 again: 3636 entersq(q->q_syncq, SQ_SVC); 3637 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3638 "runservice starts:%p", q); 3639 3640 if (!(q->q_flag & QWCLOSE)) 3641 (*q->q_qinfo->qi_srvp)(q); 3642 3643 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3644 "runservice ends:(%p)", q); 3645 3646 leavesq(q->q_syncq, SQ_SVC); 3647 3648 mutex_enter(QLOCK(q)); 3649 if (q->q_flag & QENAB) { 3650 q->q_flag &= ~QENAB; 3651 mutex_exit(QLOCK(q)); 3652 goto again; 3653 } 3654 q->q_flag &= ~QINSERVICE; 3655 q->q_flag &= ~QBACK; 3656 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3657 qbp->qb_flag &= ~QB_BACK; 3658 /* 3659 * Wakeup thread waiting for the service procedure 3660 * to be run (strclose and qdetach). 3661 */ 3662 cv_broadcast(&q->q_wait); 3663 3664 mutex_exit(QLOCK(q)); 3665 } 3666 3667 /* 3668 * Background processing of bufcalls. 3669 */ 3670 void 3671 streams_bufcall_service(void) 3672 { 3673 callb_cpr_t cprinfo; 3674 3675 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3676 "streams_bufcall_service"); 3677 3678 mutex_enter(&strbcall_lock); 3679 3680 for (;;) { 3681 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3682 mutex_exit(&strbcall_lock); 3683 runbufcalls(); 3684 mutex_enter(&strbcall_lock); 3685 } 3686 if (strbcalls.bc_head != NULL) { 3687 clock_t wt, tick; 3688 3689 STRSTAT(bcwaits); 3690 /* Wait for memory to become available */ 3691 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3692 tick = SEC_TO_TICK(60); 3693 time_to_wait(&wt, tick); 3694 (void) cv_timedwait(&memavail_cv, &strbcall_lock, wt); 3695 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3696 } 3697 3698 /* Wait for new work to arrive */ 3699 if (strbcalls.bc_head == NULL) { 3700 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3701 cv_wait(&strbcall_cv, &strbcall_lock); 3702 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3703 } 3704 } 3705 } 3706 3707 /* 3708 * Background processing of streams background tasks which failed 3709 * taskq_dispatch. 3710 */ 3711 static void 3712 streams_qbkgrnd_service(void) 3713 { 3714 callb_cpr_t cprinfo; 3715 queue_t *q; 3716 3717 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3718 "streams_bkgrnd_service"); 3719 3720 mutex_enter(&service_queue); 3721 3722 for (;;) { 3723 /* 3724 * Wait for work to arrive. 3725 */ 3726 while ((freebs_list == NULL) && (qhead == NULL)) { 3727 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3728 cv_wait(&services_to_run, &service_queue); 3729 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3730 } 3731 /* 3732 * Handle all pending freebs requests to free memory. 3733 */ 3734 while (freebs_list != NULL) { 3735 mblk_t *mp = freebs_list; 3736 freebs_list = mp->b_next; 3737 mutex_exit(&service_queue); 3738 mblk_free(mp); 3739 mutex_enter(&service_queue); 3740 } 3741 /* 3742 * Run pending queues. 3743 */ 3744 while (qhead != NULL) { 3745 DQ(q, qhead, qtail, q_link); 3746 ASSERT(q != NULL); 3747 mutex_exit(&service_queue); 3748 queue_service(q); 3749 mutex_enter(&service_queue); 3750 } 3751 ASSERT(qhead == NULL && qtail == NULL); 3752 } 3753 } 3754 3755 /* 3756 * Background processing of streams background tasks which failed 3757 * taskq_dispatch. 3758 */ 3759 static void 3760 streams_sqbkgrnd_service(void) 3761 { 3762 callb_cpr_t cprinfo; 3763 syncq_t *sq; 3764 3765 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3766 "streams_sqbkgrnd_service"); 3767 3768 mutex_enter(&service_queue); 3769 3770 for (;;) { 3771 /* 3772 * Wait for work to arrive. 
3773 */ 3774 while (sqhead == NULL) { 3775 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3776 cv_wait(&syncqs_to_run, &service_queue); 3777 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3778 } 3779 3780 /* 3781 * Run pending syncqs. 3782 */ 3783 while (sqhead != NULL) { 3784 DQ(sq, sqhead, sqtail, sq_next); 3785 ASSERT(sq != NULL); 3786 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3787 mutex_exit(&service_queue); 3788 syncq_service(sq); 3789 mutex_enter(&service_queue); 3790 } 3791 } 3792 } 3793 3794 /* 3795 * Disable the syncq and wait for background syncq processing to complete. 3796 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3797 * list. 3798 */ 3799 void 3800 wait_sq_svc(syncq_t *sq) 3801 { 3802 mutex_enter(SQLOCK(sq)); 3803 sq->sq_svcflags |= SQ_DISABLED; 3804 if (sq->sq_svcflags & SQ_BGTHREAD) { 3805 syncq_t *sq_chase; 3806 syncq_t *sq_curr; 3807 int removed; 3808 3809 ASSERT(sq->sq_servcount == 1); 3810 mutex_enter(&service_queue); 3811 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3812 mutex_exit(&service_queue); 3813 if (removed) { 3814 sq->sq_svcflags &= ~SQ_BGTHREAD; 3815 sq->sq_servcount = 0; 3816 STRSTAT(sqremoved); 3817 goto done; 3818 } 3819 } 3820 while (sq->sq_servcount != 0) { 3821 sq->sq_flags |= SQ_WANTWAKEUP; 3822 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3823 } 3824 done: 3825 mutex_exit(SQLOCK(sq)); 3826 } 3827 3828 /* 3829 * Put a syncq on the list of syncqs to be serviced by the sqthread. 3830 * Add the argument to the end of the sqhead list and set the flag 3831 * indicating this syncq has been enabled. If it has already been 3832 * enabled, don't do anything. 3833 * This routine assumes that SQLOCK is held. 3834 * NOTE that the lock order is to have the SQLOCK first, 3835 * so if the service_syncq lock is held, we need to release it 3836 * before acquiring the SQLOCK (mostly relevant for the background 3837 * thread, and this seems to be common among the STREAMS global locks). 3838 * Note that the sq_svcflags are protected by the SQLOCK. 3839 */ 3840 void 3841 sqenable(syncq_t *sq) 3842 { 3843 /* 3844 * Holding SQLOCK is probably not important except for where we 3845 * believe this is being called from. At that point, it should be 3846 * held (and it is a pain to release it just for this routine, so 3847 * don't do it). 3848 */ 3849 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3850 3851 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3852 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3853 3854 /* 3855 * Do not put it on the list if a background thread is scheduled or the 3856 * syncq is disabled. 3857 */ 3858 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3859 return; 3860 3861 /* 3862 * Check whether we should enable sq at all. 3863 * Non-PERMOD syncqs may be drained by at most one thread. 3864 * PERMOD syncqs may be drained by several threads but we limit the 3865 * total amount to the lesser of 3866 * the number of queues on the syncq and 3867 * the number of CPUs. 3868 */ 3869 if (sq->sq_servcount != 0) { 3870 if (((sq->sq_type & SQ_PERMOD) == 0) || 3871 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3872 STRSTAT(sqtoomany); 3873 return; 3874 } 3875 } 3876 3877 sq->sq_tstamp = lbolt; 3878 STRSTAT(sqenables); 3879 3880 /* Attempt a taskq dispatch */ 3881 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3882 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3883 if (sq->sq_servid != NULL) { 3884 sq->sq_servcount++; 3885 return; 3886 } 3887 3888 /* 3889 * This taskq dispatch failed, but a previous one may have succeeded.
3890 * Don't try to schedule on the background thread whilst there is 3891 * outstanding taskq processing. 3892 */ 3893 if (sq->sq_servcount != 0) 3894 return; 3895 3896 /* 3897 * The system is low on resources and can't perform a non-sleeping 3898 * dispatch. Schedule the syncq for a background thread and mark the 3899 * syncq to avoid any further taskq dispatch attempts. 3900 */ 3901 mutex_enter(&service_queue); 3902 STRSTAT(taskqfails); 3903 ENQUEUE(sq, sqhead, sqtail, sq_next); 3904 sq->sq_svcflags |= SQ_BGTHREAD; 3905 sq->sq_servcount = 1; 3906 cv_signal(&syncqs_to_run); 3907 mutex_exit(&service_queue); 3908 } 3909 3910 /* 3911 * Note: fifo_close() depends on the mblk_t on the queue being freed 3912 * asynchronously. The asynchronous freeing of messages breaks the 3913 * recursive call chain of fifo_close() while there are I_SENDFD type of 3914 * messages referring to other file pointers on the queue. When closing 3915 * pipes this avoids stack overflow in the case of daisy-chained 3916 * pipes, and also avoids deadlock in the case of fifonode_t pairs (which 3917 * share the same fifolock_t). 3918 */ 3919 3920 void 3921 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 3922 { 3923 esb_queue_t *eqp = &system_esbq; 3924 3925 ASSERT(dbp->db_mblk == mp); 3926 3927 /* 3928 * Check data sanity. The dblock should have a non-empty free function. 3929 * It is better to panic here than later, when the dblock is freed 3930 * asynchronously and the context is lost. 3931 */ 3932 if (dbp->db_frtnp->free_func == NULL) { 3933 panic("freebs_enqueue: dblock %p has a NULL free callback", 3934 (void *)dbp); 3935 } 3936 3937 mutex_enter(&eqp->eq_lock); 3938 /* queue the new mblk on the esballoc queue */ 3939 if (eqp->eq_head == NULL) { 3940 eqp->eq_head = eqp->eq_tail = mp; 3941 } else { 3942 eqp->eq_tail->b_next = mp; 3943 eqp->eq_tail = mp; 3944 } 3945 eqp->eq_len++; 3946 3947 /* If we're the first thread to reach the threshold, process */ 3948 if (eqp->eq_len >= esbq_max_qlen && 3949 !(eqp->eq_flags & ESBQ_PROCESSING)) 3950 esballoc_process_queue(eqp); 3951 3952 esballoc_set_timer(eqp, esbq_timeout); 3953 mutex_exit(&eqp->eq_lock); 3954 } 3955 3956 static void 3957 esballoc_process_queue(esb_queue_t *eqp) 3958 { 3959 mblk_t *mp; 3960 3961 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 3962 3963 eqp->eq_flags |= ESBQ_PROCESSING; 3964 3965 do { 3966 /* 3967 * Detach the message chain for processing. 3968 */ 3969 mp = eqp->eq_head; 3970 eqp->eq_tail->b_next = NULL; 3971 eqp->eq_head = eqp->eq_tail = NULL; 3972 eqp->eq_len = 0; 3973 mutex_exit(&eqp->eq_lock); 3974 3975 /* 3976 * Process the message chain. 3977 */ 3978 esballoc_enqueue_mblk(mp); 3979 mutex_enter(&eqp->eq_lock); 3980 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0)); 3981 3982 eqp->eq_flags &= ~ESBQ_PROCESSING; 3983 } 3984 3985 /* 3986 * taskq callback routine to free esballoced mblks 3987 */ 3988 static void 3989 esballoc_mblk_free(mblk_t *mp) 3990 { 3991 mblk_t *nextmp; 3992 3993 for (; mp != NULL; mp = nextmp) { 3994 nextmp = mp->b_next; 3995 mp->b_next = NULL; 3996 mblk_free(mp); 3997 } 3998 } 3999 4000 static void 4001 esballoc_enqueue_mblk(mblk_t *mp) 4002 { 4003 4004 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp, 4005 TQ_NOSLEEP) == NULL) { 4006 mblk_t *first_mp = mp; 4007 /* 4008 * The system is low on resources and can't perform a non-sleeping 4009 * dispatch. Schedule for a background thread.
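 * The entire b_next chain is spliced onto the head of freebs_list in
 * one step, which is why we first walk to the last mblk of the chain
 * being handed off.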
static void
esballoc_enqueue_mblk(mblk_t *mp)
{

	if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
	    TQ_NOSLEEP) == NULL) {
		mblk_t *first_mp = mp;
		/*
		 * System is low on resources and can't perform a non-sleeping
		 * dispatch.  Schedule for a background thread.
		 */
		mutex_enter(&service_queue);
		STRSTAT(taskqfails);

		while (mp->b_next != NULL)
			mp = mp->b_next;

		mp->b_next = freebs_list;
		freebs_list = first_mp;
		cv_signal(&services_to_run);
		mutex_exit(&service_queue);
	}
}

static void
esballoc_timer(void *arg)
{
	esb_queue_t *eqp = arg;

	mutex_enter(&eqp->eq_lock);
	eqp->eq_flags &= ~ESBQ_TIMER;

	if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
	    eqp->eq_len > 0)
		esballoc_process_queue(eqp);

	esballoc_set_timer(eqp, esbq_timeout);
	mutex_exit(&eqp->eq_lock);
}

static void
esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
{
	ASSERT(MUTEX_HELD(&eqp->eq_lock));

	if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
		(void) timeout(esballoc_timer, eqp, eq_timeout);
		eqp->eq_flags |= ESBQ_TIMER;
	}
}

void
esballoc_queue_init(void)
{
	system_esbq.eq_len = 0;
	system_esbq.eq_head = system_esbq.eq_tail = NULL;
	system_esbq.eq_flags = 0;
}

/*
 * Set the QBACK or QB_BACK flag in the given queue for
 * the given priority band.
 */
void
setqback(queue_t *q, unsigned char pri)
{
	int i;
	qband_t *qbp;
	qband_t **qbpp;

	ASSERT(MUTEX_HELD(QLOCK(q)));
	if (pri != 0) {
		if (pri > q->q_nband) {
			qbpp = &q->q_bandp;
			while (*qbpp)
				qbpp = &(*qbpp)->qb_next;
			while (pri > q->q_nband) {
				if ((*qbpp = allocband()) == NULL) {
					cmn_err(CE_WARN,
					    "setqback: can't allocate qband\n");
					return;
				}
				(*qbpp)->qb_hiwat = q->q_hiwat;
				(*qbpp)->qb_lowat = q->q_lowat;
				q->q_nband++;
				qbpp = &(*qbpp)->qb_next;
			}
		}
		qbp = q->q_bandp;
		i = pri;
		while (--i)
			qbp = qbp->qb_next;
		qbp->qb_flag |= QB_BACK;
	} else {
		q->q_flag |= QBACK;
	}
}

int
strcopyin(void *from, void *to, size_t len, int copyflag)
{
	if (copyflag & U_TO_K) {
		ASSERT((copyflag & K_TO_K) == 0);
		if (copyin(from, to, len))
			return (EFAULT);
	} else {
		ASSERT(copyflag & K_TO_K);
		bcopy(from, to, len);
	}
	return (0);
}

int
strcopyout(void *from, void *to, size_t len, int copyflag)
{
	if (copyflag & U_TO_K) {
		if (copyout(from, to, len))
			return (EFAULT);
	} else {
		ASSERT(copyflag & K_TO_K);
		bcopy(from, to, len);
	}
	return (0);
}
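/*
 * strcopyin()/strcopyout() let one code path serve both user-originated
 * and kernel-originated requests: the caller passes U_TO_K when the far
 * side is a user address and K_TO_K when both buffers are in the kernel.
 * A hedged sketch of a caller (the struct and variable names are
 * illustrative only):
 *
 *	struct xx_args args;
 *	int flag = from_user ? U_TO_K : K_TO_K;
 *
 *	if ((error = strcopyin(arg, &args, sizeof (args), flag)) != 0)
 *		return (error);
 *	...
 *	return (strcopyout(&args, arg, sizeof (args), flag));
 */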
/*
 * strsignal_nolock() posts a signal to the process(es) at the stream head.
 * It assumes that the stream head lock is already held, whereas strsignal()
 * acquires the lock first.  This routine was created because a few callers
 * release the stream head lock before calling only to re-acquire it after
 * it returns.
 */
void
strsignal_nolock(stdata_t *stp, int sig, int32_t band)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	switch (sig) {
	case SIGPOLL:
		if (stp->sd_sigflags & S_MSG)
			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
		break;

	default:
		if (stp->sd_pgidp) {
			pgsignal(stp->sd_pgidp, sig);
		}
		break;
	}
}

void
strsignal(stdata_t *stp, int sig, int32_t band)
{
	TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
	    "strsignal:%p, %X, %X", stp, sig, band);

	mutex_enter(&stp->sd_lock);
	switch (sig) {
	case SIGPOLL:
		if (stp->sd_sigflags & S_MSG)
			strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
		break;

	default:
		if (stp->sd_pgidp) {
			pgsignal(stp->sd_pgidp, sig);
		}
		break;
	}
	mutex_exit(&stp->sd_lock);
}

void
strhup(stdata_t *stp)
{
	ASSERT(mutex_owned(&stp->sd_lock));
	pollwakeup(&stp->sd_pollist, POLLHUP);
	if (stp->sd_sigflags & S_HANGUP)
		strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
}

/*
 * Backenable the first queue upstream from `q' with a service procedure.
 */
void
backenable(queue_t *q, uchar_t pri)
{
	queue_t	*nq;

	/*
	 * our presence might not prevent other modules in our own
	 * stream from popping/pushing since the caller of getq might not
	 * have a claim on the queue (some drivers do a getq on somebody
	 * else's queue - they know that the queue itself is not going away
	 * but the framework has to guarantee q_next in that stream.)
	 */
	claimstr(q);

	/* find nearest back queue with service proc */
	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
		ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
	}

	if (nq) {
		kthread_t *freezer;
		/*
		 * backenable can be called either with no locks held
		 * or with the stream frozen (the latter occurs when a module
		 * calls rmvq with the stream frozen.)  If the stream is frozen
		 * by the caller the caller will hold all qlocks in the stream.
		 * Note that a frozen stream doesn't freeze a mated stream,
		 * so we explicitly check for that.
		 */
		freezer = STREAM(q)->sd_freezer;
		if (freezer != curthread || STREAM(q) != STREAM(nq)) {
			mutex_enter(QLOCK(nq));
		}
#ifdef DEBUG
		else {
			ASSERT(frozenstr(q));
			ASSERT(MUTEX_HELD(QLOCK(q)));
			ASSERT(MUTEX_HELD(QLOCK(nq)));
		}
#endif
		setqback(nq, pri);
		qenable_locked(nq);
		if (freezer != curthread || STREAM(q) != STREAM(nq))
			mutex_exit(QLOCK(nq));
	}
	releasestr(q);
}
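/*
 * The claimstr()/releasestr() pair used by backenable() above is the
 * standard way to walk q_next safely from outside a perimeter: the claim
 * blocks insertq() and removeq() (via strlock() waiting on sd_refcnt)
 * without blocking normal message flow.  A hedged sketch of the pattern
 * (xx_find is an illustrative name):
 *
 *	claimstr(q);
 *	for (nq = q->q_next; nq != NULL; nq = nq->q_next) {
 *		if (xx_find(nq))
 *			break;
 *	}
 *	...
 *	releasestr(q);
 *
 * The queue pointers stay valid only while the claim is held, so nothing
 * learned during the walk should be cached past releasestr().
 */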
/*
 * Return the appropriate errno when one of flags_to_check is set
 * in sd_flags.  Uses the exported error routines if they are set.
 * Will return 0 if no error is set (or if the exported error routines
 * do not return an error).
 *
 * If there is both a read and write error to check we prefer the read error.
 * Also, give preference to recorded errno's over the error functions.
 * The flags that are handled are:
 *	STPLEX		return EINVAL
 *	STRDERR		return sd_rerror (and clear if STRDERRNONPERSIST)
 *	STWRERR		return sd_werror (and clear if STWRERRNONPERSIST)
 *	STRHUP		return sd_werror
 *
 * If the caller indicates that the operation is a peek a nonpersistent error
 * is not cleared.
 */
int
strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
{
	int32_t sd_flag = stp->sd_flag & flags_to_check;
	int error = 0;

	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
	if (sd_flag & STPLEX)
		error = EINVAL;
	else if (sd_flag & STRDERR) {
		error = stp->sd_rerror;
		if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
			/*
			 * Read errors are non-persistent i.e. discarded once
			 * returned to a non-peeking caller.
			 */
			stp->sd_rerror = 0;
			stp->sd_flag &= ~STRDERR;
		}
		if (error == 0 && stp->sd_rderrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STRDERR;
				stp->sd_rderrfunc = NULL;
			}
		}
	} else if (sd_flag & STWRERR) {
		error = stp->sd_werror;
		if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
			/*
			 * Write errors are non-persistent i.e. discarded once
			 * returned to a non-peeking caller.
			 */
			stp->sd_werror = 0;
			stp->sd_flag &= ~STWRERR;
		}
		if (error == 0 && stp->sd_wrerrfunc != NULL) {
			int clearerr = 0;

			error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
			    &clearerr);
			if (clearerr) {
				stp->sd_flag &= ~STWRERR;
				stp->sd_wrerrfunc = NULL;
			}
		}
	} else if (sd_flag & STRHUP) {
		/* sd_werror set when STRHUP */
		error = stp->sd_werror;
	}
	return (error);
}
/*
 * single-thread open/close/push/pop
 * for twisted streams also
 */
int
strstartplumb(stdata_t *stp, int flag, int cmd)
{
	int waited = 1;
	int error = 0;

	if (STRMATED(stp)) {
		struct stdata *stmatep = stp->sd_mate;

		STRLOCKMATES(stp);
		while (waited) {
			waited = 0;
			while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stp->sd_lock);
				if (!cv_wait_sig(&stmatep->sd_monitor,
				    &stmatep->sd_lock)) {
					mutex_exit(&stmatep->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stmatep->sd_lock);
				STRLOCKMATES(stp);
			}
			while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
				if ((cmd == I_POP) &&
				    (flag & (FNDELAY|FNONBLOCK))) {
					STRUNLOCKMATES(stp);
					return (EAGAIN);
				}
				waited = 1;
				mutex_exit(&stmatep->sd_lock);
				if (!cv_wait_sig(&stp->sd_monitor,
				    &stp->sd_lock)) {
					mutex_exit(&stp->sd_lock);
					return (EINTR);
				}
				mutex_exit(&stp->sd_lock);
				STRLOCKMATES(stp);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					STRUNLOCKMATES(stp);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		STRUNLOCKMATES(stp);
	} else {
		mutex_enter(&stp->sd_lock);
		while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
			if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
			    (flag & (FNDELAY|FNONBLOCK))) {
				mutex_exit(&stp->sd_lock);
				return (EAGAIN);
			}
			if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
				mutex_exit(&stp->sd_lock);
				return (EINTR);
			}
			if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
				error = strgeterr(stp,
				    STRDERR|STWRERR|STRHUP|STPLEX, 0);
				if (error != 0) {
					mutex_exit(&stp->sd_lock);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		mutex_exit(&stp->sd_lock);
	}
	return (0);
}

/*
 * Complete the plumbing operation associated with stream `stp'.
 */
void
strendplumb(stdata_t *stp)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT(stp->sd_flag & STRPLUMB);
	stp->sd_flag &= ~STRPLUMB;
	cv_broadcast(&stp->sd_monitor);
}
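/*
 * strstartplumb()/strendplumb() bracket every plumbing operation.  A
 * hedged sketch of the usual caller pattern (as in the I_PUSH/I_POP
 * paths):
 *
 *	if ((error = strstartplumb(stp, flag, cmd)) != 0)
 *		return (error);
 *	... perform the push/pop ...
 *	mutex_enter(&stp->sd_lock);
 *	strendplumb(stp);
 *	mutex_exit(&stp->sd_lock);
 *
 * Every return path between the two calls must still reach strendplumb(),
 * since a leaked STRPLUMB flag would block all later plumbing on the
 * stream.
 */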
/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively.  While the close case in general is harder, open and
 * close have significant similarities.
 *
 * During close the STREAMS framework has to both ensure that there
 * are no stale references to the queue pair (and syncq) that
 * are being closed and also provide the guarantees that are documented
 * in qprocsoff(9F).
 * If there are stale references to the queue that is closing it can
 * result in kernel memory corruption or kernel panics.
 *
 * Note that it is up to the module/driver to ensure that it itself
 * does not have any stale references to the closing queues once its close
 * routine returns.  This includes:
 *  - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
 *    associated with the queues.  For timeout and bufcall callbacks the
 *    module/driver also has to ensure (or wait for) any callbacks that
 *    are in progress.
 *  - If the module/driver is using esballoc it has to ensure that any
 *    esballoc free functions do not refer to a queue that has closed.
 *    (Note that in general the close routine can not wait for the esballoc'ed
 *    messages to be freed since that can cause a deadlock.)
 *  - Cancelling any interrupts that refer to the closing queues and
 *    also ensuring that there are no interrupts in progress that will
 *    refer to the closing queues once the close routine returns.
 *  - For multiplexors removing any driver global state that refers to
 *    the closing queue and also ensuring that there are no threads in
 *    the multiplexor that have picked up a queue pointer but not yet
 *    finished using it.
 *
 * In addition, a driver/module can only reference the q_next pointer
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback procedure executing "on" the correct
 * stream.  Thus it can not reference the q_next pointer in an interrupt
 * routine or a timeout, bufcall or esballoc callback routine.  Likewise
 * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
 * In all the cases when the driver/module can not access the q_next
 * field it must use the *next* versions e.g. canputnext instead of
 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
 *
 *
 * Assuming that the driver/module conforms to the above constraints
 * the STREAMS framework has to avoid stale references to q_next for all
 * the framework internal cases which include (but are not limited to):
 *  - Threads in canput/canputnext/backenable and elsewhere that are
 *    walking q_next.
 *  - Messages on a syncq that have a reference to the queue through b_queue.
 *  - Messages on an outer perimeter (syncq) that have a reference to the
 *    queue through b_queue.
 *  - Threads that use q_nfsrv (e.g. canput) to find a queue.
 *    Note that only canput and bcanput use q_nfsrv without any locking.
 *
 * Providing the qprocsoff(9F) guarantees means that, after qprocsoff
 * returns, the framework has to ensure that no threads can enter the put
 * or service routines for the closing read or write-side queue.
 * In addition to preventing "direct" entry into the put procedures
 * the framework also has to prevent messages being drained from
 * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
 * mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned.
 * Note that if a module/driver uses put(9F) on one of its own queues
 * it is up to the module/driver to ensure that the put() doesn't
 * get called when the queue is closing.
 *
 *
 * The framework aspects of the above "contract" are implemented by
 * qprocsoff, removeq, and strlock:
 *  - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
 *    entering the service procedures.
 *  - strlock acquires the sd_lock and sd_reflock to prevent putnext,
 *    canputnext, backenable etc from dereferencing the q_next that will
 *    soon change.
 *  - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
 *    or other q_next walker that uses claimstr/releasestr to finish.
 *  - optionally for every syncq in the stream strlock acquires all the
 *    sq_lock's and waits for all sq_counts to drop to a value that indicates
 *    that no thread executes in the put or service procedures and that no
 *    thread is draining into the module/driver.  This ensures that no
 *    open, close, put, service, or qtimeout/qbufcall callback procedure is
 *    currently executing hence no such thread can end up with the old stale
 *    q_next value and no canput/backenable can have the old stale
 *    q_nfsrv/q_next.
 *  - qdetach (wait_svc) makes sure that any scheduled or running threads
 *    have either finished or observed the QWCLOSE flag and gone away.
 */
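/*
 * As a concrete illustration of the "*next* versions" rule above, a put
 * procedure forwarding downstream should be written as in this sketch
 * (xx_put is an illustrative name):
 *
 *	static int
 *	xx_put(queue_t *q, mblk_t *mp)
 *	{
 *		if (canputnext(q))
 *			putnext(q, mp);
 *		else
 *			(void) putq(q, mp);
 *		return (0);
 *	}
 *
 * canputnext(q) claims the stream before dereferencing q->q_next, whereas
 * an open-coded canput(q->q_next) can trip over a q_next that is being
 * changed by insertq/removeq.
 */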
/*
 * Get all the locks necessary to change q_next.
 *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
 * Since threads calling removeq() are in the process of removing their queues
 * from the stream, we do not need to worry about them accessing a stale q_next
 * pointer and thus we do not need to wait for them to exit (in fact, waiting
 * for them can cause deadlock).
 *
 * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
 * increase on some syncq while it is waiting on some other syncq).
 *
 * Assumes that only one thread attempts to call strlock for a given
 * stream.  If this is not the case the two threads would deadlock.
 * This assumption is guaranteed since strlock is only called by insertq
 * and removeq and streams plumbing changes are single-threaded for
 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
 *
 * For pipes, it is not difficult to atomically designate a pair of streams
 * to be mated.  Once mated atomically by the framework the twisted pair remain
 * configured that way until dismantled atomically by the framework.
 * When plumbing takes place on a twisted stream it is necessary to ensure that
 * this operation is done exclusively on the twisted stream since two such
 * operations, each initiated on a different end of the pipe, will deadlock
 * waiting for each other to complete.
 *
 * On entry, no locks should be held.
 * The locks acquired and held by strlock depend on a few factors.
 *  - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
 *    and held on exit and all sq_count are at an acceptable level.
 *  - In all cases, sd_lock and sd_reflock are acquired and held on exit with
 *    sd_refcnt being zero.
 */

static void
strlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql, *sql2;
retry:
	/*
	 * Wait for any claimstr to go away.
	 */
	if (STRMATED(stp)) {
		struct stdata *stp1, *stp2;

		STRLOCKMATES(stp);
		/*
		 * Note that the selection of locking order is not
		 * important, just that they are always acquired in
		 * the same order.  To assure this, we choose this
		 * order based on the value of the pointer, and since
		 * the pointer will not change for the life of this
		 * pair, we will always grab the locks in the same
		 * order (and hence, prevent deadlocks).
		 */
		if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
			stp1 = stp;
			stp2 = stp->sd_mate;
		} else {
			stp2 = stp;
			stp1 = stp->sd_mate;
		}
		mutex_enter(&stp1->sd_reflock);
		if (stp1->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
			mutex_exit(&stp1->sd_reflock);
			goto retry;
		}
		mutex_enter(&stp2->sd_reflock);
		if (stp2->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			mutex_exit(&stp1->sd_reflock);
			cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
			mutex_exit(&stp2->sd_reflock);
			goto retry;
		}
		STREAM_PUTLOCKS_ENTER(stp1);
		STREAM_PUTLOCKS_ENTER(stp2);
	} else {
		mutex_enter(&stp->sd_lock);
		mutex_enter(&stp->sd_reflock);
		while (stp->sd_refcnt > 0) {
			mutex_exit(&stp->sd_lock);
			cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
			if (mutex_tryenter(&stp->sd_lock) == 0) {
				mutex_exit(&stp->sd_reflock);
				mutex_enter(&stp->sd_lock);
				mutex_enter(&stp->sd_reflock);
			}
		}
		STREAM_PUTLOCKS_ENTER(stp);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		syncq_t *sq = sql->sql_sq;
		uint16_t count;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		ASSERT(sq->sq_rmqcount <= count);
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (count == sq->sq_rmqcount)
			continue;

		/* Failed - drop all locks that we have acquired so far */
		if (STRMATED(stp)) {
			STREAM_PUTLOCKS_EXIT(stp);
			STREAM_PUTLOCKS_EXIT(stp->sd_mate);
			STRUNLOCKMATES(stp);
			mutex_exit(&stp->sd_reflock);
			mutex_exit(&stp->sd_mate->sd_reflock);
		} else {
			STREAM_PUTLOCKS_EXIT(stp);
			mutex_exit(&stp->sd_lock);
			mutex_exit(&stp->sd_reflock);
		}
		for (sql2 = sqlist->sqlist_head; sql2 != sql;
		    sql2 = sql2->sql_next) {
			SQ_PUTLOCKS_EXIT(sql2->sql_sq);
			mutex_exit(SQLOCK(sql2->sql_sq));
		}

		/*
		 * The wait loop below may starve when there are many threads
		 * claiming the syncq.  This is especially a problem with permod
		 * syncqs (IP).  To lessen the impact of the problem we
		 * increment sq_needexcl and clear fastbits so that putnexts
		 * will slow down and call sqenable instead of draining right
		 * away.
		 */
		sq->sq_needexcl++;
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		while (count > sq->sq_rmqcount) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			SQ_PUTLOCKS_EXIT(sq);
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0)
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		SQ_PUTLOCKS_EXIT(sq);
		ASSERT(count == sq->sq_rmqcount);
		mutex_exit(SQLOCK(sq));
		goto retry;
	}
}
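/*
 * The address-comparison trick strlock() uses for the mated case is a
 * general recipe for taking two peer locks without deadlock: always
 * acquire the lock at the higher (or, equivalently, lower) address first.
 * A minimal sketch of the idiom for two distinct objects a and b:
 *
 *	if (&a->lock > &b->lock) {
 *		mutex_enter(&a->lock);
 *		mutex_enter(&b->lock);
 *	} else {
 *		mutex_enter(&b->lock);
 *		mutex_enter(&a->lock);
 *	}
 *
 * Because the ordering key (the address) is fixed for the life of the
 * objects, all threads agree on the order and circular waits cannot form.
 */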
/*
 * Drop all the locks that strlock acquired.
 */
static void
strunlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql;

	if (STRMATED(stp)) {
		STREAM_PUTLOCKS_EXIT(stp);
		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
		STRUNLOCKMATES(stp);
		mutex_exit(&stp->sd_reflock);
		mutex_exit(&stp->sd_mate->sd_reflock);
	} else {
		STREAM_PUTLOCKS_EXIT(stp);
		mutex_exit(&stp->sd_lock);
		mutex_exit(&stp->sd_reflock);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		SQ_PUTLOCKS_EXIT(sql->sql_sq);
		mutex_exit(SQLOCK(sql->sql_sq));
	}
}

/*
 * When the inserted module has a service procedure, we need to check
 * whether the next module with a service procedure is in flow control
 * and, if so, trigger the backenable.
 */
static void
backenable_insertedq(queue_t *q)
{
	qband_t *qbp;

	claimstr(q);
	if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
		if (q->q_next->q_nfsrv->q_flag & QWANTW)
			backenable(q, 0);

		qbp = q->q_next->q_nfsrv->q_bandp;
		for (; qbp != NULL; qbp = qbp->qb_next)
			if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
				backenable(q, qbp->qb_first->b_band);
	}
	releasestr(q);
}
/*
 * Given two read queues, insert a new one after another.
 *
 * This routine acquires all the necessary locks in order to change
 * q_next and related pointers using strlock().
 * It depends on the stream head ensuring that there are no concurrent
 * insertq or removeq on the same stream.  The stream head ensures this
 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
 *
 * Note that no syncq locks are held during the q_next change.  This is
 * applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream.  Thus drivers/modules that do a
 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
 * applied this optimization to all streams.
 */
void
insertq(struct stdata *stp, queue_t *new)
{
	queue_t	*after;
	queue_t *wafter;
	queue_t *wnew = _WR(new);
	boolean_t have_fifo = B_FALSE;

	if (new->q_flag & _QINSERTING) {
		ASSERT(stp->sd_vnode->v_type != VFIFO);
		after = new->q_next;
		wafter = _WR(new->q_next);
	} else {
		after = _RD(stp->sd_wrq);
		wafter = stp->sd_wrq;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
	    "insertq:%p, %p", after, new);
	ASSERT(after->q_flag & QREADR);
	ASSERT(new->q_flag & QREADR);

	strlock(stp, NULL);

	/* Do we have a FIFO? */
	if (wafter->q_next == after) {
		have_fifo = B_TRUE;
		wnew->q_next = new;
	} else {
		wnew->q_next = wafter->q_next;
	}
	new->q_next = after;

	set_nfsrv_ptr(new, wnew, after, wafter);
	/*
	 * set_nfsrv_ptr() needs to know if this is an insertion or not,
	 * so only reset this flag after calling it.
	 */
	new->q_flag &= ~_QINSERTING;

	if (have_fifo) {
		wafter->q_next = wnew;
	} else {
		if (wafter->q_next)
			_OTHERQ(wafter->q_next)->q_next = new;
		wafter->q_next = wnew;
	}

	set_qend(new);
	/* The QEND flag might have to be updated for the upstream guy */
	set_qend(after);

	ASSERT(_SAMESTR(new) == O_SAMESTR(new));
	ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
	ASSERT(_SAMESTR(after) == O_SAMESTR(after));
	ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
	strsetuio(stp);

	/*
	 * If this was a module insertion, bump the push count.
	 */
	if (!(new->q_flag & QISDRV))
		stp->sd_pushcnt++;

	strunlock(stp, NULL);

	/* check if the write Q needs backenable */
	backenable_insertedq(wnew);

	/* check if the read Q needs backenable */
	backenable_insertedq(new);
}
/*
 * Given a read queue, unlink it from any neighbors.
 *
 * This routine acquires all the necessary locks in order to
 * change q_next and related pointers and also guard against
 * stale references (e.g. through q_next) to the queue that
 * is being removed.  It also plays part of the role in ensuring
 * that the module's/driver's put procedure doesn't get called
 * after qprocsoff returns.
 *
 * Removeq depends on the stream head ensuring that there are
 * no concurrent insertq or removeq on the same stream.  The
 * stream head ensures this using the flags STWOPEN, STRCLOSE and
 * STRPLUMB.
 *
 * The set of locks needed to remove the queue is different in
 * different cases:
 *
 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
 * waiting for the syncq reference count to drop to 0 indicating that no
 * non-close threads are present anywhere in the stream.  This ensures that any
 * module/driver can reference q_next in its open, close, put, or service
 * procedures.
 *
 * The sq_rmqcount counter tracks the number of threads inside removeq().
 * strlock() ensures that there are either no threads executing inside the
 * perimeter, or only a thread calling qprocsoff().
 *
 * strlock() compares the value of sq_count with the number of threads inside
 * removeq() and waits until sq_count is equal to sq_rmqcount.  We need to
 * wake up any threads waiting in strlock() when the sq_rmqcount increases.
 */

void
removeq(queue_t *qp)
{
	queue_t *wqp = _WR(qp);
	struct stdata *stp = STREAM(qp);
	sqlist_t *sqlist = NULL;
	boolean_t isdriver;
	int moved;
	syncq_t *sq = qp->q_syncq;
	syncq_t *wsq = wqp->q_syncq;

	ASSERT(stp);

	TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
	    "removeq:%p %p", qp, wqp);
	ASSERT(qp->q_flag&QREADR);

	/*
	 * For queues using Synchronous streams, we must wait for all threads
	 * in rwnext() to drain out before proceeding.
	 */
	if (qp->q_flag & QSYNCSTR) {
		/* First, we need to wake up any threads blocked in rwnext() */
		mutex_enter(SQLOCK(sq));
		if (sq->sq_flags & SQ_WANTWAKEUP) {
			sq->sq_flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		mutex_exit(SQLOCK(sq));

		if (wsq != sq) {
			mutex_enter(SQLOCK(wsq));
			if (wsq->sq_flags & SQ_WANTWAKEUP) {
				wsq->sq_flags &= ~SQ_WANTWAKEUP;
				cv_broadcast(&wsq->sq_wait);
			}
			mutex_exit(SQLOCK(wsq));
		}

		mutex_enter(QLOCK(qp));
		while (qp->q_rwcnt > 0) {
			qp->q_flag |= QWANTRMQSYNC;
			cv_wait(&qp->q_wait, QLOCK(qp));
		}
		mutex_exit(QLOCK(qp));

		mutex_enter(QLOCK(wqp));
		while (wqp->q_rwcnt > 0) {
			wqp->q_flag |= QWANTRMQSYNC;
			cv_wait(&wqp->q_wait, QLOCK(wqp));
		}
		mutex_exit(QLOCK(wqp));
	}

	mutex_enter(SQLOCK(sq));
	sq->sq_rmqcount++;
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	mutex_exit(SQLOCK(sq));

	isdriver = (qp->q_flag & QISDRV);

	sqlist = sqlist_build(qp, stp, STRMATED(stp));
	strlock(stp, sqlist);

	reset_nfsrv_ptr(qp, wqp);

	ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
	ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
	/* Do we have a FIFO? */
	if (wqp->q_next == qp) {
		stp->sd_wrq->q_next = _RD(stp->sd_wrq);
	} else {
		if (wqp->q_next)
			backq(qp)->q_next = qp->q_next;
		if (qp->q_next)
			backq(wqp)->q_next = wqp->q_next;
	}

	/* The QEND flag might have to be updated for the upstream guy */
	if (qp->q_next)
		set_qend(qp->q_next);

	ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
	ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));

	/*
	 * Move any messages destined for the put procedures to the next
	 * syncq in line.  Otherwise free them.
	 */
	moved = 0;
	/*
	 * Quick check to see whether there are any messages or events.
	 */
	if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
		moved += propagate_syncq(qp);
	if (wqp->q_syncqmsgs != 0 ||
	    (wqp->q_syncq->sq_flags & SQ_EVENTS))
		moved += propagate_syncq(wqp);

	strsetuio(stp);

	/*
	 * If this was a module removal, decrement the push count.
	 */
	if (!isdriver)
		stp->sd_pushcnt--;

	strunlock(stp, sqlist);
	sqlist_free(sqlist);
	/*
	 * Make sure any messages that were propagated are drained.
	 * Also clear any QFULL bit caused by messages that were propagated.
	 */

	if (qp->q_next != NULL) {
		clr_qfull(qp);
		/*
		 * For the driver calling qprocsoff, propagate_syncq
		 * frees all the messages instead of putting them in
		 * the stream head.
		 */
		if (!isdriver && (moved > 0))
			emptysq(qp->q_next->q_syncq);
	}
	if (wqp->q_next != NULL) {
		clr_qfull(wqp);
		/*
		 * We come here for any pop of a module except for the
		 * case of driver being removed.  We don't call emptysq
		 * if we did not move any messages.  This will avoid holding
		 * PERMOD syncq locks in emptysq.
		 */
		if (moved > 0)
			emptysq(wqp->q_next->q_syncq);
	}

	mutex_enter(SQLOCK(sq));
	sq->sq_rmqcount--;
	mutex_exit(SQLOCK(sq));
}

/*
 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
 * SQ_WRITER) on a syncq.
 * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the
 * sync queue and waits until sq_count reaches maxcnt.
 *
 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller
 * does not care about putnext threads that are in the middle of calling put
 * entry points.
 *
 * This routine is used for both inner and outer syncqs.
 */
static void
blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
{
	uint16_t count = 0;

	mutex_enter(SQLOCK(sq));
	/*
	 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
	 * SQ_FROZEN will be set if there is a frozen stream that has a
	 * queue which also refers to this "shared" syncq.
	 * SQ_BLOCKED will be set if there is "off" queue which also
	 * refers to this "shared" syncq.
	 */
	if (maxcnt != -1) {
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	sq->sq_needexcl++;
	ASSERT(sq->sq_needexcl != 0);	/* wraparound */

	while ((sq->sq_flags & flag) ||
	    (maxcnt != -1 && count > (unsigned)maxcnt)) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		if (maxcnt != -1) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (maxcnt != -1) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
	}
	sq->sq_needexcl--;
	sq->sq_flags |= flag;
	ASSERT(maxcnt == -1 || count == maxcnt);
	if (maxcnt != -1) {
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
		SQ_PUTLOCKS_EXIT(sq);
	} else if (sq->sq_needexcl == 0) {
		SQ_PUTCOUNT_SETFAST(sq);
	}

	mutex_exit(SQLOCK(sq));
}
/*
 * Reset a flag that was set with blocksq.
 *
 * Can not use this routine to reset SQ_WRITER.
 *
 * If "isouter" is set then the syncq is assumed to be an outer perimeter
 * and drain_syncq is not called.  Instead we rely on the qwriter_outer thread
 * to handle the queued qwriter operations.
 *
 * No need to grab sq_putlocks here.  See comment in strsubr.h that explains
 * when sq_putlocks are used.
 */
static void
unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(resetflag != SQ_WRITER);
	ASSERT(sq->sq_flags & resetflag);
	flags = sq->sq_flags & ~resetflag;
	sq->sq_flags = flags;
	if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		sq->sq_flags = flags;
		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
			if (!isouter) {
				/* drain_syncq drops SQLOCK */
				drain_syncq(sq);
				return;
			}
		}
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Reset a flag that was set with blocksq.
 * Does not drain the syncq.  Use emptysq() for that.
 * Returns 1 if SQ_QUEUED is set.  Otherwise 0.
 *
 * No need to grab sq_putlocks here.  See comment in strsubr.h that explains
 * when sq_putlocks are used.
 */
static int
dropsq(syncq_t *sq, uint16_t resetflag)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(sq->sq_flags & resetflag);
	flags = sq->sq_flags & ~resetflag;
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	sq->sq_flags = flags;
	mutex_exit(SQLOCK(sq));
	if (flags & SQ_QUEUED)
		return (1);
	return (0);
}

/*
 * Empty all the messages on a syncq.
 *
 * No need to grab sq_putlocks here.  See comment in strsubr.h that explains
 * when sq_putlocks are used.
 */
static void
emptysq(syncq_t *sq)
{
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
		/*
		 * To prevent potential recursive invocation of drain_syncq we
		 * do not call drain_syncq if count is non-zero.
		 */
		if (sq->sq_count == 0) {
			/* drain_syncq() drops SQLOCK */
			drain_syncq(sq);
			return;
		} else
			sqenable(sq);
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Ordered insert while removing duplicates.
 */
static void
sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
{
	syncql_t *sqlp, **prev_sqlpp, *new_sqlp;

	prev_sqlpp = &sqlist->sqlist_head;
	while ((sqlp = *prev_sqlpp) != NULL) {
		if (sqlp->sql_sq >= sqp) {
			if (sqlp->sql_sq == sqp)	/* duplicate */
				return;
			break;
		}
		prev_sqlpp = &sqlp->sql_next;
	}
	new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
	ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
	new_sqlp->sql_next = sqlp;
	new_sqlp->sql_sq = sqp;
	*prev_sqlpp = new_sqlp;
}

/*
 * Walk the write side queues until we hit either the driver
 * or a twist in the stream (_SAMESTR will return false in both
 * these cases) then turn around and walk the read side queues
 * back up to the stream head.
 */
static void
sqlist_insertall(sqlist_t *sqlist, queue_t *q)
{
	while (q != NULL) {
		sqlist_insert(sqlist, q->q_syncq);

		if (_SAMESTR(q))
			q = q->q_next;
		else if (!(q->q_flag & QREADR))
			q = _RD(q);
		else
			q = NULL;
	}
}
/*
 * Allocate and build a list of all syncqs in a stream and the syncq(s)
 * associated with the "q" parameter.  The resulting list is sorted in a
 * canonical order and is free of duplicates.
 * Assumes the passed queue is a _RD(q).
 */
static sqlist_t *
sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
{
	sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);

	/*
	 * start with the current queue/qpair
	 */
	ASSERT(q->q_flag & QREADR);

	sqlist_insert(sqlist, q->q_syncq);
	sqlist_insert(sqlist, _WR(q)->q_syncq);

	sqlist_insertall(sqlist, stp->sd_wrq);
	if (do_twist)
		sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);

	return (sqlist);
}

static sqlist_t *
sqlist_alloc(struct stdata *stp, int kmflag)
{
	size_t sqlist_size;
	sqlist_t *sqlist;

	/*
	 * Allocate 2 syncql_t's for each pushed module.  Note that
	 * the sqlist_t structure already has 4 syncql_t's built in:
	 * 2 for the stream head, and 2 for the driver/other stream head.
	 */
	sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
	    sizeof (sqlist_t);
	if (STRMATED(stp))
		sqlist_size += 2 * sizeof (syncql_t) *
		    stp->sd_mate->sd_pushcnt;
	sqlist = kmem_alloc(sqlist_size, kmflag);

	sqlist->sqlist_head = NULL;
	sqlist->sqlist_size = sqlist_size;
	sqlist->sqlist_index = 0;

	return (sqlist);
}

/*
 * Free the list created by sqlist_alloc().
 */
static void
sqlist_free(sqlist_t *sqlist)
{
	kmem_free(sqlist, sqlist->sqlist_size);
}
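/*
 * To make the sizing above concrete: for an unmated stream with three
 * pushed modules (sd_pushcnt == 3), sqlist_alloc() reserves room for
 * 2 * 3 == 6 syncql_t slots beyond the sqlist_t itself, while the four
 * entries embedded in sqlist_t cover the stream head pair and the driver
 * pair.  Every queue pair the walk in sqlist_insertall() can visit thus
 * has a slot, and because sqlist_insert() discards duplicate syncqs the
 * array can never be overrun (the ASSERT there checks exactly this
 * bound).
 */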
/*
 * Prevent any new entries into any syncq in this stream.
 * Used by freezestr.
 */
void
strblock(queue_t *q)
{
	struct stdata	*stp;
	syncql_t	*sql;
	sqlist_t	*sqlist;

	q = _RD(q);

	stp = STREAM(q);
	ASSERT(stp != NULL);

	/*
	 * Get a sorted list with all the duplicates removed containing
	 * all the syncqs referenced by this stream.
	 */
	sqlist = sqlist_build(q, stp, B_FALSE);
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		blocksq(sql->sql_sq, SQ_FROZEN, -1);
	sqlist_free(sqlist);
}

/*
 * Release the block on new entries into this stream.
 */
void
strunblock(queue_t *q)
{
	struct stdata	*stp;
	syncql_t	*sql;
	sqlist_t	*sqlist;
	int drain_needed;

	q = _RD(q);

	/*
	 * Get a sorted list with all the duplicates removed containing
	 * all the syncqs referenced by this stream.
	 * Have to drop the SQ_FROZEN flag on all the syncqs before
	 * starting to drain them; otherwise the draining might
	 * cause a freezestr in some module on the stream (which
	 * would deadlock).
	 */
	stp = STREAM(q);
	ASSERT(stp != NULL);
	sqlist = sqlist_build(q, stp, B_FALSE);
	drain_needed = 0;
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
	if (drain_needed) {
		for (sql = sqlist->sqlist_head; sql != NULL;
		    sql = sql->sql_next)
			emptysq(sql->sql_sq);
	}
	sqlist_free(sqlist);
}
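/*
 * strblock()/strunblock() are the engine behind freezestr(9F) and
 * unfreezestr(9F).  A hedged sketch of how a module might use the public
 * interfaces to examine a stream's state safely (xx_peek is an
 * illustrative name):
 *
 *	freezestr(q);		-- SQ_FROZEN set on every syncq
 *	xx_peek(q);		-- stream state is stable here
 *	unfreezestr(q);		-- dropsq + emptysq, as above
 *
 * While frozen, only the freezing thread may operate on the stream, which
 * is why strunblock() defers all draining until every SQ_FROZEN flag has
 * been dropped.
 */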
#ifdef DEBUG
static int
qprocsareon(queue_t *rq)
{
	if (rq->q_next == NULL)
		return (0);
	return (_WR(rq->q_next)->q_next == _WR(rq));
}

int
qclaimed(queue_t *q)
{
	uint_t count;

	count = q->q_syncq->sq_count;
	SUM_SQ_PUTCOUNTS(q->q_syncq, count);
	return (count != 0);
}

/*
 * Check if anyone has frozen this stream with freezestr.
 */
int
frozenstr(queue_t *q)
{
	return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
}
#endif /* DEBUG */

/*
 * Enter a queue.
 * Obsoleted interface.  Should not be used.
 */
void
enterq(queue_t *q)
{
	entersq(q->q_syncq, SQ_CALLBACK);
}

void
leaveq(queue_t *q)
{
	leavesq(q->q_syncq, SQ_CALLBACK);
}
/*
 * Enter a perimeter.  c_inner and c_outer specify which concurrency bits
 * to check.
 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
 * calls and the running of open, close and service procedures.
 *
 * If the c_inner bit is set there is no need to grab sq_putlocks since we
 * don't care if other threads have entered or are entering put entry point.
 *
 * If the c_inner bit is set it might have been possible to use
 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
 * open/close path for IP) but since the count may need to be decremented in
 * qwait() we wouldn't know which counter to decrement.  Currently the counter
 * is selected by the current cpu_seqid and the current CPU can change at any
 * moment.  XXX in the future we might use curthread id bits to select the
 * counter and this would stay constant across routine calls.
 */
void
entersq(syncq_t *sq, int entrypoint)
{
	uint16_t	count = 0;
	uint16_t	flags;
	uint16_t	waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
	uint16_t	type;
	uint_t		c_inner = entrypoint & SQ_CI;
	uint_t		c_outer = entrypoint & SQ_CO;

	/*
	 * Increment ref count to keep closes out of this queue.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (!(type & c_inner)) {
		/* Make sure all putcounts now use slowlock. */
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		sq->sq_needexcl++;
		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
		waitflags |= SQ_MESSAGES;
	}
	/*
	 * Wait until we can enter the inner perimeter.
	 * If we want exclusive access we wait until sq_count is 0.
	 * We have to do this before entering the outer perimeter in order
	 * to preserve put/close message ordering.
	 */
	while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
		sq->sq_flags = flags | SQ_WANTWAKEUP;
		if (!(type & c_inner)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		if (!(type & c_inner)) {
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		flags = sq->sq_flags;
	}

	if (!(type & c_inner)) {
		ASSERT(sq->sq_needexcl > 0);
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0) {
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		}
	}

	/* Check if we need to enter the outer perimeter */
	if (!(type & c_outer)) {
		/*
		 * We have to enter the outer perimeter exclusively before
		 * we can increment sq_count to avoid deadlock.  This implies
		 * that we have to re-check sq_flags and sq_count.
		 *
		 * is it possible to have c_inner set when c_outer is not set?
		 */
		if (!(type & c_inner)) {
			SQ_PUTLOCKS_EXIT(sq);
		}
		mutex_exit(SQLOCK(sq));
		outer_enter(sq->sq_outer, SQ_GOAWAY);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * there should be no need to recheck sq_putcounts
		 * because outer_enter() has already waited for them to clear
		 * after setting SQ_WRITER.
		 */
		count = sq->sq_count;
#ifdef DEBUG
		/*
		 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
		 * of doing an ASSERT internally.  Others should do
		 * something like
		 *	ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
		 * without the need to #ifdef DEBUG it.
		 */
		SUMCHECK_SQ_PUTCOUNTS(sq, 0);
#endif
		while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
		    (!(type & c_inner) && count != 0)) {
			sq->sq_flags = flags | SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			flags = sq->sq_flags;
		}
	}

	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	if (!(type & c_inner)) {
		/* Exclusive entry */
		ASSERT(sq->sq_count == 1);
		sq->sq_flags |= SQ_EXCL;
		if (type & c_outer) {
			SQ_PUTLOCKS_EXIT(sq);
		}
	}
	mutex_exit(SQLOCK(sq));
}
/*
 * Leave a syncq.  Announce to the framework that closes may proceed.
 * c_inner and c_outer specify which concurrency bits to check.
 *
 * Must never be called from driver or module put entry point.
 *
 * No need to grab sq_putlocks here.  See comment in strsubr.h that explains
 * when sq_putlocks are used.
 */
void
leavesq(syncq_t *sq, int entrypoint)
{
	uint16_t	flags;
	uint16_t	type;
	uint_t		c_outer = entrypoint & SQ_CO;
#ifdef DEBUG
	uint_t		c_inner = entrypoint & SQ_CI;
#endif

	/*
	 * decrement ref count, drain the syncq if possible, and wake up
	 * any waiting close.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {

		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		if (flags & SQ_WANTEXWAKEUP) {
			flags &= ~SQ_WANTEXWAKEUP;
			cv_broadcast(&sq->sq_exitwait);
		}

		if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
			/*
			 * The syncq needs to be drained.  "Exit" the syncq
			 * before calling drain_syncq.
			 */
			ASSERT(sq->sq_count != 0);
			sq->sq_count--;
			ASSERT((flags & SQ_EXCL) || (type & c_inner));
			sq->sq_flags = flags & ~SQ_EXCL;
			drain_syncq(sq);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
			/* Check if we need to exit the outer perimeter */
			/* XXX will this ever be true? */
			if (!(type & c_outer))
				outer_exit(sq->sq_outer);
			return;
		}
	}
	ASSERT(sq->sq_count != 0);
	sq->sq_count--;
	ASSERT((flags & SQ_EXCL) || (type & c_inner));
	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));

	/* Check if we need to exit the outer perimeter */
	if (!(sq->sq_type & c_outer))
		outer_exit(sq->sq_outer);
}
/*
 * Prevent q_next from changing in this stream by incrementing sq_count.
 *
 * No need to grab sq_putlocks here.  See comment in strsubr.h that explains
 * when sq_putlocks are used.
 */
void
claimq(queue_t *qp)
{
	syncq_t	*sq = qp->q_syncq;

	mutex_enter(SQLOCK(sq));
	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* Wraparound */
	mutex_exit(SQLOCK(sq));
}

/*
 * Undo claimq.
 *
 * No need to grab sq_putlocks here.  See comment in strsubr.h that explains
 * when sq_putlocks are used.
 */
void
releaseq(queue_t *qp)
{
	syncq_t	*sq = qp->q_syncq;
	uint16_t flags;

	mutex_enter(SQLOCK(sq));
	ASSERT(sq->sq_count > 0);
	sq->sq_count--;

	flags = sq->sq_flags;
	if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		sq->sq_flags = flags;
		if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
			/*
			 * To prevent potential recursive invocation of
			 * drain_syncq we do not call drain_syncq if count is
			 * non-zero.
			 */
			if (sq->sq_count == 0) {
				drain_syncq(sq);
				return;
			} else
				sqenable(sq);
		}
	}
	mutex_exit(SQLOCK(sq));
}

/*
 * Prevent q_next from changing in this stream by incrementing sd_refcnt.
 */
void
claimstr(queue_t *qp)
{
	struct stdata *stp = STREAM(qp);

	mutex_enter(&stp->sd_reflock);
	stp->sd_refcnt++;
	ASSERT(stp->sd_refcnt != 0);	/* Wraparound */
	mutex_exit(&stp->sd_reflock);
}

/*
 * Undo claimstr.
 */
void
releasestr(queue_t *qp)
{
	struct stdata *stp = STREAM(qp);

	mutex_enter(&stp->sd_reflock);
	ASSERT(stp->sd_refcnt != 0);
	if (--stp->sd_refcnt == 0)
		cv_broadcast(&stp->sd_refmonitor);
	mutex_exit(&stp->sd_reflock);
}

static syncq_t *
new_syncq(void)
{
	return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
}

static void
free_syncq(syncq_t *sq)
{
	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
	    (sq->sq_onext == sq && sq->sq_oprev == sq));

	if (sq->sq_ciputctrl != NULL) {
		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
		    sq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
	}

	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_ciputctrl = NULL;
	sq->sq_nciputctrl = 0;
	sq->sq_count = 0;
	sq->sq_rmqcount = 0;
	sq->sq_callbflags = 0;
	sq->sq_cancelid = 0;
	sq->sq_next = NULL;
	sq->sq_needexcl = 0;
	sq->sq_svcflags = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_flags = 0;
	sq->sq_type = 0;
	sq->sq_servcount = 0;

	kmem_cache_free(syncq_cache, sq);
}
/* Outer perimeter code */

/*
 * The outer syncq uses the fields and flags in the syncq slightly
 * differently from the inner syncqs.
 *	sq_count	Incremented when there are pending or running
 *			writers at the outer perimeter to prevent the set of
 *			inner syncqs that belong to the outer perimeter from
 *			changing.
 *	sq_head/tail	List of deferred qwriter(OUTER) operations.
 *
 *	SQ_BLOCKED	Set to prevent traversing of sq_next, sq_prev while
 *			inner syncqs are added to or removed from the
 *			outer perimeter.
 *	SQ_QUEUED	sq_head/tail has messages or events queued.
 *
 *	SQ_WRITER	A thread is currently traversing all the inner syncqs
 *			setting the SQ_WRITER flag.
 */
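/*
 * A module asks for an outer perimeter declaratively, via its
 * mt-streams(9F) flags, rather than by calling these routines; the
 * framework then builds the outer syncq described above.  A hedged sketch
 * for a hypothetical module "xx" (see mt-streams(9F) for the flag
 * meanings):
 *
 *	static struct fmodsw xx_fmodsw = {
 *		"xx",
 *		&xx_streamtab,
 *		D_MP | D_MTOUTPERIM | D_MTOCEXCL
 *	};
 *
 * D_MTOUTPERIM wraps all of the module's queue pairs in one outer
 * perimeter, and D_MTOCEXCL makes open/close enter it exclusively (which
 * is also what qdetach depends on, as noted earlier).
 */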
5916 */
5917 static void
5918 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5919 {
5920 ASSERT(MUTEX_HELD(SQLOCK(outer)));
5921
5922 mp->b_prev = (mblk_t *)func;
5923 mp->b_queue = q;
5924 mp->b_next = NULL;
5925 outer->sq_count++; /* Decremented when dequeued */
5926 ASSERT(outer->sq_count != 0); /* Wraparound */
5927 if (outer->sq_evhead == NULL) {
5928 /* First message. */
5929 outer->sq_evhead = outer->sq_evtail = mp;
5930 outer->sq_flags |= SQ_EVENTS;
5931 mutex_exit(SQLOCK(outer));
5932 STRSTAT(qwr_outer);
5933 (void) taskq_dispatch(streams_taskq,
5934 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5935 } else {
5936 ASSERT(outer->sq_flags & SQ_EVENTS);
5937 outer->sq_evtail->b_next = mp;
5938 outer->sq_evtail = mp;
5939 mutex_exit(SQLOCK(outer));
5940 }
5941 }
5942
5943 /*
5944 * Try and upgrade to write access at the outer perimeter. If this can
5945 * not be done without blocking then queue the callback to be done
5946 * by the qwriter_outer_thread.
5947 *
5948 * This routine can only be called from put or service procedures plus
5949 * asynchronous callback routines that have properly entered the
5950 * queue (with entersq). Thus qwriter(OUTER) assumes the caller has one claim
5951 * on the syncq associated with q.
5952 */
5953 void
5954 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5955 {
5956 syncq_t *osq, *sq, *outer;
5957 int failed;
5958 uint16_t flags;
5959
5960 osq = q->q_syncq;
5961 outer = osq->sq_outer;
5962 if (outer == NULL)
5963 panic("qwriter(PERIM_OUTER): no outer perimeter");
5964 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5965 outer->sq_oprev != NULL);
5966
5967 mutex_enter(SQLOCK(outer));
5968 flags = outer->sq_flags;
5969 /*
5970 * If some thread is traversing sq_next, or if we are blocked by
5971 * outer_insert or outer_remove, or if we already have queued
5972 * callbacks, then queue this callback for later processing.
5973 *
5974 * Also queue the callback when running as a high-priority
5975 * (interrupt) thread, in order to reduce the time spent
5976 * running at high IPL.
5977 */
5978 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
5979 /*
5980 * Queue the become_writer request.
5981 * The queueing is atomic under SQLOCK(outer) in order
5982 * to synchronize with outer_exit.
5983 * queue_writer will drop the outer SQLOCK.
5984 */
5985 if (flags & SQ_BLOCKED) {
5986 /* Must set SQ_WRITER on inner perimeter */
5987 mutex_enter(SQLOCK(osq));
5988 osq->sq_flags |= SQ_WRITER;
5989 mutex_exit(SQLOCK(osq));
5990 } else {
5991 if (!(flags & SQ_WRITER)) {
5992 /*
5993 * The outer could have been SQ_BLOCKED thus
5994 * SQ_WRITER might not be set on the inner.
5995 */
5996 mutex_enter(SQLOCK(osq));
5997 osq->sq_flags |= SQ_WRITER;
5998 mutex_exit(SQLOCK(osq));
5999 }
6000 ASSERT(osq->sq_flags & SQ_WRITER);
6001 }
6002 queue_writer(outer, func, q, mp);
6003 return;
6004 }
6005 /*
6006 * We are half-way to exclusive access to the outer perimeter.
6007 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6008 * while the inner syncqs are traversed.
6009 */
6010 outer->sq_count++;
6011 ASSERT(outer->sq_count != 0); /* wraparound */
6012 flags |= SQ_WRITER;
6013 /*
6014 * Check if we can run the function immediately. Mark all
6015 * syncqs with the writer flag to prevent new entries into
6016 * put and service procedures.
6017 *
6018 * Set SQ_WRITER on all the inner syncqs while holding
6019 * the SQLOCK on the outer syncq. This ensures that the changing
6020 * of SQ_WRITER is atomic under the outer SQLOCK.
6021 */
6022 failed = 0;
6023 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6024 uint16_t count;
6025 uint_t maxcnt = (sq == osq) ? 1 : 0;
6026
6027 mutex_enter(SQLOCK(sq));
6028 count = sq->sq_count;
6029 SQ_PUTLOCKS_ENTER(sq);
6030 SUM_SQ_PUTCOUNTS(sq, count);
6031 if (count > maxcnt)
6032 failed = 1;
6033 sq->sq_flags |= SQ_WRITER;
6034 SQ_PUTLOCKS_EXIT(sq);
6035 mutex_exit(SQLOCK(sq));
6036 }
6037 if (failed) {
6038 /*
6039 * Some other thread has a read claim on the outer perimeter.
6040 * Queue the callback for deferred processing.
6041 *
6042 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6043 * so that other qwriter(OUTER) calls will queue their
6044 * callbacks as well. queue_writer increments sq_count so we
6045 * decrement to compensate for our increment.
6046 *
6047 * Dropping SQ_WRITER enables the writer thread to work
6048 * on this outer perimeter.
6049 */
6050 outer->sq_flags = flags;
6051 queue_writer(outer, func, q, mp);
6052 /* queue_writer dropped the lock */
6053 mutex_enter(SQLOCK(outer));
6054 ASSERT(outer->sq_count > 0);
6055 outer->sq_count--;
6056 ASSERT(outer->sq_flags & SQ_WRITER);
6057 flags = outer->sq_flags;
6058 flags &= ~SQ_WRITER;
6059 if (flags & SQ_WANTWAKEUP) {
6060 flags &= ~SQ_WANTWAKEUP;
6061 cv_broadcast(&outer->sq_wait);
6062 }
6063 outer->sq_flags = flags;
6064 mutex_exit(SQLOCK(outer));
6065 return;
6066 } else {
6067 outer->sq_flags = flags;
6068 mutex_exit(SQLOCK(outer));
6069 }
6070
6071 /* Can run it immediately */
6072 (*func)(q, mp);
6073
6074 outer_exit(outer);
6075 }
6076
6077 /*
6078 * Dequeue all writer callbacks from the outer perimeter and run them.
6079 */
6080 static void
6081 write_now(syncq_t *outer)
6082 {
6083 mblk_t *mp;
6084 queue_t *q;
6085 void (*func)();
6086
6087 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6088 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6089 outer->sq_oprev != NULL);
6090 while ((mp = outer->sq_evhead) != NULL) {
6091 /*
6092 * queues cannot be placed on the queuelist on the outer
6093 * perimeter.
6094 */
6095 ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6096 ASSERT((outer->sq_flags & SQ_EVENTS));
6097
6098 outer->sq_evhead = mp->b_next;
6099 if (outer->sq_evhead == NULL) {
6100 outer->sq_evtail = NULL;
6101 outer->sq_flags &= ~SQ_EVENTS;
6102 }
6103 ASSERT(outer->sq_count != 0);
6104 outer->sq_count--; /* Incremented when enqueued. */
6105 mutex_exit(SQLOCK(outer));
6106 /*
6107 * Drop the message if the queue is closing.
6108 * Make sure that the queue is "claimed" when the callback
6109 * is run in order to satisfy various ASSERTs.
6110 */
6111 q = mp->b_queue;
6112 func = (void (*)())mp->b_prev;
6113 ASSERT(func != NULL);
6114 mp->b_next = mp->b_prev = NULL;
6115 if (q->q_flag & QWCLOSE) {
6116 freemsg(mp);
6117 } else {
6118 claimq(q);
6119 (*func)(q, mp);
6120 releaseq(q);
6121 }
6122 mutex_enter(SQLOCK(outer));
6123 }
6124 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6125 }
6126
6127 /*
6128 * The list of messages on the inner syncq is effectively hashed
6129 * by destination queue. These destination queues are doubly
6130 * linked lists (hopefully) in priority order. Messages are then
6131 * put on the queue referenced by the q_sqhead/q_sqtail elements.
6132 * Additional messages are linked together by the b_next/b_prev
6133 * elements in the mblk, with (similar to putq()) the first message
6134 * having a NULL b_prev and the last message having a NULL b_next.
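*
* An editorial sketch (not from the original source) of one possible
* state, with two destination queues and three queued messages:
*
* sq_head -> q1 <-> q2 <- sq_tail (q_sqnext/q_sqprev links)
* q1: q_sqhead -> m1 -> m2 <- q_sqtail (b_next/b_prev links)
* q2: q_sqhead -> m3 <- q_sqtail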
6135 * 6136 * Events, such as qwriter callbacks, are put onto a list in FIFO 6137 * order referenced by sq_evhead, and sq_evtail. This is a singly 6138 * linked list, and messages here MUST be processed in the order queued. 6139 */ 6140 6141 /* 6142 * Run the events on the syncq event list (sq_evhead). 6143 * Assumes there is only one claim on the syncq, it is 6144 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6145 * Messages here are processed in order, with the SQ_EXCL bit 6146 * held all the way through till the last message is processed. 6147 */ 6148 void 6149 sq_run_events(syncq_t *sq) 6150 { 6151 mblk_t *bp; 6152 queue_t *qp; 6153 uint16_t flags = sq->sq_flags; 6154 void (*func)(); 6155 6156 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6157 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6158 sq->sq_oprev == NULL) || 6159 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6160 sq->sq_oprev != NULL)); 6161 6162 ASSERT(flags & SQ_EXCL); 6163 ASSERT(sq->sq_count == 1); 6164 6165 /* 6166 * We need to process all of the events on this list. It 6167 * is possible that new events will be added while we are 6168 * away processing a callback, so on every loop, we start 6169 * back at the beginning of the list. 6170 */ 6171 /* 6172 * We have to reaccess sq_evhead since there is a 6173 * possibility of a new entry while we were running 6174 * the callback. 6175 */ 6176 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6177 ASSERT(bp->b_queue->q_syncq == sq); 6178 ASSERT(sq->sq_flags & SQ_EVENTS); 6179 6180 qp = bp->b_queue; 6181 func = (void (*)())bp->b_prev; 6182 ASSERT(func != NULL); 6183 6184 /* 6185 * Messages from the event queue must be taken off in 6186 * FIFO order. 6187 */ 6188 ASSERT(sq->sq_evhead == bp); 6189 sq->sq_evhead = bp->b_next; 6190 6191 if (bp->b_next == NULL) { 6192 /* Deleting last */ 6193 ASSERT(sq->sq_evtail == bp); 6194 sq->sq_evtail = NULL; 6195 sq->sq_flags &= ~SQ_EVENTS; 6196 } 6197 bp->b_prev = bp->b_next = NULL; 6198 ASSERT(bp->b_datap->db_ref != 0); 6199 6200 mutex_exit(SQLOCK(sq)); 6201 6202 (*func)(qp, bp); 6203 6204 mutex_enter(SQLOCK(sq)); 6205 /* 6206 * re-read the flags, since they could have changed. 6207 */ 6208 flags = sq->sq_flags; 6209 ASSERT(flags & SQ_EXCL); 6210 } 6211 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6212 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6213 6214 if (flags & SQ_WANTWAKEUP) { 6215 flags &= ~SQ_WANTWAKEUP; 6216 cv_broadcast(&sq->sq_wait); 6217 } 6218 if (flags & SQ_WANTEXWAKEUP) { 6219 flags &= ~SQ_WANTEXWAKEUP; 6220 cv_broadcast(&sq->sq_exitwait); 6221 } 6222 sq->sq_flags = flags; 6223 } 6224 6225 /* 6226 * Put messages on the event list. 6227 * If we can go exclusive now, do so and process the event list, otherwise 6228 * let the last claim service this list (or wake the sqthread). 6229 * This procedure assumes SQLOCK is held. To run the event list, it 6230 * must be called with no claims. 6231 */ 6232 static void 6233 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6234 { 6235 uint16_t count; 6236 6237 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6238 ASSERT(func != NULL); 6239 6240 /* 6241 * This is a callback. Add it to the list of callbacks 6242 * and see about upgrading. 
6243 */
6244 mp->b_prev = (mblk_t *)func;
6245 mp->b_queue = q;
6246 mp->b_next = NULL;
6247 if (sq->sq_evhead == NULL) {
6248 sq->sq_evhead = sq->sq_evtail = mp;
6249 sq->sq_flags |= SQ_EVENTS;
6250 } else {
6251 ASSERT(sq->sq_evtail != NULL);
6252 ASSERT(sq->sq_evtail->b_next == NULL);
6253 ASSERT(sq->sq_flags & SQ_EVENTS);
6254 sq->sq_evtail->b_next = mp;
6255 sq->sq_evtail = mp;
6256 }
6257 /*
6258 * We have set SQ_EVENTS, so threads will have to
6259 * unwind out of the perimeter, and new entries will
6260 * not grab a putlock. But we still need to know
6261 * how many threads have already made a claim to the
6262 * syncq, so grab the putlocks, and sum the counts.
6263 * If there are no claims on the syncq, we can upgrade
6264 * to exclusive, and run the event list.
6265 * NOTE: We hold the SQLOCK, so we can just grab the
6266 * putlocks.
6267 */
6268 count = sq->sq_count;
6269 SQ_PUTLOCKS_ENTER(sq);
6270 SUM_SQ_PUTCOUNTS(sq, count);
6271 /*
6272 * This thread holds no claim on the syncq (at least
6273 * not via this entry), so the count gathered above
6274 * reflects other threads only. If it is zero we
6275 * can upgrade to exclusive access and run the event
6276 * list; otherwise one of the threads that does hold
6277 * a claim will drain the syncq when it unwinds out
6278 * of the perimeter.
6279 */
6280 if (count > 0) {
6281 /*
6282 * Can't upgrade - other threads inside.
6283 */
6284 SQ_PUTLOCKS_EXIT(sq);
6285 mutex_exit(SQLOCK(sq));
6286 return;
6287 }
6288 /*
6289 * Need to set SQ_EXCL and make a claim on the syncq.
6290 */
6291 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6292 sq->sq_flags |= SQ_EXCL;
6293 ASSERT(sq->sq_count == 0);
6294 sq->sq_count++;
6295 SQ_PUTLOCKS_EXIT(sq);
6296
6297 /* Process the events list */
6298 sq_run_events(sq);
6299
6300 /*
6301 * Release our claim...
6302 */
6303 sq->sq_count--;
6304
6305 /*
6306 * And release SQ_EXCL.
6307 * We don't need to acquire the putlocks to release
6308 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6309 */
6310 sq->sq_flags &= ~SQ_EXCL;
6311
6312 /*
6313 * SQ_EXCL should now be clear.
6314 */
6315 ASSERT(!(sq->sq_flags & SQ_EXCL));
6316
6317 /*
6318 * If anything happened while we were running the
6319 * events (or was there before), we need to process
6320 * them now. We shouldn't be exclusive since we
6321 * released the perimeter above (plus, we asserted
6322 * for it).
6323 */
6324 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6325 drain_syncq(sq);
6326 else
6327 mutex_exit(SQLOCK(sq));
6328 }
6329
6330 /*
6331 * Perform delayed processing. The caller has to make sure that it is safe
6332 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits
6333 * are set).
6334 *
6335 * Assume that the caller has NO claims on the syncq. However, a claim
6336 * on the syncq does not indicate that a thread is draining the syncq.
6337 * There may be more claims on the syncq than there are threads draining
6338 * (i.e. #_threads_draining <= sq_count)
6339 *
6340 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6341 * in order to preserve qwriter(OUTER) ordering constraints.
6342 *
6343 * sq_putcount only needs to be checked when dispatching the queued
6344 * writer call for a CIPUT sync queue, but this is handled in sq_run_events.
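*
* In outline (an editorial summary of the code below):
*
* claim the syncq (sq_count++), going exclusive first if !SQ_CIPUT;
* while (SQ_QUEUED is set and SQ_STAYAWAY is not) {
* if there are events and we hold the only claim, run them;
* else pick a queue nobody is draining and qdrain_syncq() it;
* }
* drop SQ_EXCL (non-CIPUT), wake any waiters and release the claim.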
6345 */
6346 void
6347 drain_syncq(syncq_t *sq)
6348 {
6349 queue_t *qp;
6350 uint16_t count;
6351 uint16_t type = sq->sq_type;
6352 uint16_t flags = sq->sq_flags;
6353 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6354
6355 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6356 "drain_syncq start:%p", sq);
6357 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6358 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6359 sq->sq_oprev == NULL) ||
6360 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6361 sq->sq_oprev != NULL));
6362
6363 /*
6364 * Drop SQ_SERVICE flag.
6365 */
6366 if (bg_service)
6367 sq->sq_svcflags &= ~SQ_SERVICE;
6368
6369 /*
6370 * If SQ_EXCL is set, someone else is processing this syncq - let him
6371 * finish the job.
6372 */
6373 if (flags & SQ_EXCL) {
6374 if (bg_service) {
6375 ASSERT(sq->sq_servcount != 0);
6376 sq->sq_servcount--;
6377 }
6378 mutex_exit(SQLOCK(sq));
6379 return;
6380 }
6381
6382 /*
6383 * This routine can be called by a background thread if
6384 * it was scheduled by a high-priority thread. So, if there are
6385 * no messages queued, return (remember, we have the SQLOCK,
6386 * and it cannot change until we release it). Also wake up any waiters.
6387 */
6388 if (!(flags & SQ_QUEUED)) {
6389 if (flags & SQ_WANTWAKEUP) {
6390 flags &= ~SQ_WANTWAKEUP;
6391 cv_broadcast(&sq->sq_wait);
6392 }
6393 if (flags & SQ_WANTEXWAKEUP) {
6394 flags &= ~SQ_WANTEXWAKEUP;
6395 cv_broadcast(&sq->sq_exitwait);
6396 }
6397 sq->sq_flags = flags;
6398 if (bg_service) {
6399 ASSERT(sq->sq_servcount != 0);
6400 sq->sq_servcount--;
6401 }
6402 mutex_exit(SQLOCK(sq));
6403 return;
6404 }
6405
6406 /*
6407 * If this is not a concurrent put perimeter, we need to
6408 * become exclusive to drain. Also, if not CIPUT, we would
6409 * not have acquired a putlock, so we don't need to check
6410 * the putcounts. If not entering with a claim, we test
6411 * for sq_count == 0.
6412 */
6413 type = sq->sq_type;
6414 if (!(type & SQ_CIPUT)) {
6415 if (sq->sq_count > 1) {
6416 if (bg_service) {
6417 ASSERT(sq->sq_servcount != 0);
6418 sq->sq_servcount--;
6419 }
6420 mutex_exit(SQLOCK(sq));
6421 return;
6422 }
6423 sq->sq_flags |= SQ_EXCL;
6424 }
6425
6426 /*
6427 * This is where we make a claim to the syncq.
6428 * This can either be done by incrementing a putlock, or
6429 * the sq_count. But since we already have the SQLOCK
6430 * here, we just bump the sq_count.
6431 *
6432 * Note that after we make a claim, we need to let the code
6433 * fall through to the end of this routine to clean itself
6434 * up. A return in the while loop will put the syncq in a
6435 * very bad state.
6436 */
6437 sq->sq_count++;
6438 ASSERT(sq->sq_count != 0); /* wraparound */
6439
6440 while ((flags = sq->sq_flags) & SQ_QUEUED) {
6441 /*
6442 * If we are told to stayaway or went exclusive,
6443 * we are done.
6444 */
6445 if (flags & (SQ_STAYAWAY)) {
6446 break;
6447 }
6448
6449 /*
6450 * If there are events to run, do so.
6451 * We have one claim to the syncq, so if there are
6452 * more than one, other threads are running.
6453 */
6454 if (sq->sq_evhead != NULL) {
6455 ASSERT(sq->sq_flags & SQ_EVENTS);
6456
6457 count = sq->sq_count;
6458 SQ_PUTLOCKS_ENTER(sq);
6459 SUM_SQ_PUTCOUNTS(sq, count);
6460 if (count > 1) {
6461 SQ_PUTLOCKS_EXIT(sq);
6462 /* Can't upgrade - other threads inside */
6463 break;
6464 }
6465 ASSERT((flags & SQ_EXCL) == 0);
6466 sq->sq_flags = flags | SQ_EXCL;
6467 SQ_PUTLOCKS_EXIT(sq);
6468 /*
6469 * We have the only claim; run the events. Note that
6470 * sq_run_events leaves SQ_EXCL set; it is dropped below for CIPUT.
6471 */
6472 sq_run_events(sq);
6473
6474 /*
6475 * If this is a CIPUT perimeter, we need
6476 * to drop the SQ_EXCL flag so we can properly
6477 * continue draining the syncq.
6478 */
6479 if (type & SQ_CIPUT) {
6480 ASSERT(sq->sq_flags & SQ_EXCL);
6481 sq->sq_flags &= ~SQ_EXCL;
6482 }
6483
6484 /*
6485 * And go back to the beginning just in case
6486 * anything changed while we were away.
6487 */
6488 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6489 continue;
6490 }
6491
6492 ASSERT(sq->sq_evhead == NULL);
6493 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6494
6495 /*
6496 * Find a queue that is not draining.
6497 *
6498 * q_draining is protected by QLOCK which we do not hold.
6499 * But if it was set, then a thread was draining, and if it gets
6500 * cleared, then it was because the thread has successfully
6501 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6502 * state to happen, a thread needs the SQLOCK which we hold, and
6503 * if there was such a flag, we would have already seen it.
6504 */
6505
6506 for (qp = sq->sq_head;
6507 qp != NULL && (qp->q_draining ||
6508 (qp->q_sqflags & Q_SQDRAINING));
6509 qp = qp->q_sqnext)
6510 ;
6511
6512 if (qp == NULL)
6513 break;
6514
6515 /*
6516 * We have a queue to work on, and we hold the
6517 * SQLOCK and one claim, call qdrain_syncq.
6518 * This means we need to release the SQLOCK and
6519 * acquire the QLOCK (OK since we have a claim).
6520 * Note that qdrain_syncq will actually dequeue
6521 * this queue from the sq_head list when it is
6522 * convinced all the work is done and release
6523 * the QLOCK before returning.
6524 */
6525 qp->q_sqflags |= Q_SQDRAINING;
6526 mutex_exit(SQLOCK(sq));
6527 mutex_enter(QLOCK(qp));
6528 qdrain_syncq(sq, qp);
6529 mutex_enter(SQLOCK(sq));
6530
6531 /* The queue is drained */
6532 ASSERT(qp->q_sqflags & Q_SQDRAINING);
6533 qp->q_sqflags &= ~Q_SQDRAINING;
6534 /*
6535 * NOTE: After this point qp should not be used since it may be
6536 * closed.
6537 */
6538 }
6539
6540 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6541 flags = sq->sq_flags;
6542
6543 /*
6544 * sq->sq_head cannot change because we hold the
6545 * sqlock. However, a thread CAN decide that it is no longer
6546 * going to drain that queue; that should be due to
6547 * a GOAWAY state, and we should see that here.
6548 *
6549 * This loop is not very efficient. One solution may be adding a second
6550 * pointer to the "draining" queue, but it is difficult to do when
6551 * queues are inserted in the middle due to priority ordering. Another
6552 * possibility is to yank the queue out of the sq list and put it onto
6553 * the "draining list" and then put it back if it can't be drained.
6554 */
6555
6556 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6557 (type & SQ_CI) || sq->sq_head->q_draining);
6558
6559 /* Drop SQ_EXCL for non-CIPUT perimeters */
6560 if (!(type & SQ_CIPUT))
6561 flags &= ~SQ_EXCL;
6562 ASSERT((flags & SQ_EXCL) == 0);
6563
6564 /* Wake up any waiters. */
6565 if (flags & SQ_WANTWAKEUP) {
6566 flags &= ~SQ_WANTWAKEUP;
6567 cv_broadcast(&sq->sq_wait);
6568 }
6569 if (flags & SQ_WANTEXWAKEUP) {
6570 flags &= ~SQ_WANTEXWAKEUP;
6571 cv_broadcast(&sq->sq_exitwait);
6572 }
6573 sq->sq_flags = flags;
6574
6575 ASSERT(sq->sq_count != 0);
6576 /* Release our claim. */
6577 sq->sq_count--;
6578
6579 if (bg_service) {
6580 ASSERT(sq->sq_servcount != 0);
6581 sq->sq_servcount--;
6582 }
6583
6584 mutex_exit(SQLOCK(sq));
6585
6586 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6587 "drain_syncq end:%p", sq);
6588 }
6589
6590
6591 /*
6592 *
6593 * qdrain_syncq can be called (currently) from only one of two places:
6594 * drain_syncq
6595 * putnext (or some variation of it).
6596 * and eventually
6597 * qwait(_sig)
6598 *
6599 * If called from drain_syncq, we found it in the list
6600 * of queues needing service, so there is work to be done (or it
6601 * wouldn't be on the list).
6602 *
6603 * If called from some putnext variation, it was because the
6604 * perimeter is open, but messages are blocking a putnext and
6605 * there is not a thread working on it. Now a thread could start
6606 * working on it while we are getting ready to do so ourselves, but
6607 * the thread would set the q_draining flag, and we can spin out.
6608 *
6609 * As for qwait(_sig), I think I shall let it continue to call
6610 * drain_syncq directly (after all, it will get here eventually).
6611 *
6612 * qdrain_syncq has to terminate when:
6613 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6614 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6615 *
6616 * ASSUMES:
6617 * One claim
6618 * QLOCK held
6619 * SQLOCK not held
6620 * Will release QLOCK before returning
6621 */
6622 void
6623 qdrain_syncq(syncq_t *sq, queue_t *q)
6624 {
6625 mblk_t *bp;
6626 boolean_t do_clr;
6627 #ifdef DEBUG
6628 uint16_t count;
6629 #endif
6630
6631 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6632 "drain_syncq start:%p", sq);
6633 ASSERT(q->q_syncq == sq);
6634 ASSERT(MUTEX_HELD(QLOCK(q)));
6635 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6636 /*
6637 * For non-CIPUT perimeters, we should be called with the
6638 * exclusive bit set already. For CIPUT perimeters we
6639 * will be doing a concurrent drain, so it had better not be set.
6640 */
6641 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6642 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6643 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6644 /*
6645 * All outer pointers are set, or none of them are
6646 */
6647 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6648 sq->sq_oprev == NULL) ||
6649 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6650 sq->sq_oprev != NULL));
6651 #ifdef DEBUG
6652 count = sq->sq_count;
6653 /*
6654 * This is OK without the putlocks, because we have one
6655 * claim either from the sq_count, or a putcount. We could
6656 * get an erroneous value from other counts, but ours won't
6657 * change, so one way or another, we will have at least a
6658 * value of one.
6659 */
6660 SUM_SQ_PUTCOUNTS(sq, count);
6661 ASSERT(count >= 1);
6662 #endif /* DEBUG */
6663
6664 /*
6665 * The first thing to do here is find out if a thread is already
6666 * draining this queue or the queue is closing. If so, we are done,
6667 * just return. Also, if there are no messages, we are done as well.
6668 * Note that we check the q_sqhead since there is a window of
6669 * opportunity for us to enter here because Q_SQQUEUED was set, but is
6670 * not anymore.
6671 */
6672 if (q->q_draining || (q->q_sqhead == NULL)) {
6673 mutex_exit(QLOCK(q));
6674 return;
6675 }
6676
6677 /*
6678 * If the perimeter is exclusive, there is nothing we can
6679 * do right now, go away.
6680 * Note that there is nothing to prevent this case from changing
6681 * right after this check, but the spin-out will catch it.
6682 */
6683
6684 /* Tell other threads that we are draining this queue */
6685 q->q_draining = 1; /* Protected by QLOCK */
6686
6687 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6688
6689 /*
6690 * Because we can enter this routine just because
6691 * a putnext is blocked, we need to spin out if
6692 * the perimeter wants to go exclusive as well
6693 * as just blocked. We need to spin out also if
6694 * events are queued on the syncq.
6695 * Don't check for SQ_EXCL, because non-CIPUT
6696 * perimeters would set it, and it can't become
6697 * exclusive while we hold a claim.
6698 */
6699 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6700 break;
6701 }
6702
6703 #ifdef DEBUG
6704 /*
6705 * Since we are in qdrain_syncq, we already know the queue,
6706 * but for sanity, we want to check this against the qp that
6707 * was recorded in bp->b_queue.
6708 */
6709
6710 ASSERT(bp->b_queue == q);
6711 ASSERT(bp->b_queue->q_syncq == sq);
6712 bp->b_queue = NULL;
6713
6714 /*
6715 * We would have the following check in the DEBUG code:
6716 *
6717 * if (bp->b_prev != NULL) {
6718 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6719 * }
6720 *
6721 * This can't be done, however, since IP modifies qinfo
6722 * structure at run-time (switching between IPv4 qinfo and IPv6
6723 * qinfo), invalidating the check.
6724 * So the intended check is recorded here, but the ASSERT itself
6725 * is removed until the whole issue is resolved.
6726 */
6727 #endif
6728 ASSERT(q->q_sqhead == bp);
6729 q->q_sqhead = bp->b_next;
6730 bp->b_prev = bp->b_next = NULL;
6731 ASSERT(q->q_syncqmsgs > 0);
6732 mutex_exit(QLOCK(q));
6733
6734 ASSERT(bp->b_datap->db_ref != 0);
6735
6736 (void) (*q->q_qinfo->qi_putp)(q, bp);
6737
6738 mutex_enter(QLOCK(q));
6739 /*
6740 * We should decrement q_syncqmsgs only after executing the
6741 * put procedure to avoid a possible race with putnext().
6742 * Even though putnext() sees Q_SQQUEUED set, there is
6743 * an optimization which allows it to call the put
6744 * procedure directly if (q_syncqmsgs == 0), and thus
6745 * a message reordering could otherwise occur.
6746 */
6747 q->q_syncqmsgs--;
6748
6749 /*
6750 * Clear QFULL in the next service procedure queue if
6751 * this is the last message destined to that queue.
6752 *
6753 * It would make better sense to have some sort of
6754 * tunable for the low water mark, but these semantics
6755 * are not yet defined. So, alas, we use a constant.
6756 */
6757 do_clr = (q->q_syncqmsgs == 0);
6758 mutex_exit(QLOCK(q));
6759
6760 if (do_clr)
6761 clr_qfull(q);
6762
6763 mutex_enter(QLOCK(q));
6764 /*
6765 * Always clear SQ_EXCL when CIPUT in order to handle
6766 * qwriter(INNER).
6767 */
6768 /*
6769 * The putp() can call qwriter and get exclusive access
6770 * IFF this is the only claim. So, we need to test for
6771 * this possibility so we can acquire the mutex and clear
6772 * the bit.
6773 */
6774 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6775 mutex_enter(SQLOCK(sq));
6776 sq->sq_flags &= ~SQ_EXCL;
6777 mutex_exit(SQLOCK(sq));
6778 }
6779 }
6780
6781 /*
6782 * We should either have no messages on this queue, or we were
6783 * told to goaway by a waiter (which we will wake up at the
6784 * end of this function).
6785 */
6786 ASSERT((q->q_sqhead == NULL) ||
6787 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6788
6789 ASSERT(MUTEX_HELD(QLOCK(q)));
6790 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6791
6792 /*
6793 * Remove the q from the syncq list if all the messages are
6794 * drained.
6795 */
6796 if (q->q_sqhead == NULL) {
6797 mutex_enter(SQLOCK(sq));
6798 if (q->q_sqflags & Q_SQQUEUED)
6799 SQRM_Q(sq, q);
6800 mutex_exit(SQLOCK(sq));
6801 /*
6802 * Since the queue is removed from the list, reset its priority.
6803 */
6804 q->q_spri = 0;
6805 }
6806
6807 /*
6808 * Remember, the q_draining flag is used to let another
6809 * thread know that there is a thread currently draining
6810 * the messages for a queue. Since we are now done with
6811 * this queue (even if there may be messages still there),
6812 * we need to clear this flag so some thread will work
6813 * on it if needed.
6814 */
6815 ASSERT(q->q_draining);
6816 q->q_draining = 0;
6817
6818 /* Called with a claim, so OK to drop all locks. */
6819 mutex_exit(QLOCK(q));
6820
6821 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6822 "drain_syncq end:%p", sq);
6823 }
6824 /* END OF QDRAIN_SYNCQ */
6825
6826
6827 /*
6828 * This is the mate to qdrain_syncq, except that it is putting the
6829 * message onto the queue instead of draining it. Since the
6830 * message is destined for the queue that is selected, there is
6831 * no need to identify the function because the message is
6832 * intended for the put routine for the queue. But this
6833 * routine will do it anyway just in case (but only for debug kernels).
6834 *
6835 * After the message is enqueued on the syncq, it calls putnext_tail()
6836 * which will schedule a background thread to actually process the message.
6837 *
6838 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6839 * SQLOCK(sq) and QLOCK(q) are not held.
6840 */
6841 void
6842 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6843 {
6844 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6845 ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6846 ASSERT(sq->sq_count > 0);
6847 ASSERT(q->q_syncq == sq);
6848 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6849 sq->sq_oprev == NULL) ||
6850 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6851 sq->sq_oprev != NULL));
6852
6853 mutex_enter(QLOCK(q));
6854
6855 #ifdef DEBUG
6856 /*
6857 * This is used for debug in the qfill_syncq/qdrain_syncq case
6858 * to trace the queue that the message is intended for. Note
6859 * that the original use was to identify the queue and function
6860 * to call on the drain. In the new syncq, we have the context
6861 * of the queue that we are draining, so call its putproc and
6862 * don't rely on the saved values. But for debug this is still
6863 * useful information.
6864 */
6865 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6866 mp->b_queue = q;
6867 mp->b_next = NULL;
6868 #endif
6869 ASSERT(q->q_syncq == sq);
6870 /*
6871 * Enqueue the message on the list.
6872 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6873 * protect it. So it's OK to acquire SQLOCK after SQPUT_MP().
6874 */
6875 SQPUT_MP(q, mp);
6876 mutex_enter(SQLOCK(sq));
6877
6878 /*
6879 * And queue on syncq for scheduling, if not already queued.
6880 * Note that we need the SQLOCK for this, and for testing flags
6881 * at the end to see if we will drain. So grab it now, and
6882 * release it before we call qdrain_syncq or return.
6883 */
6884 if (!(q->q_sqflags & Q_SQQUEUED)) {
6885 q->q_spri = curthread->t_pri;
6886 SQPUT_Q(sq, q);
6887 }
6888 #ifdef DEBUG
6889 else {
6890 /*
6891 * All of these conditions MUST be true!
6892 */
6893 ASSERT(sq->sq_tail != NULL);
6894 if (sq->sq_tail == sq->sq_head) {
6895 ASSERT((q->q_sqprev == NULL) &&
6896 (q->q_sqnext == NULL));
6897 } else {
6898 ASSERT((q->q_sqprev != NULL) ||
6899 (q->q_sqnext != NULL));
6900 }
6901 ASSERT(sq->sq_flags & SQ_QUEUED);
6902 ASSERT(q->q_syncqmsgs != 0);
6903 ASSERT(q->q_sqflags & Q_SQQUEUED);
6904 }
6905 #endif
6906 mutex_exit(QLOCK(q));
6907 /*
6908 * SQLOCK is still held, so sq_count can be safely decremented.
6909 */
6910 sq->sq_count--;
6911
6912 putnext_tail(sq, q, 0);
6913 /* Should not reference sq or q after this point. */
6914 }
6915
6916 /* End of qfill_syncq */
6917
6918 /*
6919 * Remove all messages from a syncq (if qp is NULL) or remove all messages
6920 * that would be put into qp by drain_syncq.
6921 * Used when deleting the syncq (qp == NULL) or when detaching
6922 * a queue (qp != NULL).
6923 * Return non-zero if one or more messages were freed.
6924 *
6925 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
6926 * sq_putlocks are used.
6927 *
6928 * NOTE: This function assumes that it is called from the close() context and
6929 * that all the queues in the syncq are going away. For this reason it doesn't
6930 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6931 * currently valid, but it would be useful to rethink this function so that it
6932 * behaves properly in other cases.
6933 */
6934 int
6935 flush_syncq(syncq_t *sq, queue_t *qp)
6936 {
6937 mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6938 queue_t *q;
6939 int ret = 0;
6940
6941 mutex_enter(SQLOCK(sq));
6942
6943 /*
6944 * Before we leave, we need to make sure there are no
6945 * events listed for this queue. All events for this queue
6946 * will just be freed.
6947 */
6948 if (qp != NULL && sq->sq_evhead != NULL) {
6949 ASSERT(sq->sq_flags & SQ_EVENTS);
6950
6951 mp_prev = NULL;
6952 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6953 mp_next = bp->b_next;
6954 if (bp->b_queue == qp) {
6955 /* Delete this message */
6956 if (mp_prev != NULL) {
6957 mp_prev->b_next = mp_next;
6958 /*
6959 * Update sq_evtail if the last element
6960 * is removed.
6961 */
6962 if (bp == sq->sq_evtail) {
6963 ASSERT(mp_next == NULL);
6964 sq->sq_evtail = mp_prev;
6965 }
6966 } else
6967 sq->sq_evhead = mp_next;
6968 if (sq->sq_evhead == NULL)
6969 sq->sq_flags &= ~SQ_EVENTS;
6970 bp->b_prev = bp->b_next = NULL;
6971 freemsg(bp);
6972 ret++;
6973 } else {
6974 mp_prev = bp;
6975 }
6976 }
6977 }
6978
6979 /*
6980 * Walk sq_head and:
6981 * - if qp is set, remove qp's messages
6982 * - if qp is not set, remove them all
6983 */
6984 q = sq->sq_head;
6985 while (q != NULL) {
6986 ASSERT(q->q_syncq == sq);
6987 if ((qp == NULL) || (qp == q)) {
6988 /*
6989 * Yank the messages as a list off the queue
6990 */
6991 mp_head = q->q_sqhead;
6992 /*
6993 * We do not have QLOCK(q) here (which is safe due to
6994 * assumptions mentioned above). To obtain the lock we
6995 * need to release SQLOCK which may allow lots of things
6996 * to change upon us. This place requires more analysis.
6997 */
6998 q->q_sqhead = q->q_sqtail = NULL;
6999 ASSERT(mp_head->b_queue &&
7000 mp_head->b_queue->q_syncq == sq);
7001
7002 /*
7003 * Free each of the messages.
7004 */ 7005 for (bp = mp_head; bp != NULL; bp = mp_next) { 7006 mp_next = bp->b_next; 7007 bp->b_prev = bp->b_next = NULL; 7008 freemsg(bp); 7009 ret++; 7010 } 7011 /* 7012 * Now remove the queue from the syncq. 7013 */ 7014 ASSERT(q->q_sqflags & Q_SQQUEUED); 7015 SQRM_Q(sq, q); 7016 q->q_spri = 0; 7017 q->q_syncqmsgs = 0; 7018 7019 /* 7020 * If qp was specified, we are done with it and are 7021 * going to drop SQLOCK(sq) and return. We wakeup syncq 7022 * waiters while we still have the SQLOCK. 7023 */ 7024 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) { 7025 sq->sq_flags &= ~SQ_WANTWAKEUP; 7026 cv_broadcast(&sq->sq_wait); 7027 } 7028 /* Drop SQLOCK across clr_qfull */ 7029 mutex_exit(SQLOCK(sq)); 7030 7031 /* 7032 * We avoid doing the test that drain_syncq does and 7033 * unconditionally clear qfull for every flushed 7034 * message. Since flush_syncq is only called during 7035 * close this should not be a problem. 7036 */ 7037 clr_qfull(q); 7038 if (qp != NULL) { 7039 return (ret); 7040 } else { 7041 mutex_enter(SQLOCK(sq)); 7042 /* 7043 * The head was removed by SQRM_Q above. 7044 * reread the new head and flush it. 7045 */ 7046 q = sq->sq_head; 7047 } 7048 } else { 7049 q = q->q_sqnext; 7050 } 7051 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7052 } 7053 7054 if (sq->sq_flags & SQ_WANTWAKEUP) { 7055 sq->sq_flags &= ~SQ_WANTWAKEUP; 7056 cv_broadcast(&sq->sq_wait); 7057 } 7058 7059 mutex_exit(SQLOCK(sq)); 7060 return (ret); 7061 } 7062 7063 /* 7064 * Propagate all messages from a syncq to the next syncq that are associated 7065 * with the specified queue. If the queue is attached to a driver or if the 7066 * messages have been added due to a qwriter(PERIM_INNER), free the messages. 7067 * 7068 * Assumes that the stream is strlock()'ed. We don't come here if there 7069 * are no messages to propagate. 7070 * 7071 * NOTE : If the queue is attached to a driver, all the messages are freed 7072 * as there is no point in propagating the messages from the driver syncq 7073 * to the closing stream head which will in turn get freed later. 7074 */ 7075 static int 7076 propagate_syncq(queue_t *qp) 7077 { 7078 mblk_t *bp, *head, *tail, *prev, *next; 7079 syncq_t *sq; 7080 queue_t *nqp; 7081 syncq_t *nsq; 7082 boolean_t isdriver; 7083 int moved = 0; 7084 uint16_t flags; 7085 pri_t priority = curthread->t_pri; 7086 #ifdef DEBUG 7087 void (*func)(); 7088 #endif 7089 7090 sq = qp->q_syncq; 7091 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7092 /* debug macro */ 7093 SQ_PUTLOCKS_HELD(sq); 7094 /* 7095 * As entersq() does not increment the sq_count for 7096 * the write side, check sq_count for non-QPERQ 7097 * perimeters alone. 7098 */ 7099 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1)); 7100 7101 /* 7102 * propagate_syncq() can be called because of either messages on the 7103 * queue syncq or because on events on the queue syncq. Do actual 7104 * message propagations if there are any messages. 
7105 */ 7106 if (qp->q_syncqmsgs) { 7107 isdriver = (qp->q_flag & QISDRV); 7108 7109 if (!isdriver) { 7110 nqp = qp->q_next; 7111 nsq = nqp->q_syncq; 7112 ASSERT(MUTEX_HELD(SQLOCK(nsq))); 7113 /* debug macro */ 7114 SQ_PUTLOCKS_HELD(nsq); 7115 #ifdef DEBUG 7116 func = (void (*)())nqp->q_qinfo->qi_putp; 7117 #endif 7118 } 7119 7120 SQRM_Q(sq, qp); 7121 priority = MAX(qp->q_spri, priority); 7122 qp->q_spri = 0; 7123 head = qp->q_sqhead; 7124 tail = qp->q_sqtail; 7125 qp->q_sqhead = qp->q_sqtail = NULL; 7126 qp->q_syncqmsgs = 0; 7127 7128 /* 7129 * Walk the list of messages, and free them if this is a driver, 7130 * otherwise reset the b_prev and b_queue value to the new putp. 7131 * Afterward, we will just add the head to the end of the next 7132 * syncq, and point the tail to the end of this one. 7133 */ 7134 7135 for (bp = head; bp != NULL; bp = next) { 7136 next = bp->b_next; 7137 if (isdriver) { 7138 bp->b_prev = bp->b_next = NULL; 7139 freemsg(bp); 7140 continue; 7141 } 7142 /* Change the q values for this message */ 7143 bp->b_queue = nqp; 7144 #ifdef DEBUG 7145 bp->b_prev = (mblk_t *)func; 7146 #endif 7147 moved++; 7148 } 7149 /* 7150 * Attach list of messages to the end of the new queue (if there 7151 * is a list of messages). 7152 */ 7153 7154 if (!isdriver && head != NULL) { 7155 ASSERT(tail != NULL); 7156 if (nqp->q_sqhead == NULL) { 7157 nqp->q_sqhead = head; 7158 } else { 7159 ASSERT(nqp->q_sqtail != NULL); 7160 nqp->q_sqtail->b_next = head; 7161 } 7162 nqp->q_sqtail = tail; 7163 /* 7164 * When messages are moved from high priority queue to 7165 * another queue, the destination queue priority is 7166 * upgraded. 7167 */ 7168 7169 if (priority > nqp->q_spri) 7170 nqp->q_spri = priority; 7171 7172 SQPUT_Q(nsq, nqp); 7173 7174 nqp->q_syncqmsgs += moved; 7175 ASSERT(nqp->q_syncqmsgs != 0); 7176 } 7177 } 7178 7179 /* 7180 * Before we leave, we need to make sure there are no 7181 * events listed for this queue. All events for this queue 7182 * will just be freed. 7183 */ 7184 if (sq->sq_evhead != NULL) { 7185 ASSERT(sq->sq_flags & SQ_EVENTS); 7186 prev = NULL; 7187 for (bp = sq->sq_evhead; bp != NULL; bp = next) { 7188 next = bp->b_next; 7189 if (bp->b_queue == qp) { 7190 /* Delete this message */ 7191 if (prev != NULL) { 7192 prev->b_next = next; 7193 /* 7194 * Update sq_evtail if the last element 7195 * is removed. 7196 */ 7197 if (bp == sq->sq_evtail) { 7198 ASSERT(next == NULL); 7199 sq->sq_evtail = prev; 7200 } 7201 } else 7202 sq->sq_evhead = next; 7203 if (sq->sq_evhead == NULL) 7204 sq->sq_flags &= ~SQ_EVENTS; 7205 bp->b_prev = bp->b_next = NULL; 7206 freemsg(bp); 7207 } else { 7208 prev = bp; 7209 } 7210 } 7211 } 7212 7213 flags = sq->sq_flags; 7214 7215 /* Wake up any waiter before leaving. */ 7216 if (flags & SQ_WANTWAKEUP) { 7217 flags &= ~SQ_WANTWAKEUP; 7218 cv_broadcast(&sq->sq_wait); 7219 } 7220 sq->sq_flags = flags; 7221 7222 return (moved); 7223 } 7224 7225 /* 7226 * Try and upgrade to exclusive access at the inner perimeter. If this can 7227 * not be done without blocking then request will be queued on the syncq 7228 * and drain_syncq will run it later. 7229 * 7230 * This routine can only be called from put or service procedures plus 7231 * asynchronous callback routines that have properly entered to 7232 * queue (with entersq.) Thus qwriter_inner assumes the caller has one claim 7233 * on the syncq associated with q. 
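*
* As an illustrative sketch (hypothetical module code, not part of this
* file), a put procedure typically defers work that needs exclusive
* access to the inner perimeter like this:
*
* static void
* xx_reconfig(queue_t *q, mblk_t *mp)
* {
* ... runs exclusively at the inner perimeter ...
* freemsg(mp);
* }
*
* static int
* xx_wput(queue_t *q, mblk_t *mp)
* {
* if (DB_TYPE(mp) == M_IOCTL) {
* qwriter(q, mp, xx_reconfig, PERIM_INNER);
* return (0);
* }
* putnext(q, mp);
* return (0);
* }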
7234 */ 7235 void 7236 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7237 { 7238 syncq_t *sq = q->q_syncq; 7239 uint16_t count; 7240 7241 mutex_enter(SQLOCK(sq)); 7242 count = sq->sq_count; 7243 SQ_PUTLOCKS_ENTER(sq); 7244 SUM_SQ_PUTCOUNTS(sq, count); 7245 ASSERT(count >= 1); 7246 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7247 7248 if (count == 1) { 7249 /* 7250 * Can upgrade. This case also handles nested qwriter calls 7251 * (when the qwriter callback function calls qwriter). In that 7252 * case SQ_EXCL is already set. 7253 */ 7254 sq->sq_flags |= SQ_EXCL; 7255 SQ_PUTLOCKS_EXIT(sq); 7256 mutex_exit(SQLOCK(sq)); 7257 (*func)(q, mp); 7258 /* 7259 * Assumes that leavesq, putnext, and drain_syncq will reset 7260 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7261 * until putnext, leavesq, or drain_syncq drops it. 7262 * That way we handle nested qwriter(INNER) without dropping 7263 * SQ_EXCL until the outermost qwriter callback routine is 7264 * done. 7265 */ 7266 return; 7267 } 7268 SQ_PUTLOCKS_EXIT(sq); 7269 sqfill_events(sq, q, mp, func); 7270 } 7271 7272 /* 7273 * Synchronous callback support functions 7274 */ 7275 7276 /* 7277 * Allocate a callback parameter structure. 7278 * Assumes that caller initializes the flags and the id. 7279 * Acquires SQLOCK(sq) if non-NULL is returned. 7280 */ 7281 callbparams_t * 7282 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7283 { 7284 callbparams_t *cbp; 7285 size_t size = sizeof (callbparams_t); 7286 7287 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7288 7289 /* 7290 * Only try tryhard allocation if the caller is ready to panic. 7291 * Otherwise just fail. 7292 */ 7293 if (cbp == NULL) { 7294 if (kmflags & KM_PANIC) 7295 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7296 &size, kmflags); 7297 else 7298 return (NULL); 7299 } 7300 7301 ASSERT(size >= sizeof (callbparams_t)); 7302 cbp->cbp_size = size; 7303 cbp->cbp_sq = sq; 7304 cbp->cbp_func = func; 7305 cbp->cbp_arg = arg; 7306 mutex_enter(SQLOCK(sq)); 7307 cbp->cbp_next = sq->sq_callbpend; 7308 sq->sq_callbpend = cbp; 7309 return (cbp); 7310 } 7311 7312 void 7313 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7314 { 7315 callbparams_t **pp, *p; 7316 7317 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7318 7319 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7320 if (p == cbp) { 7321 *pp = p->cbp_next; 7322 kmem_free(p, p->cbp_size); 7323 return; 7324 } 7325 } 7326 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7327 "callbparams_free: not found\n")); 7328 } 7329 7330 void 7331 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7332 { 7333 callbparams_t **pp, *p; 7334 7335 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7336 7337 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7338 if (p->cbp_id == id && p->cbp_flags == flag) { 7339 *pp = p->cbp_next; 7340 kmem_free(p, p->cbp_size); 7341 return; 7342 } 7343 } 7344 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7345 "callbparams_free_id: not found\n")); 7346 } 7347 7348 /* 7349 * Callback wrapper function used by once-only callbacks that can be 7350 * cancelled (qtimeout and qbufcall) 7351 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7352 * cancelled by the qun* functions. 
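*
* Illustrative use (hypothetical module code; xx, xx_timer, and xx_tid
* are made-up names): a callback armed with qtimeout(9F) is funneled
* through this wrapper, so quntimeout(9F) can still cancel it once it
* has been dispatched:
*
* xx->xx_tid = qtimeout(q, xx_timer, xx, drv_usectohz(1000000));
* ...
* (void) quntimeout(q, xx->xx_tid);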
7353 */
7354 void
7355 qcallbwrapper(void *arg)
7356 {
7357 callbparams_t *cbp = arg;
7358 syncq_t *sq;
7359 uint16_t count = 0;
7360 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7361 uint16_t type;
7362
7363 sq = cbp->cbp_sq;
7364 mutex_enter(SQLOCK(sq));
7365 type = sq->sq_type;
7366 if (!(type & SQ_CICB)) {
7367 count = sq->sq_count;
7368 SQ_PUTLOCKS_ENTER(sq);
7369 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7370 SUM_SQ_PUTCOUNTS(sq, count);
7371 sq->sq_needexcl++;
7372 ASSERT(sq->sq_needexcl != 0); /* wraparound */
7373 waitflags |= SQ_MESSAGES;
7374 }
7375 /* Cannot handle exclusive entry at the outer perimeter */
7376 ASSERT(type & SQ_COCB);
7377
7378 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7379 if ((sq->sq_callbflags & cbp->cbp_flags) &&
7380 (sq->sq_cancelid == cbp->cbp_id)) {
7381 /* timeout has been cancelled */
7382 sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7383 callbparams_free(sq, cbp);
7384 if (!(type & SQ_CICB)) {
7385 ASSERT(sq->sq_needexcl > 0);
7386 sq->sq_needexcl--;
7387 if (sq->sq_needexcl == 0) {
7388 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7389 }
7390 SQ_PUTLOCKS_EXIT(sq);
7391 }
7392 mutex_exit(SQLOCK(sq));
7393 return;
7394 }
7395 sq->sq_flags |= SQ_WANTWAKEUP;
7396 if (!(type & SQ_CICB)) {
7397 SQ_PUTLOCKS_EXIT(sq);
7398 }
7399 cv_wait(&sq->sq_wait, SQLOCK(sq));
7400 if (!(type & SQ_CICB)) {
7401 count = sq->sq_count;
7402 SQ_PUTLOCKS_ENTER(sq);
7403 SUM_SQ_PUTCOUNTS(sq, count);
7404 }
7405 }
7406
7407 sq->sq_count++;
7408 ASSERT(sq->sq_count != 0); /* Wraparound */
7409 if (!(type & SQ_CICB)) {
7410 ASSERT(count == 0);
7411 sq->sq_flags |= SQ_EXCL;
7412 ASSERT(sq->sq_needexcl > 0);
7413 sq->sq_needexcl--;
7414 if (sq->sq_needexcl == 0) {
7415 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7416 }
7417 SQ_PUTLOCKS_EXIT(sq);
7418 }
7419
7420 mutex_exit(SQLOCK(sq));
7421
7422 cbp->cbp_func(cbp->cbp_arg);
7423
7424 /*
7425 * We drop the lock only for leavesq to re-acquire it.
7426 * Possible optimization is inline of leavesq.
7427 */
7428 mutex_enter(SQLOCK(sq));
7429 callbparams_free(sq, cbp);
7430 mutex_exit(SQLOCK(sq));
7431 leavesq(sq, SQ_CALLBACK);
7432 }
7433
7434 /*
7435 * No need to grab sq_putlocks here. See comment in strsubr.h that
7436 * explains when sq_putlocks are used.
7437 *
7438 * sq_count (or one of the sq_putcounts) has already been
7439 * decremented by the caller, and if SQ_QUEUED, we need to call
7440 * drain_syncq (the global syncq drain).
7441 * If putnext_tail is called with the SQ_EXCL bit set, we are in
7442 * one of two states: a non-CIPUT perimeter, where we need to clear
7443 * the bit, or we went exclusive in the put procedure. In any case,
7444 * we want to clear the bit now, and it is probably easier to do
7445 * this at the beginning of this function (remember, we hold
7446 * the SQLOCK). Lastly, if there are other messages queued
7447 * on the syncq (and not for our destination), enable the syncq
7448 * for background work.
7449 */ 7450 7451 /* ARGSUSED */ 7452 void 7453 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7454 { 7455 uint16_t flags = sq->sq_flags; 7456 7457 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7458 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7459 7460 /* Clear SQ_EXCL if set in passflags */ 7461 if (passflags & SQ_EXCL) { 7462 flags &= ~SQ_EXCL; 7463 } 7464 if (flags & SQ_WANTWAKEUP) { 7465 flags &= ~SQ_WANTWAKEUP; 7466 cv_broadcast(&sq->sq_wait); 7467 } 7468 if (flags & SQ_WANTEXWAKEUP) { 7469 flags &= ~SQ_WANTEXWAKEUP; 7470 cv_broadcast(&sq->sq_exitwait); 7471 } 7472 sq->sq_flags = flags; 7473 7474 /* 7475 * We have cleared SQ_EXCL if we were asked to, and started 7476 * the wakeup process for waiters. If there are no writers 7477 * then we need to drain the syncq if we were told to, or 7478 * enable the background thread to do it. 7479 */ 7480 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7481 if ((passflags & SQ_QUEUED) || 7482 (sq->sq_svcflags & SQ_DISABLED)) { 7483 /* drain_syncq will take care of events in the list */ 7484 drain_syncq(sq); 7485 return; 7486 } else if (flags & SQ_QUEUED) { 7487 sqenable(sq); 7488 } 7489 } 7490 /* Drop the SQLOCK on exit */ 7491 mutex_exit(SQLOCK(sq)); 7492 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7493 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7494 } 7495 7496 void 7497 set_qend(queue_t *q) 7498 { 7499 mutex_enter(QLOCK(q)); 7500 if (!O_SAMESTR(q)) 7501 q->q_flag |= QEND; 7502 else 7503 q->q_flag &= ~QEND; 7504 mutex_exit(QLOCK(q)); 7505 q = _OTHERQ(q); 7506 mutex_enter(QLOCK(q)); 7507 if (!O_SAMESTR(q)) 7508 q->q_flag |= QEND; 7509 else 7510 q->q_flag &= ~QEND; 7511 mutex_exit(QLOCK(q)); 7512 } 7513 7514 /* 7515 * Set QFULL in next service procedure queue (that cares) if not already 7516 * set and if there are already more messages on the syncq than 7517 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7518 * any syncq. 7519 * 7520 * The fq here is the next queue with a service procedure. This is where 7521 * we would fail canputnext, so this is where we need to set QFULL. 7522 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7523 * 7524 * We already have QLOCK at this point. To avoid cross-locks with 7525 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7526 * SQLOCK and sd_reflock, we need to drop respective locks first. 7527 */ 7528 void 7529 set_qfull(queue_t *q) 7530 { 7531 queue_t *fq = NULL; 7532 7533 ASSERT(MUTEX_HELD(QLOCK(q))); 7534 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7535 (q->q_syncqmsgs > sq_max_size)) { 7536 if ((fq = q->q_nfsrv) == q) { 7537 fq->q_flag |= QFULL; 7538 } else { 7539 mutex_exit(QLOCK(q)); 7540 mutex_enter(QLOCK(fq)); 7541 fq->q_flag |= QFULL; 7542 mutex_exit(QLOCK(fq)); 7543 mutex_enter(QLOCK(q)); 7544 } 7545 } 7546 } 7547 7548 void 7549 clr_qfull(queue_t *q) 7550 { 7551 queue_t *oq = q; 7552 7553 q = q->q_nfsrv; 7554 /* Fast check if there is any work to do before getting the lock. */ 7555 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7556 return; 7557 } 7558 7559 /* 7560 * Do not reset QFULL (and backenable) if the q_count is the reason 7561 * for QFULL being set. 7562 */ 7563 mutex_enter(QLOCK(q)); 7564 /* 7565 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7566 * Hence clear the QFULL. 7567 * If both q_count and q_mblkcnt are less than the hiwat mark, 7568 * clear the QFULL. 
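* As a worked illustration (editorial, with made-up numbers): with
* q_hiwat == 512, a queue holding q_count == 600 bytes in
* q_mblkcnt == 4 messages stays QFULL; once it drains to
* q_count == 300 and q_mblkcnt == 2, both are below q_hiwat and
* QFULL is cleared.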
7569 */
7570 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7571 (q->q_mblkcnt < q->q_hiwat))) {
7572 q->q_flag &= ~QFULL;
7573 /*
7574 * A little more confusing, how about this way:
7575 * if someone wants to write,
7576 * AND
7577 * both counts are less than the lowat mark
7578 * OR
7579 * the lowat mark is zero
7580 * THEN
7581 * backenable
7582 */
7583 if ((q->q_flag & QWANTW) &&
7584 (((q->q_count < q->q_lowat) &&
7585 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7586 q->q_flag &= ~QWANTW;
7587 mutex_exit(QLOCK(q));
7588 backenable(oq, 0);
7589 } else
7590 mutex_exit(QLOCK(q));
7591 } else
7592 mutex_exit(QLOCK(q));
7593 }
7594
7595 /*
7596 * Set the forward service procedure pointer.
7597 *
7598 * Called at insert-time to cache a queue's next forward service procedure in
7599 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7600 * has a service procedure then q_nfsrv points to itself. If the queue to be
7601 * inserted does not have a service procedure, then q_nfsrv points to the next
7602 * queue forward that has a service procedure. If the queue is at the logical
7603 * end of the stream (driver for write side, stream head for the read side)
7604 * and does not have a service procedure, then q_nfsrv also points to itself.
7605 */
7606 void
7607 set_nfsrv_ptr(
7608 queue_t *rnew, /* read queue pointer to new module */
7609 queue_t *wnew, /* write queue pointer to new module */
7610 queue_t *prev_rq, /* read queue pointer to the module above */
7611 queue_t *prev_wq) /* write queue pointer to the module above */
7612 {
7613 queue_t *qp;
7614
7615 if (prev_wq->q_next == NULL) {
7616 /*
7617 * Insert the driver, initialize the driver and stream head.
7618 * In this case, prev_rq/prev_wq should be the stream head.
7619 * _I_INSERT does not allow inserting a driver. Make sure
7620 * that it is not an insertion.
7621 */
7622 ASSERT(!(rnew->q_flag & _QINSERTING));
7623 wnew->q_nfsrv = wnew;
7624 if (rnew->q_qinfo->qi_srvp)
7625 rnew->q_nfsrv = rnew;
7626 else
7627 rnew->q_nfsrv = prev_rq;
7628 prev_rq->q_nfsrv = prev_rq;
7629 prev_wq->q_nfsrv = prev_wq;
7630 } else {
7631 /*
7632 * Set up the read side q_nfsrv pointer. This MUST be done
7633 * before setting the write side, because the setting of
7634 * the write side for a fifo may depend on it.
7635 *
7636 * Suppose we have a fifo that only has pipemod pushed.
7637 * pipemod has no read or write service procedures, so
7638 * nfsrv for both pipemod queues points to prev_rq (the
7639 * stream read head). Now push bufmod (which has only a
7640 * read service procedure). Doing the write side first,
7641 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7642 * is WRONG; the next queue forward from wnew with a
7643 * service procedure will be rnew, not the stream read head.
7644 * Since the downstream queue (which in the case of a fifo
7645 * is the read queue rnew) can affect upstream queues, it
7646 * needs to be done first. Setting up the read side first
7647 * sets nfsrv for both pipemod queues to rnew and then
7648 * when the write side is set up, wnew->q_nfsrv will also
7649 * point to rnew.
7650 */
7651 if (rnew->q_qinfo->qi_srvp) {
7652 /*
7653 * Use _OTHERQ() because, if this is a pipe, the next
7654 * module may have been pushed from the other end and
7655 * q_next could be a read queue.
7656 */
7657 qp = _OTHERQ(prev_wq->q_next);
7658 while (qp && qp->q_nfsrv != qp) {
7659 qp->q_nfsrv = rnew;
7660 qp = backq(qp);
7661 }
7662 rnew->q_nfsrv = rnew;
7663 } else
7664 rnew->q_nfsrv = prev_rq->q_nfsrv;
7665
7666 /* set up write side q_nfsrv pointer */
7667 if (wnew->q_qinfo->qi_srvp) {
7668 wnew->q_nfsrv = wnew;
7669
7670 /*
7671 * For insertion, need to update nfsrv of the modules
7672 * above which do not have a service routine.
7673 */
7674 if (rnew->q_flag & _QINSERTING) {
7675 for (qp = prev_wq;
7676 qp != NULL && qp->q_nfsrv != qp;
7677 qp = backq(qp)) {
7678 qp->q_nfsrv = wnew->q_nfsrv;
7679 }
7680 }
7681 } else {
7682 if (prev_wq->q_next == prev_rq)
7683 /*
7684 * Since prev_wq/prev_rq are the middle of a
7685 * fifo, wnew/rnew will also be the middle of
7686 * a fifo and wnew's nfsrv is same as rnew's.
7687 */
7688 wnew->q_nfsrv = rnew->q_nfsrv;
7689 else
7690 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7691 }
7692 }
7693 }
7694
7695 /*
7696 * Reset the forward service procedure pointer; called at remove-time.
7697 */
7698 void
7699 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7700 {
7701 queue_t *tmp_qp;
7702
7703 /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7704 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7705 for (tmp_qp = backq(wqp);
7706 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7707 tmp_qp = backq(tmp_qp)) {
7708 tmp_qp->q_nfsrv = wqp->q_nfsrv;
7709 }
7710 }
7711
7712 /* Reset the read side q_nfsrv pointer */
7713 if (rqp->q_qinfo->qi_srvp) {
7714 if (wqp->q_next) { /* non-driver case */
7715 tmp_qp = _OTHERQ(wqp->q_next);
7716 while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7717 /* Note that rqp->q_next cannot be NULL */
7718 ASSERT(rqp->q_next != NULL);
7719 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7720 tmp_qp = backq(tmp_qp);
7721 }
7722 }
7723 }
7724 }
7725
7726 /*
7727 * This routine should be called after all stream geometry changes to update
7728 * the stream head cached struio() rd/wr queue pointers. Note that it must be
7729 * called with the stream streamlock()ed.
7730 *
7731 * Note: only enables Synchronous STREAMS for a side of a Stream which has
7732 * an explicit synchronous barrier module queue. That is, a queue that
7733 * has specified a struio() type.
7734 */
7735 static void
7736 strsetuio(stdata_t *stp)
7737 {
7738 queue_t *wrq;
7739
7740 if (stp->sd_flag & STPLEX) {
7741 /*
7742 * Not a stream head, but a mux, so no Synchronous STREAMS.
7743 */
7744 stp->sd_struiowrq = NULL;
7745 stp->sd_struiordq = NULL;
7746 return;
7747 }
7748 /*
7749 * Scan the write queue(s) while synchronous
7750 * until we find a qinfo uio type specified.
7751 */
7752 wrq = stp->sd_wrq->q_next;
7753 while (wrq) {
7754 if (wrq->q_struiot == STRUIOT_NONE) {
7755 wrq = 0;
7756 break;
7757 }
7758 if (wrq->q_struiot != STRUIOT_DONTCARE)
7759 break;
7760 if (! _SAMESTR(wrq)) {
7761 wrq = 0;
7762 break;
7763 }
7764 wrq = wrq->q_next;
7765 }
7766 stp->sd_struiowrq = wrq;
7767 /*
7768 * Scan the read queue(s) while synchronous
7769 * until we find a qinfo uio type specified.
7770 */
7771 wrq = stp->sd_wrq->q_next;
7772 while (wrq) {
7773 if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7774 wrq = 0;
7775 break;
7776 }
7777 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7778 break;
7779 if (! _SAMESTR(wrq)) {
7780 wrq = 0;
7781 break;
7782 }
7783 wrq = wrq->q_next;
7784 }
7785 stp->sd_struiordq = wrq ?
_RD(wrq) : 0; 7786 } 7787 7788 /* 7789 * pass_wput, unblocks the passthru queues, so that 7790 * messages can arrive at muxs lower read queue, before 7791 * I_LINK/I_UNLINK is acked/nacked. 7792 */ 7793 static void 7794 pass_wput(queue_t *q, mblk_t *mp) 7795 { 7796 syncq_t *sq; 7797 7798 sq = _RD(q)->q_syncq; 7799 if (sq->sq_flags & SQ_BLOCKED) 7800 unblocksq(sq, SQ_BLOCKED, 0); 7801 putnext(q, mp); 7802 } 7803 7804 /* 7805 * Set up queues for the link/unlink. 7806 * Create a new queue and block it and then insert it 7807 * below the stream head on the lower stream. 7808 * This prevents any messages from arriving during the setq 7809 * as well as while the mux is processing the LINK/I_UNLINK. 7810 * The blocked passq is unblocked once the LINK/I_UNLINK has 7811 * been acked or nacked or if a message is generated and sent 7812 * down muxs write put procedure. 7813 * see pass_wput(). 7814 * 7815 * After the new queue is inserted, all messages coming from below are 7816 * blocked. The call to strlock will ensure that all activity in the stream head 7817 * read queue syncq is stopped (sq_count drops to zero). 7818 */ 7819 static queue_t * 7820 link_addpassthru(stdata_t *stpdown) 7821 { 7822 queue_t *passq; 7823 sqlist_t sqlist; 7824 7825 passq = allocq(); 7826 STREAM(passq) = STREAM(_WR(passq)) = stpdown; 7827 /* setq might sleep in allocator - avoid holding locks. */ 7828 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ, 7829 SQ_CI|SQ_CO, B_FALSE); 7830 claimq(passq); 7831 blocksq(passq->q_syncq, SQ_BLOCKED, 1); 7832 insertq(STREAM(passq), passq); 7833 7834 /* 7835 * Use strlock() to wait for the stream head sq_count to drop to zero 7836 * since we are going to change q_ptr in the stream head. Note that 7837 * insertq() doesn't wait for any syncq counts to drop to zero. 7838 */ 7839 sqlist.sqlist_head = NULL; 7840 sqlist.sqlist_index = 0; 7841 sqlist.sqlist_size = sizeof (sqlist_t); 7842 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq); 7843 strlock(stpdown, &sqlist); 7844 strunlock(stpdown, &sqlist); 7845 7846 releaseq(passq); 7847 return (passq); 7848 } 7849 7850 /* 7851 * Let messages flow up into the mux by removing 7852 * the passq. 7853 */ 7854 static void 7855 link_rempassthru(queue_t *passq) 7856 { 7857 claimq(passq); 7858 removeq(passq); 7859 releaseq(passq); 7860 freeq(passq); 7861 } 7862 7863 /* 7864 * Wait for the condition variable pointed to by `cvp' to be signaled, 7865 * or for `tim' milliseconds to elapse, whichever comes first. If `tim' 7866 * is negative, then there is no time limit. If `nosigs' is non-zero, 7867 * then the wait will be non-interruptible. 7868 * 7869 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout. 
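*
* Illustrative call (mirroring the use in strwaitmark() below): wait up
* to 100 milliseconds, without allowing signals to interrupt, for a
* stream head state change:
*
* if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1)
* ... timed out ...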

/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
 * is negative, then there is no time limit. If `nosigs' is non-zero,
 * then the wait will be non-interruptible.
 *
 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
 */
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
	clock_t ret, now, tick;

	if (tim < 0) {
		if (nosigs) {
			cv_wait(cvp, mp);
			ret = 1;
		} else {
			ret = cv_wait_sig(cvp, mp);
		}
	} else if (tim > 0) {
		/*
		 * Convert milliseconds to clock ticks.
		 */
		tick = MSEC_TO_TICK_ROUNDUP(tim);
		time_to_wait(&now, tick);
		if (nosigs) {
			ret = cv_timedwait(cvp, mp, now);
		} else {
			ret = cv_timedwait_sig(cvp, mp, now);
		}
	} else {
		ret = -1;
	}
	return (ret);
}

/*
 * Wait until the stream head can determine if it is at the mark but
 * don't wait forever to prevent a race condition between the "mark"
 * state in the stream head and any mark state in the caller/user of
 * this routine.
 *
 * This is used by sockets, and for a socket it would be incorrect
 * to return a failure for SIOCATMARK when there is no data in the
 * receive queue and the marked urgent data is traveling up the stream.
 *
 * This routine waits until the mark is known by waiting for one of
 * these three events:
 *	The stream head read queue becoming non-empty (including an EOF).
 *	The STRATMARK flag being set (due to a MSGMARKNEXT message).
 *	The STRNOTATMARK flag being set (which indicates that the transport
 *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
 *	the mark).
 *
 * The routine returns 1 if the stream is at the mark; 0 if it can
 * be determined that the stream is not at the mark. If the wait times
 * out and it can't determine whether or not the stream might be at the
 * mark, the routine will return -1.
 *
 * Note: This routine should only be used when a mark is pending, i.e.,
 * in the socket case, when the SIGURG has been posted.
 * Note2: This routine cannot wake up just because synchronous streams
 * indicate that data is available, since it is not possible to use the
 * synchronous streams interfaces to determine the b_flag value for the
 * data queued below the stream head.
 */
int
strwaitmark(vnode_t *vp)
{
	struct stdata *stp = vp->v_stream;
	queue_t *rq = _RD(stp->sd_wrq);
	int mark;

	mutex_enter(&stp->sd_lock);
	while (rq->q_first == NULL &&
	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
		stp->sd_flag |= RSLEEP;

		/* Wait for 100 milliseconds for any state change. */
		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
			mutex_exit(&stp->sd_lock);
			return (-1);
		}
	}
	if (stp->sd_flag & STRATMARK)
		mark = 1;
	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
		mark = 1;
	else
		mark = 0;

	mutex_exit(&stp->sd_lock);
	return (mark);
}
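
/*
 * Example (illustrative sketch): a hypothetical SIOCATMARK
 * implementation might map the three strwaitmark() return values
 * like this:
 *
 *	int mark = strwaitmark(vp);
 *
 *	if (mark == -1)
 *		mark = 0;	(could not tell; the caller picks a policy)
 *	return (copyout(&mark, (void *)arg, sizeof (int)) ? EFAULT : 0);
 */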

/*
 * Set a read side error. If persist is set, change the socket error
 * to persistent. If errfunc is set, install the function as the
 * exported error handler.
 */
void
strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_rerror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STRDERR;
	else
		stp->sd_flag |= STRDERR;
	if (persist) {
		stp->sd_flag &= ~STRDERRNONPERSIST;
	} else {
		stp->sd_flag |= STRDERRNONPERSIST;
	}
	stp->sd_rderrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Set a write side error. If persist is set, change the socket error
 * to persistent. If errfunc is set, install the function as the
 * exported error handler.
 */
void
strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_werror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STWRERR;
	else
		stp->sd_flag |= STWRERR;
	if (persist) {
		stp->sd_flag &= ~STWRERRNONPERSIST;
	} else {
		stp->sd_flag |= STWRERRNONPERSIST;
	}
	stp->sd_wrerrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}
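
/*
 * Example (illustrative sketch): a transport that has observed a
 * connection reset might post a persistent error on both sides with
 * no custom error handler, e.g.:
 *
 *	strsetrerror(vp, ECONNRESET, 1, NULL);
 *	strsetwerror(vp, ECONNRESET, 1, NULL);
 *
 * Passing persist as 0 instead marks the error non-persistent (the
 * *NONPERSIST handling above), so it is cleared once reported.
 */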

/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}

void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	stp->sd_rputdatafunc = rdatafunc;
	stp->sd_wputdatafunc = wdatafunc;

	mutex_exit(&stp->sd_lock);
}
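
/*
 * Example (illustrative sketch): a stream head consumer such as sockfs
 * might install its hooks right after stream setup; the hook function
 * name below is hypothetical:
 *
 *	strsetrputhooks(vp, SH_CONSOL_DATA | SH_SIGALLDATA,
 *	    my_proto_hook, NULL);
 *	strsetwputhooks(vp, SH_SIGPIPE, 0);
 *
 * Passing NULL for a hook keeps the strrput_proto/strrput_misc
 * defaults, as implemented above.
 */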

/* Used within framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is
	 * not already being serviced. If it is being serviced, the
	 * runservice() function will detect that QENAB is set and call
	 * the service procedure before clearing the QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = lbolt;

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing unless it is already scheduled or the stream head
	 * intends to process it in the foreground later (indicated by
	 * the STRS_WILLSERVICE flag).
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags
	 * should show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream we are the first producer and
	 * need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * Task queue failed so fail over to the backup
			 * servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag
			 * because it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}

static void
queue_service(queue_t *q)
{
	/*
	 * The queue in the list should have QENAB set and should not
	 * have QINSERVICE set. QINSERVICE is set when the queue is
	 * dequeued, and qenable_locked() doesn't enqueue a queue with
	 * QINSERVICE set.
	 */
	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* If we came here from the background thread, clear the flag. */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* Let drain_syncq know that it's being called in the background. */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}
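
/*
 * Example (illustrative sketch): the scheduling machinery above is
 * normally reached from a module's put procedure deferring work to its
 * service procedure; the names below are hypothetical:
 *
 *	static int
 *	my_rput(queue_t *q, mblk_t *mp)
 *	{
 *		(void) putq(q, mp);
 *		return (0);
 *	}
 *
 * Unless the queue was marked noenable(9F), putq() schedules the
 * queue, which eventually funnels into qenable_locked() above.
 */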

static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter
	 * to signal that a qwriter(OUTER) is either investigating
	 * whether it can run or is actually running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set
	 * to block entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now since
	 * outer_exit does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked
	 * will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}

/*
 * Replace the cred currently in the mblk with a different one.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr)
{
	cred_t *ocr = DB_CRED(mp);

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(mp->b_datap->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
}
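
/*
 * Example (illustrative sketch): attaching credentials to a freshly
 * allocated message before sending it downstream:
 *
 *	mblk_t *mp;
 *
 *	if ((mp = allocb(sizeof (union T_primitives), BPRI_MED)) != NULL)
 *		mblk_setcred(mp, CRED());
 *
 * mblk_setcred() above takes its own hold on the cred, so the caller
 * keeps (and must still release) its own reference.
 */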

/*
 * Set the cred and pid for each mblk in the message. It is assumed that
 * the message passed in does not already have a cred.
 */
void
msg_setcredpid(mblk_t *mp, cred_t *cr, pid_t pid)
{
	while (mp != NULL) {
		ASSERT(DB_CRED(mp) == NULL);
		mblk_setcred(mp, cr);
		DB_CPID(mp) = pid;
		mp = mp->b_cont;
	}
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate values for M_DATA type */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;

	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp) & (HCK_IPV4_HDRCKSUM |
			    HCK_PARTIALCKSUM | HCK_FULLCKSUM |
			    HCK_FULLCKSUM_OK);
			if ((*flags & (HCK_PARTIALCKSUM |
			    HCK_FULLCKSUM)) != 0) {
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
				if ((*flags & HCK_PARTIALCKSUM) != 0) {
					if (start != NULL)
						*start =
						    (uint32_t)DB_CKSUMSTART(mp);
					if (stuff != NULL)
						*stuff =
						    (uint32_t)DB_CKSUMSTUFF(mp);
					if (end != NULL)
						*end =
						    (uint32_t)DB_CKSUMEND(mp);
				}
			}
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}

void
lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	/* Set the flags */
	DB_LSOFLAGS(mp) |= flags;
	DB_LSOMSS(mp) = mss;
}

void
lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	if (flags != NULL) {
		*flags = DB_CKSUMFLAGS(mp) & HW_LSO;
		if ((*flags != 0) && (mss != NULL))
			*mss = (uint32_t)DB_LSOMSS(mp);
	}
}
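
/*
 * Example (illustrative sketch): bcksum() below can fold a running
 * partial checksum across the fragments of a message, assuming the
 * fragments are even-length so byte parity is preserved:
 *
 *	unsigned sum = 0;
 *	mblk_t *mp1;
 *
 *	for (mp1 = mp; mp1 != NULL; mp1 = mp1->b_cont)
 *		sum = bcksum(mp1->b_rptr, MBLKL(mp1), sum);
 *
 * Chaining works because bcksum() normalizes its result to 16 bits
 * before returning it.
 */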

/*
 * Checksum buffer *bp for len bytes with psum partial checksum,
 * or 0 if none, and return the 16 bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * bp is 16 bit aligned and len is a multiple of 16 bit words.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * bp isn't 16 bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * bp is 16 bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum. The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}

boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}
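
/*
 * Example (illustrative sketch): callers that end up with the function
 * version of QLOCK above use it exactly like the macro:
 *
 *	mutex_enter(QLOCK(q));
 *	... examine q_first/q_count/q_flag ...
 *	mutex_exit(QLOCK(q));
 */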

/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
	str_stack_t *ss;
	int i;

	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
	ss->ss_netstack = ns;

	/*
	 * set up autopush
	 */
	sad_initspace(ss);

	/*
	 * set up mux_node structures.
	 */
	ss->ss_devcnt = devcnt;	/* In case it should change before free */
	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
	    ss->ss_devcnt), KM_SLEEP);
	for (i = 0; i < ss->ss_devcnt; i++)
		ss->ss_mux_nodes[i].mn_imaj = i;
	return (ss);
}

/*
 * Note: run at zone shutdown and not destroy so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;
	int i;
	cred_t *cr;

	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
	ASSERT(cr != NULL);

	/* Undo all the I_PLINKs for this zone */
	for (i = 0; i < ss->ss_devcnt; i++) {
		struct mux_edge *ep;
		ldi_handle_t lh;
		ldi_ident_t li;
		int ret;
		int rval;
		dev_t rdev;

		ep = ss->ss_mux_nodes[i].mn_outp;
		if (ep == NULL)
			continue;
		ret = ldi_ident_from_major((major_t)i, &li);
		if (ret != 0) {
			continue;
		}
		rdev = ep->me_dev;
		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
		    cr, &lh, li);
		if (ret != 0) {
			ldi_ident_release(li);
			continue;
		}

		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
		    cr, &rval);
		if (ret) {
			(void) ldi_close(lh, FREAD|FWRITE, cr);
			ldi_ident_release(li);
			continue;
		}
		(void) ldi_close(lh, FREAD|FWRITE, cr);

		/* Close layered handles */
		ldi_ident_release(li);
	}
	crfree(cr);

	sad_freespace(ss);

	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
	ss->ss_mux_nodes = NULL;
}

/*
 * Free the structure; str_stack_shutdown did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;

	kmem_free(ss, sizeof (*ss));
}
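
/*
 * Note (illustrative): the three netstack callbacks above are
 * registered from strinit() earlier in this file, roughly as
 *
 *	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
 *	    str_stack_fini);
 *
 * so each zone/netstack gets its own autopush and persistent-link
 * state.
 */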